Compare commits


1 commit

Author SHA1 Message Date
Vivian Lim dafa35a311 Merge 2cd5abfdcf into 6f4cb2f14e 2024-11-06 08:29:58 +08:00
38 changed files with 434 additions and 1122 deletions

go.mod (4 changed lines)
View file

@ -12,7 +12,7 @@ require (
codeberg.org/gruf/go-debug v1.3.0
codeberg.org/gruf/go-errors/v2 v2.3.2
codeberg.org/gruf/go-fastcopy v1.1.3
codeberg.org/gruf/go-ffmpreg v0.6.0
codeberg.org/gruf/go-ffmpreg v0.4.2
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf
codeberg.org/gruf/go-kv v1.6.5
codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f
@ -43,7 +43,7 @@ require (
github.com/miekg/dns v1.1.62
github.com/minio/minio-go/v7 v7.0.80
github.com/mitchellh/mapstructure v1.5.0
github.com/ncruces/go-sqlite3 v0.20.2
github.com/ncruces/go-sqlite3 v0.20.0
github.com/oklog/ulid v1.3.1
github.com/prometheus/client_golang v1.20.5
github.com/spf13/cobra v1.8.1

go.sum (generated, 8 changed lines)
View file

@ -46,8 +46,8 @@ codeberg.org/gruf/go-fastcopy v1.1.3 h1:Jo9VTQjI6KYimlw25PPc7YLA3Xm+XMQhaHwKnM7x
codeberg.org/gruf/go-fastcopy v1.1.3/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s=
codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0=
codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q=
codeberg.org/gruf/go-ffmpreg v0.6.0 h1:/cfUJ9bFKEoXT9LDYZy3eZ0HF60YWcO+0nGciepJKMw=
codeberg.org/gruf/go-ffmpreg v0.6.0/go.mod h1:Ar5nbt3tB2Wr0uoaqV3wDBNwAx+H+AB/mV7Kw7NlZTI=
codeberg.org/gruf/go-ffmpreg v0.4.2 h1:HKkPapm/PWkxsnUdjyQOGpwl5Qoa2EBrUQ09s4R4/FA=
codeberg.org/gruf/go-ffmpreg v0.4.2/go.mod h1:Ar5nbt3tB2Wr0uoaqV3wDBNwAx+H+AB/mV7Kw7NlZTI=
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A=
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk=
codeberg.org/gruf/go-kv v1.6.5 h1:ttPf0NA8F79pDqBttSudPTVCZmGncumeNIxmeM9ztz0=
@ -432,8 +432,8 @@ github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-sqlite3 v0.20.2 h1:cMLIwrLZQuCWVCEOowSqlIlpzgbag3jnYVW4NM5u01M=
github.com/ncruces/go-sqlite3 v0.20.2/go.mod h1:yL4ZNWGsr1/8pcLfpPW1RT1WFdvyeHonrgIwwi4rvkg=
github.com/ncruces/go-sqlite3 v0.20.0 h1:/nBLvYxj7sk9S6y57nmMFvoQ/KJtGo0pNi8J80s8oJU=
github.com/ncruces/go-sqlite3 v0.20.0/go.mod h1:yL4ZNWGsr1/8pcLfpPW1RT1WFdvyeHonrgIwwi4rvkg=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=

View file

@ -181,10 +181,6 @@ func ffmpeg(ctx context.Context, inpath string, outpath string, args ...string)
}
fscfg = fscfg.WithFSMount(shared, path.Dir(inpath))
// Set anonymous module name.
modcfg = modcfg.WithName("")
// Update with prepared fs config.
return modcfg.WithFSConfig(fscfg)
},
})
@ -251,10 +247,6 @@ func ffprobe(ctx context.Context, filepath string) (*result, error) {
}
fscfg = fscfg.WithFSMount(in, path.Dir(filepath))
// Set anonymous module name.
modcfg = modcfg.WithName("")
// Update with prepared fs config.
return modcfg.WithFSConfig(fscfg)
},
})

View file

@ -21,7 +21,6 @@
import (
"context"
"errors"
"codeberg.org/gruf/go-ffmpreg/wasm"
)
@ -36,25 +35,12 @@
// prepares the runner to only allow max given concurrent running instances.
func InitFfmpeg(ctx context.Context, max int) error {
ffmpegRunner.Init(max)
return initWASM(ctx)
return compileFfmpeg(ctx)
}
// Ffmpeg runs the given arguments with an instance of ffmpeg.
func Ffmpeg(ctx context.Context, args Args) (uint32, error) {
return ffmpegRunner.Run(ctx, func() (uint32, error) {
// Load WASM rt and module.
ffmpreg := ffmpreg.Load()
if ffmpreg == nil {
return 0, errors.New("wasm not initialized")
}
// Call into ffmpeg.
args.Name = "ffmpeg"
return wasm.Run(ctx,
ffmpreg.run,
ffmpreg.mod,
args,
)
return wasm.Run(ctx, runtime, ffmpeg, args)
})
}
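
For orientation, a caller of this wrapper first initializes the runner pool and then submits invocations through Ffmpeg. Below is a minimal, hypothetical sketch (not part of this diff): it assumes the wrapper package is named ffmpeg and that Args exposes the Args []string field referenced elsewhere in this change; the concurrency limit and command line are illustrative.

package ffmpeg

import (
	"context"
	"fmt"
)

// exampleTranscode is a hypothetical caller of the wrapper above.
func exampleTranscode(ctx context.Context, in, out string) error {
	// Allow at most 2 concurrent ffmpeg instances (illustrative value).
	if err := InitFfmpeg(ctx, 2); err != nil {
		return err
	}
	// Args.Args carries the ffmpeg command line; Args.Config (see the
	// wasm package further down) could mount the directories of in/out.
	status, err := Ffmpeg(ctx, Args{
		Args: []string{"-i", in, out},
	})
	if err != nil {
		return err
	}
	if status != 0 {
		return fmt.Errorf("ffmpeg exited with status %d", status)
	}
	return nil
}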

View file

@ -21,7 +21,6 @@
import (
"context"
"errors"
"codeberg.org/gruf/go-ffmpreg/wasm"
)
@ -36,25 +35,12 @@
// prepares the runner to only allow max given concurrent running instances.
func InitFfprobe(ctx context.Context, max int) error {
ffprobeRunner.Init(max)
return initWASM(ctx)
return compileFfprobe(ctx)
}
// Ffprobe runs the given arguments with an instance of ffprobe.
func Ffprobe(ctx context.Context, args Args) (uint32, error) {
return ffprobeRunner.Run(ctx, func() (uint32, error) {
// Load WASM rt and module.
ffmpreg := ffmpreg.Load()
if ffmpreg == nil {
return 0, errors.New("wasm not initialized")
}
// Call into ffprobe.
args.Name = "ffprobe"
return wasm.Run(ctx,
ffmpreg.run,
ffmpreg.mod,
args,
)
return wasm.Run(ctx, runtime, ffprobe, args)
})
}

View file

@ -22,27 +22,72 @@
import (
"context"
"os"
"sync/atomic"
"unsafe"
"codeberg.org/gruf/go-ffmpreg/embed"
ffmpeglib "codeberg.org/gruf/go-ffmpreg/embed/ffmpeg"
ffprobelib "codeberg.org/gruf/go-ffmpreg/embed/ffprobe"
"codeberg.org/gruf/go-ffmpreg/wasm"
"github.com/tetratelabs/wazero"
)
// ffmpreg is a concurrency-safe pointer
// to our necessary WebAssembly runtime
// and compiled ffmpreg module instance.
var ffmpreg atomic.Pointer[struct {
run wazero.Runtime
mod wazero.CompiledModule
}]
var (
// shared WASM runtime instance.
runtime wazero.Runtime
// initWASM safely prepares new WebAssembly runtime
// and compiles the ffmpreg module instance, if the global
// pointer has not been set already; otherwise this is a no-op.
func initWASM(ctx context.Context) error {
if ffmpreg.Load() != nil {
// ffmpeg / ffprobe compiled WASM.
ffmpeg wazero.CompiledModule
ffprobe wazero.CompiledModule
)
// compileFfmpeg ensures the ffmpeg WebAssembly has been
// pre-compiled into memory. If already compiled, this is a no-op.
func compileFfmpeg(ctx context.Context) error {
if ffmpeg != nil {
return nil
}
// Ensure runtime already initialized.
if err := initRuntime(ctx); err != nil {
return err
}
// Compile the ffmpeg WebAssembly module into memory.
cmod, err := runtime.CompileModule(ctx, ffmpeglib.B)
if err != nil {
return err
}
// Set module.
ffmpeg = cmod
return nil
}
// compileFfprobe ensures the ffprobe WebAssembly has been
// pre-compiled into memory. If already compiled, this is a no-op.
func compileFfprobe(ctx context.Context) error {
if ffprobe != nil {
return nil
}
// Ensure runtime already initialized.
if err := initRuntime(ctx); err != nil {
return err
}
// Compile the ffprobe WebAssembly module into memory.
cmod, err := runtime.CompileModule(ctx, ffprobelib.B)
if err != nil {
return err
}
// Set module.
ffprobe = cmod
return nil
}
// initRuntime initializes the global wazero.Runtime;
// if already initialized, this function is a no-op.
func initRuntime(ctx context.Context) (err error) {
if runtime != nil {
return nil
}
@ -60,59 +105,7 @@ func initWASM(ctx context.Context) error {
cfg = cfg.WithCompilationCache(cache)
}
var (
run wazero.Runtime
mod wazero.CompiledModule
err error
set bool
)
defer func() {
if err == nil && set {
// Drop binary.
embed.B = nil
return
}
// Close module.
if !isNil(mod) {
mod.Close(ctx)
}
// Close runtime.
if !isNil(run) {
run.Close(ctx)
}
}()
// Initialize new runtime from config.
run, err = wasm.NewRuntime(ctx, cfg)
if err != nil {
return err
}
// Compile ffmpreg WebAssembly into memory.
mod, err = run.CompileModule(ctx, embed.B)
if err != nil {
return err
}
// Try set global WASM runtime and module,
// or if beaten to it defer will handle close.
set = ffmpreg.CompareAndSwap(nil, &struct {
run wazero.Runtime
mod wazero.CompiledModule
}{
run: run,
mod: mod,
})
return nil
}
// isNil will safely check if 'v' is nil without
// dealing with weird Go interface nil bullshit.
func isNil(i interface{}) bool {
type eface struct{ Type, Data unsafe.Pointer }
return (*eface)(unsafe.Pointer(&i)).Data == nil
runtime, err = wasm.NewRuntime(ctx, cfg)
return
}

Binary file not shown.

View file

@ -0,0 +1,25 @@
package ffmpeg
import (
_ "embed"
"os"
)
func init() {
// Check for WASM source file path.
path := os.Getenv("FFMPEG_WASM")
if path == "" {
return
}
var err error
// Read file into memory.
B, err = os.ReadFile(path)
if err != nil {
panic(err)
}
}
//go:embed ffmpeg.wasm
var B []byte

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,25 @@
package ffprobe
import (
_ "embed"
"os"
)
func init() {
// Check for WASM source file path.
path := os.Getenv("FFPROBE_WASM")
if path == "" {
return
}
var err error
// Read file into memory.
B, err = os.ReadFile(path)
if err != nil {
panic(err)
}
}
//go:embed ffprobe.wasm
var B []byte

View file

@ -1,39 +0,0 @@
package embed
import (
"bytes"
"compress/gzip"
_ "embed"
"io"
"os"
)
func init() {
var err error
if path := os.Getenv("FFMPREG_WASM"); path != "" {
// Read file into memory.
B, err = os.ReadFile(path)
if err != nil {
panic(err)
}
}
// Wrap bytes in reader.
b := bytes.NewReader(B)
// Create unzipper from reader.
gz, err := gzip.NewReader(b)
if err != nil {
panic(err)
}
// Extract gzipped binary.
B, err = io.ReadAll(gz)
if err != nil {
panic(err)
}
}
//go:embed ffmpreg.wasm.gz
var B []byte

View file

@ -14,11 +14,6 @@
// wazero.Runtime on module instantiation.
type Args struct {
// Program name, depending on the
// module being run this may or may
// not be necessary.
Name string
// Optional further module configuration function.
// (e.g. to mount filesystem dir, set env vars, etc).
Config func(wazero.ModuleConfig) wazero.ModuleConfig
@ -44,7 +39,7 @@ func Run(
// Prefix arguments with module name.
cargs := make([]string, len(args.Args)+1)
cargs[0] = args.Name
cargs[0] = module.Name()
copy(cargs[1:], args.Args)
// Prepare new module configuration.

View file

@ -2,7 +2,6 @@
import (
"context"
"fmt"
"strconv"
"github.com/tetratelabs/wazero/api"
@ -71,15 +70,6 @@ func logCallback(ctx context.Context, mod api.Module, _, iCode, zMsg uint32) {
}
}
// Log writes a message into the error log established by [Conn.ConfigLog].
//
// https://sqlite.org/c3ref/log.html
func (c *Conn) Log(code ExtendedErrorCode, format string, a ...any) {
if c.log != nil {
c.log(code, fmt.Sprintf(format, a...))
}
}
// FileControl allows low-level control of database files.
// Only a subset of opcodes are supported.
//

View file

@ -89,7 +89,6 @@ func (ctx Context) ResultText(value string) {
}
// ResultRawText sets the text result of the function to a []byte.
// Returning a nil slice is the same as calling [Context.ResultNull].
//
// https://sqlite.org/c3ref/result_blob.html
func (ctx Context) ResultRawText(value []byte) {

View file

@ -106,11 +106,6 @@ func (e ErrorCode) Temporary() bool {
return e == BUSY
}
// ExtendedCode returns the extended error code for this error.
func (e ErrorCode) ExtendedCode() ExtendedErrorCode {
return ExtendedErrorCode(e)
}
// Error implements the error interface.
func (e ExtendedErrorCode) Error() string {
return util.ErrorCodeString(uint32(e))
@ -141,11 +136,6 @@ func (e ExtendedErrorCode) Timeout() bool {
return e == BUSY_TIMEOUT
}
// Code returns the primary error code for this error.
func (e ExtendedErrorCode) Code() ErrorCode {
return ErrorCode(e)
}
func errorCode(err error, def ErrorCode) (msg string, code uint32) {
switch code := err.(type) {
case nil:

View file

@ -1,4 +1,4 @@
//go:build unix && !sqlite3_nosys
//go:build unix && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_noshm || sqlite3_nosys)
package util
@ -55,10 +55,10 @@ type MappedRegion struct {
used bool
}
func MapRegion(ctx context.Context, mod api.Module, f *os.File, offset int64, size int32, readOnly bool) (*MappedRegion, error) {
func MapRegion(ctx context.Context, mod api.Module, f *os.File, offset int64, size int32, prot int) (*MappedRegion, error) {
s := ctx.Value(moduleKey{}).(*moduleState)
r := s.new(ctx, mod, size)
err := r.mmap(f, offset, readOnly)
err := r.mmap(f, offset, prot)
if err != nil {
return nil, err
}
@ -75,11 +75,7 @@ func (r *MappedRegion) Unmap() error {
return err
}
func (r *MappedRegion) mmap(f *os.File, offset int64, readOnly bool) error {
prot := unix.PROT_READ
if !readOnly {
prot |= unix.PROT_WRITE
}
func (r *MappedRegion) mmap(f *os.File, offset int64, prot int) error {
_, err := unix.MmapPtr(int(f.Fd()), offset, r.addr, uintptr(r.size),
prot, unix.MAP_SHARED|unix.MAP_FIXED)
r.used = err == nil

View file

@ -1,4 +1,4 @@
//go:build !unix || sqlite3_nosys
//go:build !unix || !(386 || arm || amd64 || arm64 || riscv64 || ppc64le) || sqlite3_noshm || sqlite3_nosys
package util

View file

@ -1,53 +0,0 @@
//go:build !sqlite3_nosys
package util
import (
"context"
"os"
"reflect"
"unsafe"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/windows"
)
type MappedRegion struct {
windows.Handle
Data []byte
addr uintptr
}
func MapRegion(ctx context.Context, mod api.Module, f *os.File, offset int64, size int32) (*MappedRegion, error) {
h, err := windows.CreateFileMapping(windows.Handle(f.Fd()), nil, windows.PAGE_READWRITE, 0, 0, nil)
if h == 0 {
return nil, err
}
a, err := windows.MapViewOfFile(h, windows.FILE_MAP_WRITE,
uint32(offset>>32), uint32(offset), uintptr(size))
if a == 0 {
windows.CloseHandle(h)
return nil, err
}
res := &MappedRegion{Handle: h, addr: a}
// SliceHeader, although deprecated, avoids a go vet warning.
sh := (*reflect.SliceHeader)(unsafe.Pointer(&res.Data))
sh.Len = int(size)
sh.Cap = int(size)
sh.Data = a
return res, nil
}
func (r *MappedRegion) Unmap() error {
if r.Data == nil {
return nil
}
err := windows.UnmapViewOfFile(r.addr)
if err != nil {
return err
}
r.Data = nil
return windows.CloseHandle(r.Handle)
}

View file

@ -255,7 +255,6 @@ func (s *Stmt) BindText(param int, value string) error {
// BindRawText binds a []byte to the prepared statement as text.
// The leftmost SQL parameter has an index of 1.
// Binding a nil slice is the same as calling [Stmt.BindNull].
//
// https://sqlite.org/c3ref/bind_blob.html
func (s *Stmt) BindRawText(param int, value []byte) error {

View file

@ -15,23 +15,24 @@ The main differences are [file locking](#file-locking) and [WAL mode](#write-ahe
POSIX advisory locks, which SQLite uses on Unix, are
[broken by design](https://github.com/sqlite/sqlite/blob/b74eb0/src/os_unix.c#L1073-L1161).
Instead, on Linux and macOS, this package uses
On Linux and macOS, this package uses
[OFD locks](https://www.gnu.org/software/libc/manual/html_node/Open-File-Description-Locks.html)
to synchronize access to database files.
OFD locks are fully compatible with POSIX advisory locks.
This package can also use
[BSD locks](https://man.freebsd.org/cgi/man.cgi?query=flock&sektion=2),
albeit with reduced concurrency (`BEGIN IMMEDIATE` behaves like `BEGIN EXCLUSIVE`).
On BSD, macOS, and illumos, BSD locks are fully compatible with POSIX advisory locks;
on Linux and z/OS, they are fully functional, but incompatible;
elsewhere, they are very likely broken.
BSD locks are the default on BSD and illumos,
but you can opt into them with the `sqlite3_flock` build tag.
On Windows, this package uses `LockFileEx` and `UnlockFileEx`,
like SQLite.
You can also opt into a cross-platform locking implementation
with the `sqlite3_dotlk` build tag.
The only requirement is an atomic `os.Mkdir`.
Otherwise, file locking is not supported, and you must use
[`nolock=1`](https://sqlite.org/uri.html#urinolock)
(or [`immutable=1`](https://sqlite.org/uri.html#uriimmutable))
@ -45,7 +46,7 @@ to check if your build supports file locking.
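
A hedged illustration of the check referenced just above (the truncated sentence about verifying whether your build supports file locking): this sketch assumes the vfs package exports a SupportsFileLocking constant, analogous to the SupportsSharedMemory constant shown later in this diff, and that the driver registers under the name "sqlite3".

package main

import (
	"database/sql"
	"log"

	_ "github.com/ncruces/go-sqlite3/driver" // database/sql driver (assumed registered as "sqlite3")
	_ "github.com/ncruces/go-sqlite3/embed"  // embedded SQLite WASM binary
	"github.com/ncruces/go-sqlite3/vfs"
)

func main() {
	dsn := "file:app.db"
	// SupportsFileLocking is an assumption here, mirroring SupportsSharedMemory.
	if !vfs.SupportsFileLocking {
		// Per the README: without file locking, open with nolock=1
		// (or immutable=1 for read-only databases).
		dsn += "?nolock=1"
	}
	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}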
### Write-Ahead Logging
On Unix, this package may use `mmap` to implement
On little-endian Unix, this package uses `mmap` to implement
[shared-memory for the WAL-index](https://sqlite.org/wal.html#implementation_of_shared_memory_for_the_wal_index),
like SQLite.
@ -54,11 +55,6 @@ a WAL database can only be accessed by a single process.
Other processes that attempt to access a database locked with BSD locks
will fail with the [`SQLITE_PROTOCOL`](https://sqlite.org/rescode.html#protocol) error code.
On Windows, this package may use `MapViewOfFile`, like SQLite.
You can also opt into a cross-platform, in-process, memory sharing implementation
with the `sqlite3_dotlk` build tag.
Otherwise, [WAL support is limited](https://sqlite.org/wal.html#noshm),
and `EXCLUSIVE` locking mode must be set to create, read, and write WAL databases.
To use `EXCLUSIVE` locking mode with the
@ -71,7 +67,7 @@ to check if your build supports shared memory.
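
As a hedged sketch of the fallback described above: when the build lacks shared-memory support, WAL can still be used by setting EXCLUSIVE locking mode. This reuses the imports from the previous sketch; the PRAGMA sequence and the driver name are assumptions, while vfs.SupportsSharedMemory is the constant that appears later in this diff.

// openWAL opens a database in WAL mode, falling back to EXCLUSIVE
// locking mode when shared memory is unsupported (sketch only).
func openWAL(dsn string) (*sql.DB, error) {
	db, err := sql.Open("sqlite3", dsn)
	if err != nil {
		return nil, err
	}
	if !vfs.SupportsSharedMemory {
		// WAL without shared memory requires EXCLUSIVE locking mode:
		// https://sqlite.org/wal.html#noshm
		if _, err := db.Exec("PRAGMA locking_mode=EXCLUSIVE"); err != nil {
			db.Close()
			return nil, err
		}
	}
	if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil {
		db.Close()
		return nil, err
	}
	return db, nil
}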
### Batch-Atomic Write
On Linux, this package may support
On 64-bit Linux, this package supports
[batch-atomic writes](https://sqlite.org/cgi/src/technote/714)
on the F2FS filesystem.
@ -90,27 +86,27 @@ The implementation is compatible with SQLite's
### Build Tags
The VFS can be customized with a few build tags:
- `sqlite3_flock` forces the use of BSD locks.
- `sqlite3_dotlk` forces the use of dot-file locks.
- `sqlite3_nosys` prevents importing [`x/sys`](https://pkg.go.dev/golang.org/x/sys).
- `sqlite3_flock` forces the use of BSD locks; it can be used on z/OS to enable locking,
and elsewhere to test BSD locks.
- `sqlite3_nosys` prevents importing [`x/sys`](https://pkg.go.dev/golang.org/x/sys);
disables locking _and_ shared memory on all platforms.
- `sqlite3_noshm` disables shared memory on all platforms.
> [!IMPORTANT]
> The default configuration of this package is compatible with the standard
> [Unix and Windows SQLite VFSes](https://sqlite.org/vfs.html#multiple_vfses);
> `sqlite3_flock` builds are compatible with the
> [`unix-flock` VFS](https://sqlite.org/compile.html#enable_locking_style);
> `sqlite3_dotlk` builds are compatible with the
> [`unix-dotfile` VFS](https://sqlite.org/compile.html#enable_locking_style).
> [`unix-flock` VFS](https://sqlite.org/compile.html#enable_locking_style).
> If incompatible file locking is used, accessing databases concurrently with
> _other_ SQLite libraries will eventually corrupt data.
### Custom VFSes
- [`github.com/ncruces/go-sqlite3/vfs/adiantum`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/adiantum)
wraps a VFS to offer encryption at rest.
- [`github.com/ncruces/go-sqlite3/vfs/memdb`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/memdb)
implements an in-memory VFS.
- [`github.com/ncruces/go-sqlite3/vfs/readervfs`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/readervfs)
implements a VFS for immutable databases.
- [`github.com/ncruces/go-sqlite3/vfs/adiantum`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/adiantum)
wraps a VFS to offer encryption at rest.
- [`github.com/ncruces/go-sqlite3/vfs/xts`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/xts)
wraps a VFS to offer encryption at rest.
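
To show one of the listed VFSes in use, here is a hedged sketch with memdb: it assumes the package registers a VFS named "memdb" on import and that rooted URI paths such as file:/demo.db select an in-memory database; these details follow the package's documented usage pattern, not this diff.

package main

import (
	"database/sql"
	"log"

	_ "github.com/ncruces/go-sqlite3/driver"    // database/sql driver (assumed registered as "sqlite3")
	_ "github.com/ncruces/go-sqlite3/embed"     // embedded SQLite WASM binary
	_ "github.com/ncruces/go-sqlite3/vfs/memdb" // assumed to register the "memdb" VFS on import
)

func main() {
	// Open a purely in-memory database through the memdb VFS.
	db, err := sql.Open("sqlite3", "file:/demo.db?vfs=memdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("CREATE TABLE t (x INTEGER)"); err != nil {
		log.Fatal(err)
	}
}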

View file

@ -234,8 +234,4 @@ func (e _ErrorCode) Error() string {
_SHM_LOCK _ShmFlag = 2
_SHM_SHARED _ShmFlag = 4
_SHM_EXCLUSIVE _ShmFlag = 8
_SHM_NLOCK = 8
_SHM_BASE = 120
_SHM_DMS = _SHM_BASE + _SHM_NLOCK
)

View file

@ -35,10 +35,10 @@ func testSymlinks(path string) error {
func (vfsOS) Delete(path string, syncDir bool) error {
err := os.Remove(path)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return _IOERR_DELETE_NOENT
}
if err != nil {
return err
}
if runtime.GOOS != "windows" && syncDir {
@ -151,7 +151,6 @@ func (f *vfsFile) Close() error {
if f.shm != nil {
f.shm.Close()
}
f.Unlock(LOCK_NONE)
return f.File.Close()
}
@ -207,10 +206,10 @@ func (f *vfsFile) HasMoved() (bool, error) {
return false, err
}
pi, err := os.Stat(f.Name())
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return true, nil
}
if err != nil {
return false, err
}
return !os.SameFile(fi, pi), nil

View file

@ -1,4 +1,4 @@
//go:build ((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk
//go:build (linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) && !sqlite3_nosys
package vfs

View file

@ -1,4 +1,4 @@
//go:build !(((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk)
//go:build !(linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) || sqlite3_nosys
package vfs

View file

@ -78,15 +78,19 @@ type memDB struct {
// +checklocks:dataMtx
data []*[sectorSize]byte
// +checklocks:dataMtx
size int64
// +checklocks:memoryMtx
refs int32
// +checklocks:lockMtx
shared int32
// +checklocks:lockMtx
reserved bool
// +checklocks:lockMtx
pending bool
shared int32 // +checklocks:lockMtx
pending bool // +checklocks:lockMtx
reserved bool // +checklocks:lockMtx
// +checklocks:memoryMtx
refs int
lockMtx sync.Mutex
dataMtx sync.RWMutex
@ -249,12 +253,12 @@ func (m *memFile) Unlock(lock vfs.LockLevel) error {
m.lockMtx.Lock()
defer m.lockMtx.Unlock()
if m.lock >= vfs.LOCK_RESERVED {
m.reserved = false
}
if m.lock >= vfs.LOCK_PENDING {
if m.pending && m.lock >= vfs.LOCK_PENDING {
m.pending = false
}
if m.reserved && m.lock >= vfs.LOCK_RESERVED {
m.reserved = false
}
if lock < vfs.LOCK_SHARED {
m.shared--
}

View file

@ -1,4 +1,4 @@
//go:build ((freebsd || openbsd || netbsd || dragonfly || illumos) && !(sqlite3_dotlk || sqlite3_nosys)) || sqlite3_flock
//go:build (freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) && !sqlite3_nosys
package vfs

View file

@ -1,143 +0,0 @@
//go:build sqlite3_dotlk
package vfs
import (
"errors"
"io/fs"
"os"
"sync"
)
var (
// +checklocks:vfsDotLocksMtx
vfsDotLocks = map[string]*vfsDotLocker{}
vfsDotLocksMtx sync.Mutex
)
type vfsDotLocker struct {
shared int // +checklocks:vfsDotLocksMtx
pending *os.File // +checklocks:vfsDotLocksMtx
reserved *os.File // +checklocks:vfsDotLocksMtx
}
func osGetSharedLock(file *os.File) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
err := os.Mkdir(name+".lock", 0777)
if errors.Is(err, fs.ErrExist) {
return _BUSY // Another process has the lock.
}
if err != nil {
return _IOERR_LOCK
}
locker = &vfsDotLocker{}
vfsDotLocks[name] = locker
}
if locker.pending != nil {
return _BUSY
}
locker.shared++
return _OK
}
func osGetReservedLock(file *os.File) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_LOCK
}
if locker.reserved != nil && locker.reserved != file {
return _BUSY
}
locker.reserved = file
return _OK
}
func osGetExclusiveLock(file *os.File, _ *LockLevel) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_LOCK
}
if locker.pending != nil && locker.pending != file {
return _BUSY
}
locker.pending = file
if locker.shared > 1 {
return _BUSY
}
return _OK
}
func osDowngradeLock(file *os.File, _ LockLevel) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_UNLOCK
}
if locker.reserved == file {
locker.reserved = nil
}
if locker.pending == file {
locker.pending = nil
}
return _OK
}
func osReleaseLock(file *os.File, state LockLevel) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_UNLOCK
}
if locker.shared == 1 {
err := os.Remove(name + ".lock")
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return _IOERR_UNLOCK
}
delete(vfsDotLocks, name)
}
if locker.reserved == file {
locker.reserved = nil
}
if locker.pending == file {
locker.pending = nil
}
locker.shared--
return _OK
}
func osCheckReservedLock(file *os.File) (bool, _ErrorCode) {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return false, _OK
}
return locker.reserved != nil, _OK
}

View file

@ -1,4 +1,4 @@
//go:build (linux || darwin) && !(sqlite3_flock || sqlite3_dotlk || sqlite3_nosys)
//go:build (linux || darwin) && !(sqlite3_flock || sqlite3_nosys)
package vfs

View file

@ -1,4 +1,4 @@
//go:build !(sqlite3_dotlk || sqlite3_nosys)
//go:build !sqlite3_nosys
package vfs

View file

@ -1,7 +1,20 @@
//go:build ((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk
//go:build (darwin || linux) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_flock || sqlite3_noshm || sqlite3_nosys)
package vfs
import (
"context"
"io"
"os"
"sync"
"time"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/unix"
"github.com/ncruces/go-sqlite3/internal/util"
)
// SupportsSharedMemory is false on platforms that do not support shared memory.
// To use [WAL without shared-memory], you need to set [EXCLUSIVE locking mode].
//
@ -9,6 +22,12 @@
// [EXCLUSIVE locking mode]: https://sqlite.org/pragma.html#pragma_locking_mode
const SupportsSharedMemory = true
const (
_SHM_NLOCK = 8
_SHM_BASE = 120
_SHM_DMS = _SHM_BASE + _SHM_NLOCK
)
func (f *vfsFile) SharedMemory() SharedMemory { return f.shm }
// NewSharedMemory returns a shared-memory WAL-index
@ -22,5 +41,172 @@ func NewSharedMemory(path string, flags OpenFlag) SharedMemory {
if flags&OPEN_MAIN_DB == 0 || flags&(OPEN_DELETEONCLOSE|OPEN_MEMORY) != 0 {
return nil
}
return &vfsShm{path: path}
return &vfsShm{
path: path,
readOnly: flags&OPEN_READONLY != 0,
}
}
var _ blockingSharedMemory = &vfsShm{}
type vfsShm struct {
*os.File
path string
regions []*util.MappedRegion
readOnly bool
blocking bool
sync.Mutex
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.File == nil {
var flag int
if s.readOnly {
flag = unix.O_RDONLY
} else {
flag = unix.O_RDWR
}
f, err := os.OpenFile(s.path,
flag|unix.O_CREAT|unix.O_NOFOLLOW, 0666)
if err != nil {
return _CANTOPEN
}
s.File = f
}
// Dead man's switch.
if lock, rc := osTestLock(s.File, _SHM_DMS, 1); rc != _OK {
return _IOERR_LOCK
} else if lock == unix.F_WRLCK {
return _BUSY
} else if lock == unix.F_UNLCK {
if s.readOnly {
return _READONLY_CANTINIT
}
// Do not use a blocking lock here.
// If the lock cannot be obtained immediately,
// it means some other connection is truncating the file.
// And after it has done so, it will not release its lock,
// but only downgrade it to a shared lock.
// So no point in blocking here.
// The call below to obtain the shared DMS lock may use a blocking lock.
if rc := osWriteLock(s.File, _SHM_DMS, 1, 0); rc != _OK {
return rc
}
if err := s.Truncate(0); err != nil {
return _IOERR_SHMOPEN
}
}
if rc := osReadLock(s.File, _SHM_DMS, 1, time.Millisecond); rc != _OK {
return rc
}
return _OK
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
// Ensure size is a multiple of the OS page size.
if int(size)&(unix.Getpagesize()-1) != 0 {
return 0, _IOERR_SHMMAP
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
// Check if file is big enough.
o, err := s.Seek(0, io.SeekEnd)
if err != nil {
return 0, _IOERR_SHMSIZE
}
if n := (int64(id) + 1) * int64(size); n > o {
if !extend {
return 0, _OK
}
err := osAllocate(s.File, n)
if err != nil {
return 0, _IOERR_SHMSIZE
}
}
var prot int
if s.readOnly {
prot = unix.PROT_READ
} else {
prot = unix.PROT_READ | unix.PROT_WRITE
}
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, prot)
if err != nil {
return 0, _IOERR_SHMMAP
}
s.regions = append(s.regions, r)
if s.readOnly {
return r.Ptr, _READONLY
}
return r.Ptr, _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
// Argument check.
if n <= 0 || offset < 0 || offset+n > _SHM_NLOCK {
panic(util.AssertErr())
}
switch flags {
case
_SHM_LOCK | _SHM_SHARED,
_SHM_LOCK | _SHM_EXCLUSIVE,
_SHM_UNLOCK | _SHM_SHARED,
_SHM_UNLOCK | _SHM_EXCLUSIVE:
//
default:
panic(util.AssertErr())
}
if n != 1 && flags&_SHM_EXCLUSIVE == 0 {
panic(util.AssertErr())
}
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+int64(offset), int64(n))
case flags&_SHM_SHARED != 0:
return osReadLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
case flags&_SHM_EXCLUSIVE != 0:
return osWriteLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
default:
panic(util.AssertErr())
}
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.File == nil {
return
}
// Unmap regions.
for _, r := range s.regions {
r.Unmap()
}
clear(s.regions)
s.regions = s.regions[:0]
// Close the file.
if delete {
os.Remove(s.path)
}
s.Close()
s.File = nil
}
func (s *vfsShm) shmBarrier() {
s.Lock()
//lint:ignore SA2001 memory barrier.
s.Unlock()
}
func (s *vfsShm) shmEnableBlocking(block bool) {
s.blocking = block
}

View file

@ -1,4 +1,4 @@
//go:build ((freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_dotlk || sqlite3_nosys)) || sqlite3_flock
//go:build (freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_noshm || sqlite3_nosys)
package vfs
@ -14,14 +14,44 @@
"github.com/ncruces/go-sqlite3/internal/util"
)
// SupportsSharedMemory is false on platforms that do not support shared memory.
// To use [WAL without shared-memory], you need to set [EXCLUSIVE locking mode].
//
// [WAL without shared-memory]: https://sqlite.org/wal.html#noshm
// [EXCLUSIVE locking mode]: https://sqlite.org/pragma.html#pragma_locking_mode
const SupportsSharedMemory = true
const _SHM_NLOCK = 8
func (f *vfsFile) SharedMemory() SharedMemory { return f.shm }
// NewSharedMemory returns a shared-memory WAL-index
// backed by a file with the given path.
// It will return nil if shared-memory is not supported,
// or not appropriate for the given flags.
// Only [OPEN_MAIN_DB] databases may need a WAL-index.
// You must ensure all concurrent accesses to a database
// use shared-memory instances created with the same path.
func NewSharedMemory(path string, flags OpenFlag) SharedMemory {
if flags&OPEN_MAIN_DB == 0 || flags&(OPEN_DELETEONCLOSE|OPEN_MEMORY) != 0 {
return nil
}
return &vfsShm{
path: path,
readOnly: flags&OPEN_READONLY != 0,
}
}
type vfsShmFile struct {
*os.File
info os.FileInfo
refs int // +checklocks:vfsShmFilesMtx
// +checklocks:vfsShmFilesMtx
refs int
lock [_SHM_NLOCK]int16 // +checklocks:Mutex
sync.Mutex
// +checklocks:lockMtx
lock [_SHM_NLOCK]int16
lockMtx sync.Mutex
}
var (
@ -35,6 +65,7 @@ type vfsShm struct {
path string
lock [_SHM_NLOCK]bool
regions []*util.MappedRegion
readOnly bool
}
func (s *vfsShm) Close() error {
@ -49,7 +80,7 @@ func (s *vfsShm) Close() error {
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count.
if s.vfsShmFile.refs > 0 {
if s.vfsShmFile.refs > 1 {
s.vfsShmFile.refs--
s.vfsShmFile = nil
return nil
@ -66,7 +97,7 @@ func (s *vfsShm) Close() error {
panic(util.AssertErr())
}
func (s *vfsShm) shmOpen() _ErrorCode {
func (s *vfsShm) shmOpen() (rc _ErrorCode) {
if s.vfsShmFile != nil {
return _OK
}
@ -97,29 +128,34 @@ func (s *vfsShm) shmOpen() _ErrorCode {
}
}
// Lock and truncate the file.
// Lock and truncate the file, if not readonly.
// The lock is only released by closing the file.
if s.readOnly {
rc = _READONLY_CANTINIT
} else {
if rc := osLock(f, unix.LOCK_EX|unix.LOCK_NB, _IOERR_LOCK); rc != _OK {
return rc
}
if err := f.Truncate(0); err != nil {
return _IOERR_SHMOPEN
}
}
// Add the new shared file.
s.vfsShmFile = &vfsShmFile{
File: f,
info: fi,
refs: 1,
}
f = nil // Don't close the file.
for i, g := range vfsShmFiles {
if g == nil {
vfsShmFiles[i] = s.vfsShmFile
return _OK
return rc
}
}
vfsShmFiles = append(vfsShmFiles, s.vfsShmFile)
return _OK
return rc
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
@ -141,22 +177,32 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
if !extend {
return 0, _OK
}
if osAllocate(s.File, n) != nil {
err := osAllocate(s.File, n)
if err != nil {
return 0, _IOERR_SHMSIZE
}
}
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, false)
var prot int
if s.readOnly {
prot = unix.PROT_READ
} else {
prot = unix.PROT_READ | unix.PROT_WRITE
}
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, prot)
if err != nil {
return 0, _IOERR_SHMMAP
}
s.regions = append(s.regions, r)
if s.readOnly {
return r.Ptr, _READONLY
}
return r.Ptr, _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.Lock()
defer s.Unlock()
s.lockMtx.Lock()
defer s.lockMtx.Unlock()
switch {
case flags&_SHM_UNLOCK != 0:
@ -178,7 +224,7 @@ func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmFile.lock[i]+1 <= 0 {
if s.vfsShmFile.lock[i] < 0 {
return _BUSY
}
}
@ -215,7 +261,8 @@ func (s *vfsShm) shmUnmap(delete bool) {
for _, r := range s.regions {
r.Unmap()
}
s.regions = nil
clear(s.regions)
s.regions = s.regions[:0]
// Close the file.
if delete {
@ -225,7 +272,7 @@ func (s *vfsShm) shmUnmap(delete bool) {
}
func (s *vfsShm) shmBarrier() {
s.Lock()
s.lockMtx.Lock()
//lint:ignore SA2001 memory barrier.
s.Unlock()
s.lockMtx.Unlock()
}

View file

@ -1,84 +0,0 @@
//go:build (windows && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_dotlk
package vfs
import (
"unsafe"
"github.com/ncruces/go-sqlite3/internal/util"
)
const (
_WALINDEX_HDR_SIZE = 136
_WALINDEX_PGSZ = 32768
)
// This looks like a safe way of keeping the WAL-index in sync.
//
// The WAL-index file starts with a header,
// and the index doesn't meaningfully change if the header doesn't change.
//
// The header starts with two 48 byte, checksummed, copies of the same information,
// which are accessed independently between memory barriers.
// The checkpoint information that follows uses 4 byte aligned words.
//
// Finally, we have the WAL-index hash tables,
// which are only modified holding the exclusive WAL_WRITE_LOCK.
//
// Since all the data is either redundant+checksummed,
// 4 byte aligned, or modified under an exclusive lock,
// the copies below should correctly keep copies in sync.
//
// https://sqlite.org/walformat.html#the_wal_index_file_format
func (s *vfsShm) shmAcquire() {
if len(s.ptrs) == 0 || shmUnmodified(s.shadow[0][:], s.shared[0][:]) {
return
}
// Copies modified words from shared to private memory.
for id, p := range s.ptrs {
shared := shmPage(s.shared[id][:])
shadow := shmPage(s.shadow[id][:])
privat := shmPage(util.View(s.mod, p, _WALINDEX_PGSZ))
for i, shared := range shared {
if shadow[i] != shared {
shadow[i] = shared
privat[i] = shared
}
}
}
}
func (s *vfsShm) shmRelease() {
if len(s.ptrs) == 0 || shmUnmodified(s.shadow[0][:], util.View(s.mod, s.ptrs[0], _WALINDEX_HDR_SIZE)) {
return
}
// Copies modified words from private to shared memory.
for id, p := range s.ptrs {
shared := shmPage(s.shared[id][:])
shadow := shmPage(s.shadow[id][:])
privat := shmPage(util.View(s.mod, p, _WALINDEX_PGSZ))
for i, privat := range privat {
if shadow[i] != privat {
shadow[i] = privat
shared[i] = privat
}
}
}
}
func (s *vfsShm) shmBarrier() {
s.Lock()
s.shmAcquire()
s.shmRelease()
s.Unlock()
}
func shmPage(s []byte) *[_WALINDEX_PGSZ / 4]uint32 {
p := (*uint32)(unsafe.Pointer(unsafe.SliceData(s)))
return (*[_WALINDEX_PGSZ / 4]uint32)(unsafe.Slice(p, _WALINDEX_PGSZ/4))
}
func shmUnmodified(v1, v2 []byte) bool {
return *(*[_WALINDEX_HDR_SIZE]byte)(v1[:]) == *(*[_WALINDEX_HDR_SIZE]byte)(v2[:])
}

View file

@ -1,224 +0,0 @@
//go:build sqlite3_dotlk
package vfs
import (
"context"
"errors"
"io/fs"
"os"
"sync"
"github.com/ncruces/go-sqlite3/internal/util"
"github.com/tetratelabs/wazero/api"
)
type vfsShmBuffer struct {
shared [][_WALINDEX_PGSZ]byte
refs int // +checklocks:vfsShmBuffersMtx
lock [_SHM_NLOCK]int16 // +checklocks:Mutex
sync.Mutex
}
var (
// +checklocks:vfsShmBuffersMtx
vfsShmBuffers = map[string]*vfsShmBuffer{}
vfsShmBuffersMtx sync.Mutex
)
type vfsShm struct {
*vfsShmBuffer
mod api.Module
alloc api.Function
free api.Function
path string
shadow [][_WALINDEX_PGSZ]byte
ptrs []uint32
stack [1]uint64
lock [_SHM_NLOCK]bool
}
func (s *vfsShm) Close() error {
if s.vfsShmBuffer == nil {
return nil
}
vfsShmBuffersMtx.Lock()
defer vfsShmBuffersMtx.Unlock()
// Unlock everything.
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count.
if s.vfsShmBuffer.refs > 0 {
s.vfsShmBuffer.refs--
s.vfsShmBuffer = nil
return nil
}
err := os.Remove(s.path)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return _IOERR_UNLOCK
}
delete(vfsShmBuffers, s.path)
s.vfsShmBuffer = nil
return nil
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.vfsShmBuffer != nil {
return _OK
}
vfsShmBuffersMtx.Lock()
defer vfsShmBuffersMtx.Unlock()
// Find a shared buffer, increase the reference count.
if g, ok := vfsShmBuffers[s.path]; ok {
s.vfsShmBuffer = g
g.refs++
return _OK
}
// Create a directory on disk to ensure only this process
// uses this path to register a shared memory.
err := os.Mkdir(s.path, 0777)
if errors.Is(err, fs.ErrExist) {
return _BUSY
}
if err != nil {
return _IOERR_LOCK
}
// Add the new shared buffer.
s.vfsShmBuffer = &vfsShmBuffer{}
vfsShmBuffers[s.path] = s.vfsShmBuffer
return _OK
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
if size != _WALINDEX_PGSZ {
return 0, _IOERR_SHMMAP
}
if s.mod == nil {
s.mod = mod
s.free = mod.ExportedFunction("sqlite3_free")
s.alloc = mod.ExportedFunction("sqlite3_malloc64")
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
s.Lock()
defer s.Unlock()
defer s.shmAcquire()
// Extend shared memory.
if int(id) >= len(s.shared) {
if !extend {
return 0, _OK
}
s.shared = append(s.shared, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shared)+1)...)
}
// Allocate shadow memory.
if int(id) >= len(s.shadow) {
s.shadow = append(s.shadow, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shadow)+1)...)
s.shadow[0][4] = 1 // force invalidation
}
// Allocate local memory.
for int(id) >= len(s.ptrs) {
s.stack[0] = uint64(size)
if err := s.alloc.CallWithStack(ctx, s.stack[:]); err != nil {
panic(err)
}
if s.stack[0] == 0 {
panic(util.OOMErr)
}
clear(util.View(s.mod, uint32(s.stack[0]), _WALINDEX_PGSZ))
s.ptrs = append(s.ptrs, uint32(s.stack[0]))
}
return s.ptrs[id], _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.Lock()
defer s.Unlock()
switch {
case flags&_SHM_LOCK != 0:
defer s.shmAcquire()
case flags&_SHM_EXCLUSIVE != 0:
s.shmRelease()
}
switch {
case flags&_SHM_UNLOCK != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
if s.vfsShmBuffer.lock[i] == 0 {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i] <= 0 {
s.vfsShmBuffer.lock[i] = 0
} else {
s.vfsShmBuffer.lock[i]--
}
s.lock[i] = false
}
}
case flags&_SHM_SHARED != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i]+1 <= 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmBuffer.lock[i]++
s.lock[i] = true
}
case flags&_SHM_EXCLUSIVE != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i] != 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmBuffer.lock[i] = -1
s.lock[i] = true
}
default:
panic(util.AssertErr())
}
return _OK
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.vfsShmBuffer == nil {
return
}
defer s.Close()
s.Lock()
s.shmRelease()
defer s.Unlock()
for _, p := range s.ptrs {
s.stack[0] = uint64(p)
if err := s.free.CallWithStack(context.Background(), s.stack[:]); err != nil {
panic(err)
}
}
s.ptrs = nil
s.shadow = nil
}

View file

@ -1,168 +0,0 @@
//go:build (linux || darwin) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_flock || sqlite3_dotlk || sqlite3_nosys)
package vfs
import (
"context"
"io"
"os"
"sync"
"time"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/unix"
"github.com/ncruces/go-sqlite3/internal/util"
)
type vfsShm struct {
*os.File
path string
regions []*util.MappedRegion
readOnly bool
blocking bool
sync.Mutex
}
var _ blockingSharedMemory = &vfsShm{}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.File == nil {
f, err := os.OpenFile(s.path,
unix.O_RDWR|unix.O_CREAT|unix.O_NOFOLLOW, 0666)
if err != nil {
f, err = os.OpenFile(s.path,
unix.O_RDONLY|unix.O_CREAT|unix.O_NOFOLLOW, 0666)
s.readOnly = true
}
if err != nil {
return _CANTOPEN
}
s.File = f
}
// Dead man's switch.
if lock, rc := osTestLock(s.File, _SHM_DMS, 1); rc != _OK {
return _IOERR_LOCK
} else if lock == unix.F_WRLCK {
return _BUSY
} else if lock == unix.F_UNLCK {
if s.readOnly {
return _READONLY_CANTINIT
}
// Do not use a blocking lock here.
// If the lock cannot be obtained immediately,
// it means some other connection is truncating the file.
// And after it has done so, it will not release its lock,
// but only downgrade it to a shared lock.
// So no point in blocking here.
// The call below to obtain the shared DMS lock may use a blocking lock.
if rc := osWriteLock(s.File, _SHM_DMS, 1, 0); rc != _OK {
return rc
}
if err := s.Truncate(0); err != nil {
return _IOERR_SHMOPEN
}
}
return osReadLock(s.File, _SHM_DMS, 1, time.Millisecond)
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
// Ensure size is a multiple of the OS page size.
if int(size)&(unix.Getpagesize()-1) != 0 {
return 0, _IOERR_SHMMAP
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
// Check if file is big enough.
o, err := s.Seek(0, io.SeekEnd)
if err != nil {
return 0, _IOERR_SHMSIZE
}
if n := (int64(id) + 1) * int64(size); n > o {
if !extend {
return 0, _OK
}
if s.readOnly || osAllocate(s.File, n) != nil {
return 0, _IOERR_SHMSIZE
}
}
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, s.readOnly)
if err != nil {
return 0, _IOERR_SHMMAP
}
s.regions = append(s.regions, r)
if s.readOnly {
return r.Ptr, _READONLY
}
return r.Ptr, _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
// Argument check.
if n <= 0 || offset < 0 || offset+n > _SHM_NLOCK {
panic(util.AssertErr())
}
switch flags {
case
_SHM_LOCK | _SHM_SHARED,
_SHM_LOCK | _SHM_EXCLUSIVE,
_SHM_UNLOCK | _SHM_SHARED,
_SHM_UNLOCK | _SHM_EXCLUSIVE:
//
default:
panic(util.AssertErr())
}
if n != 1 && flags&_SHM_EXCLUSIVE == 0 {
panic(util.AssertErr())
}
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+int64(offset), int64(n))
case flags&_SHM_SHARED != 0:
return osReadLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
case flags&_SHM_EXCLUSIVE != 0:
return osWriteLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
default:
panic(util.AssertErr())
}
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.File == nil {
return
}
// Unmap regions.
for _, r := range s.regions {
r.Unmap()
}
s.regions = nil
// Close the file.
if delete {
os.Remove(s.path)
}
s.Close()
s.File = nil
}
func (s *vfsShm) shmBarrier() {
s.Lock()
//lint:ignore SA2001 memory barrier.
s.Unlock()
}
func (s *vfsShm) shmEnableBlocking(block bool) {
s.blocking = block
}

View file

@ -1,4 +1,4 @@
//go:build !(((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk)
//go:build !(darwin || linux || freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) || !(386 || arm || amd64 || arm64 || riscv64 || ppc64le) || sqlite3_noshm || sqlite3_nosys
package vfs

View file

@ -1,182 +0,0 @@
//go:build (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_dotlk || sqlite3_nosys)
package vfs
import (
"context"
"io"
"os"
"sync"
"time"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/windows"
"github.com/ncruces/go-sqlite3/internal/util"
"github.com/ncruces/go-sqlite3/util/osutil"
)
type vfsShm struct {
*os.File
mod api.Module
alloc api.Function
free api.Function
path string
regions []*util.MappedRegion
shared [][]byte
shadow [][_WALINDEX_PGSZ]byte
ptrs []uint32
stack [1]uint64
blocking bool
sync.Mutex
}
var _ blockingSharedMemory = &vfsShm{}
func (s *vfsShm) Close() error {
// Unmap regions.
for _, r := range s.regions {
r.Unmap()
}
s.regions = nil
// Close the file.
return s.File.Close()
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.File == nil {
f, err := osutil.OpenFile(s.path, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
return _CANTOPEN
}
s.File = f
}
// Dead man's switch.
if rc := osWriteLock(s.File, _SHM_DMS, 1, 0); rc == _OK {
err := s.Truncate(0)
osUnlock(s.File, _SHM_DMS, 1)
if err != nil {
return _IOERR_SHMOPEN
}
}
return osReadLock(s.File, _SHM_DMS, 1, time.Millisecond)
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
// Ensure size is a multiple of the OS page size.
if size != _WALINDEX_PGSZ || (windows.Getpagesize()-1)&_WALINDEX_PGSZ != 0 {
return 0, _IOERR_SHMMAP
}
if s.mod == nil {
s.mod = mod
s.free = mod.ExportedFunction("sqlite3_free")
s.alloc = mod.ExportedFunction("sqlite3_malloc64")
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
defer s.shmAcquire()
// Check if file is big enough.
o, err := s.Seek(0, io.SeekEnd)
if err != nil {
return 0, _IOERR_SHMSIZE
}
if n := (int64(id) + 1) * int64(size); n > o {
if !extend {
return 0, _OK
}
if osAllocate(s.File, n) != nil {
return 0, _IOERR_SHMSIZE
}
}
// Maps regions into memory.
for int(id) >= len(s.shared) {
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size)
if err != nil {
return 0, _IOERR_SHMMAP
}
s.regions = append(s.regions, r)
s.shared = append(s.shared, r.Data)
}
// Allocate shadow memory.
if int(id) >= len(s.shadow) {
s.shadow = append(s.shadow, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shadow)+1)...)
s.shadow[0][4] = 1 // force invalidation
}
// Allocate local memory.
for int(id) >= len(s.ptrs) {
s.stack[0] = uint64(size)
if err := s.alloc.CallWithStack(ctx, s.stack[:]); err != nil {
panic(err)
}
if s.stack[0] == 0 {
panic(util.OOMErr)
}
clear(util.View(s.mod, uint32(s.stack[0]), _WALINDEX_PGSZ))
s.ptrs = append(s.ptrs, uint32(s.stack[0]))
}
return s.ptrs[id], _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
switch {
case flags&_SHM_LOCK != 0:
defer s.shmAcquire()
case flags&_SHM_EXCLUSIVE != 0:
s.shmRelease()
}
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+uint32(offset), uint32(n))
case flags&_SHM_SHARED != 0:
return osReadLock(s.File, _SHM_BASE+uint32(offset), uint32(n), timeout)
case flags&_SHM_EXCLUSIVE != 0:
return osWriteLock(s.File, _SHM_BASE+uint32(offset), uint32(n), timeout)
default:
panic(util.AssertErr())
}
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.File == nil {
return
}
s.shmRelease()
// Free local memory.
for _, p := range s.ptrs {
s.stack[0] = uint64(p)
if err := s.free.CallWithStack(context.Background(), s.stack[:]); err != nil {
panic(err)
}
}
s.ptrs = nil
s.shadow = nil
s.shared = nil
// Close the file.
s.Close()
s.File = nil
if delete {
os.Remove(s.path)
}
}
func (s *vfsShm) shmEnableBlocking(block bool) {
s.blocking = block
}

vendor/modules.txt (vendored, 7 changed lines)
View file

@ -24,9 +24,10 @@ codeberg.org/gruf/go-fastcopy
# codeberg.org/gruf/go-fastpath/v2 v2.0.0
## explicit; go 1.14
codeberg.org/gruf/go-fastpath/v2
# codeberg.org/gruf/go-ffmpreg v0.6.0
# codeberg.org/gruf/go-ffmpreg v0.4.2
## explicit; go 1.22.0
codeberg.org/gruf/go-ffmpreg/embed
codeberg.org/gruf/go-ffmpreg/embed/ffmpeg
codeberg.org/gruf/go-ffmpreg/embed/ffprobe
codeberg.org/gruf/go-ffmpreg/wasm
# codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf
## explicit; go 1.21
@ -514,7 +515,7 @@ github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
# github.com/ncruces/go-sqlite3 v0.20.2
# github.com/ncruces/go-sqlite3 v0.20.0
## explicit; go 1.21
github.com/ncruces/go-sqlite3
github.com/ncruces/go-sqlite3/driver