Compare commits


3 commits

Author SHA1 Message Date
Vivian Lim 4f1ffc1bfd
Merge 2cd5abfdcf into 45e1609377 2024-11-07 12:14:46 +00:00
kim 45e1609377
bump ncruces/go-sqlite3 to v0.20.2 (#3524) 2024-11-07 00:16:28 +00:00
kim b84637801a
[chore] update go-ffmpreg to v0.6.0 (#3515)
* pull in go-ffmpreg v0.6.0

* add code comment

* grrr linter

* set empty module name when calling ffmpeg / ffprobe
2024-11-06 14:38:13 +01:00
38 changed files with 1122 additions and 434 deletions

4
go.mod
View file

@ -12,7 +12,7 @@ require (
codeberg.org/gruf/go-debug v1.3.0 codeberg.org/gruf/go-debug v1.3.0
codeberg.org/gruf/go-errors/v2 v2.3.2 codeberg.org/gruf/go-errors/v2 v2.3.2
codeberg.org/gruf/go-fastcopy v1.1.3 codeberg.org/gruf/go-fastcopy v1.1.3
codeberg.org/gruf/go-ffmpreg v0.4.2 codeberg.org/gruf/go-ffmpreg v0.6.0
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf
codeberg.org/gruf/go-kv v1.6.5 codeberg.org/gruf/go-kv v1.6.5
codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f
@ -43,7 +43,7 @@ require (
github.com/miekg/dns v1.1.62 github.com/miekg/dns v1.1.62
github.com/minio/minio-go/v7 v7.0.80 github.com/minio/minio-go/v7 v7.0.80
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/ncruces/go-sqlite3 v0.20.0 github.com/ncruces/go-sqlite3 v0.20.2
github.com/oklog/ulid v1.3.1 github.com/oklog/ulid v1.3.1
github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_golang v1.20.5
github.com/spf13/cobra v1.8.1 github.com/spf13/cobra v1.8.1

8
go.sum generated
View file

@ -46,8 +46,8 @@ codeberg.org/gruf/go-fastcopy v1.1.3 h1:Jo9VTQjI6KYimlw25PPc7YLA3Xm+XMQhaHwKnM7x
codeberg.org/gruf/go-fastcopy v1.1.3/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s= codeberg.org/gruf/go-fastcopy v1.1.3/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s=
codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0= codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0=
codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q= codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q=
codeberg.org/gruf/go-ffmpreg v0.4.2 h1:HKkPapm/PWkxsnUdjyQOGpwl5Qoa2EBrUQ09s4R4/FA= codeberg.org/gruf/go-ffmpreg v0.6.0 h1:/cfUJ9bFKEoXT9LDYZy3eZ0HF60YWcO+0nGciepJKMw=
codeberg.org/gruf/go-ffmpreg v0.4.2/go.mod h1:Ar5nbt3tB2Wr0uoaqV3wDBNwAx+H+AB/mV7Kw7NlZTI= codeberg.org/gruf/go-ffmpreg v0.6.0/go.mod h1:Ar5nbt3tB2Wr0uoaqV3wDBNwAx+H+AB/mV7Kw7NlZTI=
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A= codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A=
codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk= codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk=
codeberg.org/gruf/go-kv v1.6.5 h1:ttPf0NA8F79pDqBttSudPTVCZmGncumeNIxmeM9ztz0= codeberg.org/gruf/go-kv v1.6.5 h1:ttPf0NA8F79pDqBttSudPTVCZmGncumeNIxmeM9ztz0=
@ -432,8 +432,8 @@ github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-sqlite3 v0.20.0 h1:/nBLvYxj7sk9S6y57nmMFvoQ/KJtGo0pNi8J80s8oJU= github.com/ncruces/go-sqlite3 v0.20.2 h1:cMLIwrLZQuCWVCEOowSqlIlpzgbag3jnYVW4NM5u01M=
github.com/ncruces/go-sqlite3 v0.20.0/go.mod h1:yL4ZNWGsr1/8pcLfpPW1RT1WFdvyeHonrgIwwi4rvkg= github.com/ncruces/go-sqlite3 v0.20.2/go.mod h1:yL4ZNWGsr1/8pcLfpPW1RT1WFdvyeHonrgIwwi4rvkg=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M= github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=

View file

@ -181,6 +181,10 @@ func ffmpeg(ctx context.Context, inpath string, outpath string, args ...string)
} }
fscfg = fscfg.WithFSMount(shared, path.Dir(inpath)) fscfg = fscfg.WithFSMount(shared, path.Dir(inpath))
// Set anonymous module name.
modcfg = modcfg.WithName("")
// Update with prepared fs config.
return modcfg.WithFSConfig(fscfg) return modcfg.WithFSConfig(fscfg)
}, },
}) })
@ -247,6 +251,10 @@ func ffprobe(ctx context.Context, filepath string) (*result, error) {
} }
fscfg = fscfg.WithFSMount(in, path.Dir(filepath)) fscfg = fscfg.WithFSMount(in, path.Dir(filepath))
// Set anonymous module name.
modcfg = modcfg.WithName("")
// Update with prepared fs config.
return modcfg.WithFSConfig(fscfg) return modcfg.WithFSConfig(fscfg)
}, },
}) })
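
The `modcfg.WithName("")` calls added above make each ffmpeg/ffprobe instance an anonymous wazero module. A minimal sketch of why that matters, using only the public wazero API (the `runTwice` helper and `wasmBytes` placeholder are illustrative, not from this repository): wazero will not instantiate two modules under the same non-empty name in one runtime, so clearing the name lets the same compiled module be instantiated repeatedly, which concurrent ffmpeg/ffprobe invocations need.

```go
package demo

import (
	"context"

	"github.com/tetratelabs/wazero"
)

// runTwice instantiates the same compiled module twice in one runtime.
// With a non-empty name the second instantiation would collide; an
// empty name keeps each instance anonymous.
func runTwice(ctx context.Context, wasmBytes []byte) error {
	r := wazero.NewRuntime(ctx)
	defer r.Close(ctx)

	mod, err := r.CompileModule(ctx, wasmBytes)
	if err != nil {
		return err
	}
	for i := 0; i < 2; i++ {
		cfg := wazero.NewModuleConfig().WithName("")
		inst, err := r.InstantiateModule(ctx, mod, cfg)
		if err != nil {
			return err
		}
		inst.Close(ctx)
	}
	return nil
}
```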

View file

@ -21,6 +21,7 @@
import ( import (
"context" "context"
"errors"
"codeberg.org/gruf/go-ffmpreg/wasm" "codeberg.org/gruf/go-ffmpreg/wasm"
) )
@ -35,12 +36,25 @@
// prepares the runner to only allow max given concurrent running instances. // prepares the runner to only allow max given concurrent running instances.
func InitFfmpeg(ctx context.Context, max int) error { func InitFfmpeg(ctx context.Context, max int) error {
ffmpegRunner.Init(max) ffmpegRunner.Init(max)
return compileFfmpeg(ctx) return initWASM(ctx)
} }
// Ffmpeg runs the given arguments with an instance of ffmpeg. // Ffmpeg runs the given arguments with an instance of ffmpeg.
func Ffmpeg(ctx context.Context, args Args) (uint32, error) { func Ffmpeg(ctx context.Context, args Args) (uint32, error) {
return ffmpegRunner.Run(ctx, func() (uint32, error) { return ffmpegRunner.Run(ctx, func() (uint32, error) {
return wasm.Run(ctx, runtime, ffmpeg, args)
// Load WASM rt and module.
ffmpreg := ffmpreg.Load()
if ffmpreg == nil {
return 0, errors.New("wasm not initialized")
}
// Call into ffmpeg.
args.Name = "ffmpeg"
return wasm.Run(ctx,
ffmpreg.run,
ffmpreg.mod,
args,
)
}) })
} }

View file

@ -21,6 +21,7 @@
import ( import (
"context" "context"
"errors"
"codeberg.org/gruf/go-ffmpreg/wasm" "codeberg.org/gruf/go-ffmpreg/wasm"
) )
@ -35,12 +36,25 @@
// prepares the runner to only allow max given concurrent running instances. // prepares the runner to only allow max given concurrent running instances.
func InitFfprobe(ctx context.Context, max int) error { func InitFfprobe(ctx context.Context, max int) error {
ffprobeRunner.Init(max) ffprobeRunner.Init(max)
return compileFfprobe(ctx) return initWASM(ctx)
} }
// Ffprobe runs the given arguments with an instance of ffprobe. // Ffprobe runs the given arguments with an instance of ffprobe.
func Ffprobe(ctx context.Context, args Args) (uint32, error) { func Ffprobe(ctx context.Context, args Args) (uint32, error) {
return ffprobeRunner.Run(ctx, func() (uint32, error) { return ffprobeRunner.Run(ctx, func() (uint32, error) {
return wasm.Run(ctx, runtime, ffprobe, args)
// Load WASM rt and module.
ffmpreg := ffmpreg.Load()
if ffmpreg == nil {
return 0, errors.New("wasm not initialized")
}
// Call into ffprobe.
args.Name = "ffprobe"
return wasm.Run(ctx,
ffmpreg.run,
ffmpreg.mod,
args,
)
}) })
} }

View file

@ -22,72 +22,27 @@
import ( import (
"context" "context"
"os" "os"
"sync/atomic"
"unsafe"
ffmpeglib "codeberg.org/gruf/go-ffmpreg/embed/ffmpeg" "codeberg.org/gruf/go-ffmpreg/embed"
ffprobelib "codeberg.org/gruf/go-ffmpreg/embed/ffprobe"
"codeberg.org/gruf/go-ffmpreg/wasm" "codeberg.org/gruf/go-ffmpreg/wasm"
"github.com/tetratelabs/wazero" "github.com/tetratelabs/wazero"
) )
var ( // ffmpreg is a concurrency-safe pointer
// shared WASM runtime instance. // to our necessary WebAssembly runtime
runtime wazero.Runtime // and compiled ffmpreg module instance.
var ffmpreg atomic.Pointer[struct {
run wazero.Runtime
mod wazero.CompiledModule
}]
// ffmpeg / ffprobe compiled WASM. // initWASM safely prepares new WebAssembly runtime
ffmpeg wazero.CompiledModule // and compiles ffmpreg module instance, if the global
ffprobe wazero.CompiledModule // pointer has not been already. else, is a no-op.
) func initWASM(ctx context.Context) error {
if ffmpreg.Load() != nil {
// compileFfmpeg ensures the ffmpeg WebAssembly has been
// pre-compiled into memory. If already compiled is a no-op.
func compileFfmpeg(ctx context.Context) error {
if ffmpeg != nil {
return nil
}
// Ensure runtime already initialized.
if err := initRuntime(ctx); err != nil {
return err
}
// Compile the ffmpeg WebAssembly module into memory.
cmod, err := runtime.CompileModule(ctx, ffmpeglib.B)
if err != nil {
return err
}
// Set module.
ffmpeg = cmod
return nil
}
// compileFfprobe ensures the ffprobe WebAssembly has been
// pre-compiled into memory. If already compiled is a no-op.
func compileFfprobe(ctx context.Context) error {
if ffprobe != nil {
return nil
}
// Ensure runtime already initialized.
if err := initRuntime(ctx); err != nil {
return err
}
// Compile the ffprobe WebAssembly module into memory.
cmod, err := runtime.CompileModule(ctx, ffprobelib.B)
if err != nil {
return err
}
// Set module.
ffprobe = cmod
return nil
}
// initRuntime initializes the global wazero.Runtime,
// if already initialized this function is a no-op.
func initRuntime(ctx context.Context) (err error) {
if runtime != nil {
return nil return nil
} }
@ -105,7 +60,59 @@ func initRuntime(ctx context.Context) (err error) {
cfg = cfg.WithCompilationCache(cache) cfg = cfg.WithCompilationCache(cache)
} }
var (
run wazero.Runtime
mod wazero.CompiledModule
err error
set bool
)
defer func() {
if err == nil && set {
// Drop binary.
embed.B = nil
return
}
// Close module.
if !isNil(mod) {
mod.Close(ctx)
}
// Close runtime.
if !isNil(run) {
run.Close(ctx)
}
}()
// Initialize new runtime from config. // Initialize new runtime from config.
runtime, err = wasm.NewRuntime(ctx, cfg) run, err = wasm.NewRuntime(ctx, cfg)
return if err != nil {
return err
}
// Compile ffmpreg WebAssembly into memory.
mod, err = run.CompileModule(ctx, embed.B)
if err != nil {
return err
}
// Try set global WASM runtime and module,
// or if beaten to it defer will handle close.
set = ffmpreg.CompareAndSwap(nil, &struct {
run wazero.Runtime
mod wazero.CompiledModule
}{
run: run,
mod: mod,
})
return nil
}
// isNil will safely check if 'v' is nil without
// dealing with weird Go interface nil bullshit.
func isNil(i interface{}) bool {
type eface struct{ Type, Data unsafe.Pointer }
return (*eface)(unsafe.Pointer(&i)).Data == nil
} }
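
The rewritten initWASM above replaces two package-level globals with a single atomic pointer published via CompareAndSwap, so concurrent initializers race safely and the loser cleans up what it built. A stripped-down sketch of that publish-once pattern (the `state` type and `initState` name are illustrative, not from this diff):

```go
package demo

import "sync/atomic"

// state stands in for the runtime + compiled-module pair guarded above.
type state struct{ id int }

var global atomic.Pointer[state]

// initState returns the shared state, building and publishing it only
// if no other goroutine has done so first; the loser of the race simply
// discards its copy (the real code also closes what it built).
func initState(id int) *state {
	if s := global.Load(); s != nil {
		return s // already initialized: no-op
	}
	fresh := &state{id: id}
	if global.CompareAndSwap(nil, fresh) {
		return fresh // we won the race
	}
	return global.Load() // someone else won; use theirs
}
```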

Binary file not shown.

View file

@ -1,25 +0,0 @@
package ffmpeg
import (
_ "embed"
"os"
)
func init() {
// Check for WASM source file path.
path := os.Getenv("FFMPEG_WASM")
if path == "" {
return
}
var err error
// Read file into memory.
B, err = os.ReadFile(path)
if err != nil {
panic(err)
}
}
//go:embed ffmpeg.wasm
var B []byte

Binary file not shown.

Binary file not shown.

View file

@ -1,25 +0,0 @@
package ffprobe
import (
_ "embed"
"os"
)
func init() {
// Check for WASM source file path.
path := os.Getenv("FFPROBE_WASM")
if path == "" {
return
}
var err error
// Read file into memory.
B, err = os.ReadFile(path)
if err != nil {
panic(err)
}
}
//go:embed ffprobe.wasm
var B []byte

39
vendor/codeberg.org/gruf/go-ffmpreg/embed/lib.go generated vendored Normal file
View file

@ -0,0 +1,39 @@
package embed
import (
"bytes"
"compress/gzip"
_ "embed"
"io"
"os"
)
func init() {
var err error
if path := os.Getenv("FFMPREG_WASM"); path != "" {
// Read file into memory.
B, err = os.ReadFile(path)
if err != nil {
panic(err)
}
}
// Wrap bytes in reader.
b := bytes.NewReader(B)
// Create unzipper from reader.
gz, err := gzip.NewReader(b)
if err != nil {
panic(err)
}
// Extract gzipped binary.
B, err = io.ReadAll(gz)
if err != nil {
panic(err)
}
}
//go:embed ffmpreg.wasm.gz
var B []byte

View file

@ -14,6 +14,11 @@
// wazero.Runtime on module instantiation. // wazero.Runtime on module instantiation.
type Args struct { type Args struct {
// Program name, depending on the
// module being run this may or may
// not be necessary.
Name string
// Optional further module configuration function. // Optional further module configuration function.
// (e.g. to mount filesystem dir, set env vars, etc). // (e.g. to mount filesystem dir, set env vars, etc).
Config func(wazero.ModuleConfig) wazero.ModuleConfig Config func(wazero.ModuleConfig) wazero.ModuleConfig
@ -39,7 +44,7 @@ func Run(
// Prefix arguments with module name. // Prefix arguments with module name.
cargs := make([]string, len(args.Args)+1) cargs := make([]string, len(args.Args)+1)
cargs[0] = module.Name() cargs[0] = args.Name
copy(cargs[1:], args.Args) copy(cargs[1:], args.Args)
// Prepare new module configuration. // Prepare new module configuration.
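
Setting `cargs[0] = args.Name` means the Name field becomes the program's argv[0] inside the module. Presumably (this diff does not state it explicitly) the combined go-ffmpreg binary dispatches on that name, busybox-style, which is why the callers above now pass "ffmpeg" or "ffprobe". A tiny illustrative sketch of that dispatch pattern, with entirely hypothetical names:

```go
package demo

import "path/filepath"

// dispatch picks a behaviour from argv[0], the way multi-call binaries
// such as busybox do; "ffmpeg" and "ffprobe" here are just examples.
func dispatch(argv []string) string {
	if len(argv) == 0 {
		return "ffmpeg" // default tool
	}
	switch filepath.Base(argv[0]) {
	case "ffprobe":
		return "ffprobe"
	default:
		return "ffmpeg"
	}
}
```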

View file

@ -2,6 +2,7 @@
import ( import (
"context" "context"
"fmt"
"strconv" "strconv"
"github.com/tetratelabs/wazero/api" "github.com/tetratelabs/wazero/api"
@ -70,6 +71,15 @@ func logCallback(ctx context.Context, mod api.Module, _, iCode, zMsg uint32) {
} }
} }
// Log writes a message into the error log established by [Conn.ConfigLog].
//
// https://sqlite.org/c3ref/log.html
func (c *Conn) Log(code ExtendedErrorCode, format string, a ...any) {
if c.log != nil {
c.log(code, fmt.Sprintf(format, a...))
}
}
// FileControl allows low-level control of database files. // FileControl allows low-level control of database files.
// Only a subset of opcodes are supported. // Only a subset of opcodes are supported.
// //
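
A short usage sketch for the new Log method (illustrative, not part of this diff): it assumes Conn.ConfigLog accepts a callback with the (code, message) shape implied by the `c.log` call above, and uses an in-memory database as a placeholder.

```go
package main

import (
	"log"

	"github.com/ncruces/go-sqlite3"
	_ "github.com/ncruces/go-sqlite3/embed" // embedded SQLite binary
)

func main() {
	db, err := sqlite3.Open(":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Assumed callback shape, matching how c.log is invoked above.
	db.ConfigLog(func(code sqlite3.ExtendedErrorCode, msg string) {
		log.Printf("sqlite [%d]: %s", code, msg)
	})

	// Application code can now write into the same error log.
	db.Log(sqlite3.ExtendedErrorCode(0), "opened %s", ":memory:")
}
```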

View file

@ -89,6 +89,7 @@ func (ctx Context) ResultText(value string) {
} }
// ResultRawText sets the text result of the function to a []byte. // ResultRawText sets the text result of the function to a []byte.
// Returning a nil slice is the same as calling [Context.ResultNull].
// //
// https://sqlite.org/c3ref/result_blob.html // https://sqlite.org/c3ref/result_blob.html
func (ctx Context) ResultRawText(value []byte) { func (ctx Context) ResultRawText(value []byte) {

View file

@ -106,6 +106,11 @@ func (e ErrorCode) Temporary() bool {
return e == BUSY return e == BUSY
} }
// ExtendedCode returns the extended error code for this error.
func (e ErrorCode) ExtendedCode() ExtendedErrorCode {
return ExtendedErrorCode(e)
}
// Error implements the error interface. // Error implements the error interface.
func (e ExtendedErrorCode) Error() string { func (e ExtendedErrorCode) Error() string {
return util.ErrorCodeString(uint32(e)) return util.ErrorCodeString(uint32(e))
@ -136,6 +141,11 @@ func (e ExtendedErrorCode) Timeout() bool {
return e == BUSY_TIMEOUT return e == BUSY_TIMEOUT
} }
// Code returns the primary error code for this error.
func (e ExtendedErrorCode) Code() ErrorCode {
return ErrorCode(e)
}
func errorCode(err error, def ErrorCode) (msg string, code uint32) { func errorCode(err error, def ErrorCode) (msg string, code uint32) {
switch code := err.(type) { switch code := err.(type) {
case nil: case nil:

View file

@ -1,4 +1,4 @@
//go:build unix && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_noshm || sqlite3_nosys) //go:build unix && !sqlite3_nosys
package util package util
@ -55,10 +55,10 @@ type MappedRegion struct {
used bool used bool
} }
func MapRegion(ctx context.Context, mod api.Module, f *os.File, offset int64, size int32, prot int) (*MappedRegion, error) { func MapRegion(ctx context.Context, mod api.Module, f *os.File, offset int64, size int32, readOnly bool) (*MappedRegion, error) {
s := ctx.Value(moduleKey{}).(*moduleState) s := ctx.Value(moduleKey{}).(*moduleState)
r := s.new(ctx, mod, size) r := s.new(ctx, mod, size)
err := r.mmap(f, offset, prot) err := r.mmap(f, offset, readOnly)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -75,7 +75,11 @@ func (r *MappedRegion) Unmap() error {
return err return err
} }
func (r *MappedRegion) mmap(f *os.File, offset int64, prot int) error { func (r *MappedRegion) mmap(f *os.File, offset int64, readOnly bool) error {
prot := unix.PROT_READ
if !readOnly {
prot |= unix.PROT_WRITE
}
_, err := unix.MmapPtr(int(f.Fd()), offset, r.addr, uintptr(r.size), _, err := unix.MmapPtr(int(f.Fd()), offset, r.addr, uintptr(r.size),
prot, unix.MAP_SHARED|unix.MAP_FIXED) prot, unix.MAP_SHARED|unix.MAP_FIXED)
r.used = err == nil r.used = err == nil

View file

@ -1,4 +1,4 @@
//go:build !unix || !(386 || arm || amd64 || arm64 || riscv64 || ppc64le) || sqlite3_noshm || sqlite3_nosys //go:build !unix || sqlite3_nosys
package util package util

View file

@ -0,0 +1,53 @@
//go:build !sqlite3_nosys
package util
import (
"context"
"os"
"reflect"
"unsafe"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/windows"
)
type MappedRegion struct {
windows.Handle
Data []byte
addr uintptr
}
func MapRegion(ctx context.Context, mod api.Module, f *os.File, offset int64, size int32) (*MappedRegion, error) {
h, err := windows.CreateFileMapping(windows.Handle(f.Fd()), nil, windows.PAGE_READWRITE, 0, 0, nil)
if h == 0 {
return nil, err
}
a, err := windows.MapViewOfFile(h, windows.FILE_MAP_WRITE,
uint32(offset>>32), uint32(offset), uintptr(size))
if a == 0 {
windows.CloseHandle(h)
return nil, err
}
res := &MappedRegion{Handle: h, addr: a}
// SliceHeader, although deprecated, avoids a go vet warning.
sh := (*reflect.SliceHeader)(unsafe.Pointer(&res.Data))
sh.Len = int(size)
sh.Cap = int(size)
sh.Data = a
return res, nil
}
func (r *MappedRegion) Unmap() error {
if r.Data == nil {
return nil
}
err := windows.UnmapViewOfFile(r.addr)
if err != nil {
return err
}
r.Data = nil
return windows.CloseHandle(r.Handle)
}

View file

@ -255,6 +255,7 @@ func (s *Stmt) BindText(param int, value string) error {
// BindRawText binds a []byte to the prepared statement as text. // BindRawText binds a []byte to the prepared statement as text.
// The leftmost SQL parameter has an index of 1. // The leftmost SQL parameter has an index of 1.
// Binding a nil slice is the same as calling [Stmt.BindNull].
// //
// https://sqlite.org/c3ref/bind_blob.html // https://sqlite.org/c3ref/bind_blob.html
func (s *Stmt) BindRawText(param int, value []byte) error { func (s *Stmt) BindRawText(param int, value []byte) error {

View file

@ -15,24 +15,23 @@ The main differences are [file locking](#file-locking) and [WAL mode](#write-ahe
POSIX advisory locks, which SQLite uses on Unix, are POSIX advisory locks, which SQLite uses on Unix, are
[broken by design](https://github.com/sqlite/sqlite/blob/b74eb0/src/os_unix.c#L1073-L1161). [broken by design](https://github.com/sqlite/sqlite/blob/b74eb0/src/os_unix.c#L1073-L1161).
Instead, on Linux and macOS, this package uses
On Linux and macOS, this package uses
[OFD locks](https://www.gnu.org/software/libc/manual/html_node/Open-File-Description-Locks.html) [OFD locks](https://www.gnu.org/software/libc/manual/html_node/Open-File-Description-Locks.html)
to synchronize access to database files. to synchronize access to database files.
OFD locks are fully compatible with POSIX advisory locks.
This package can also use This package can also use
[BSD locks](https://man.freebsd.org/cgi/man.cgi?query=flock&sektion=2), [BSD locks](https://man.freebsd.org/cgi/man.cgi?query=flock&sektion=2),
albeit with reduced concurrency (`BEGIN IMMEDIATE` behaves like `BEGIN EXCLUSIVE`). albeit with reduced concurrency (`BEGIN IMMEDIATE` behaves like `BEGIN EXCLUSIVE`).
On BSD, macOS, and illumos, BSD locks are fully compatible with POSIX advisory locks;
on Linux and z/OS, they are fully functional, but incompatible;
elsewhere, they are very likely broken.
BSD locks are the default on BSD and illumos, BSD locks are the default on BSD and illumos,
but you can opt into them with the `sqlite3_flock` build tag. but you can opt into them with the `sqlite3_flock` build tag.
On Windows, this package uses `LockFileEx` and `UnlockFileEx`, On Windows, this package uses `LockFileEx` and `UnlockFileEx`,
like SQLite. like SQLite.
You can also opt into a cross-platform locking implementation
with the `sqlite3_dotlk` build tag.
The only requirement is an atomic `os.Mkdir`.
Otherwise, file locking is not supported, and you must use Otherwise, file locking is not supported, and you must use
[`nolock=1`](https://sqlite.org/uri.html#urinolock) [`nolock=1`](https://sqlite.org/uri.html#urinolock)
(or [`immutable=1`](https://sqlite.org/uri.html#uriimmutable)) (or [`immutable=1`](https://sqlite.org/uri.html#uriimmutable))
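
As a concrete illustration of the options described above (not part of the diff): the locking build tags are chosen at compile time, e.g. `go build -tags sqlite3_dotlk`, while on builds without locking support databases must be opened with `nolock=1`. A minimal sketch using the package's `database/sql` driver, with a placeholder path:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/ncruces/go-sqlite3/driver" // registers the "sqlite3" driver
	_ "github.com/ncruces/go-sqlite3/embed"  // embedded SQLite binary
)

func main() {
	// nolock=1 disables file locking entirely, so the caller must
	// guarantee no concurrent access to demo.db (placeholder path).
	db, err := sql.Open("sqlite3", "file:demo.db?nolock=1")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```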
@ -46,7 +45,7 @@ to check if your build supports file locking.
### Write-Ahead Logging ### Write-Ahead Logging
On little-endian Unix, this package uses `mmap` to implement On Unix, this package may use `mmap` to implement
[shared-memory for the WAL-index](https://sqlite.org/wal.html#implementation_of_shared_memory_for_the_wal_index), [shared-memory for the WAL-index](https://sqlite.org/wal.html#implementation_of_shared_memory_for_the_wal_index),
like SQLite. like SQLite.
@ -55,6 +54,11 @@ a WAL database can only be accessed by a single process.
Other processes that attempt to access a database locked with BSD locks, Other processes that attempt to access a database locked with BSD locks,
will fail with the [`SQLITE_PROTOCOL`](https://sqlite.org/rescode.html#protocol) error code. will fail with the [`SQLITE_PROTOCOL`](https://sqlite.org/rescode.html#protocol) error code.
On Windows, this package may use `MapViewOfFile`, like SQLite.
You can also opt into a cross-platform, in-process, memory sharing implementation
with the `sqlite3_dotlk` build tag.
Otherwise, [WAL support is limited](https://sqlite.org/wal.html#noshm), Otherwise, [WAL support is limited](https://sqlite.org/wal.html#noshm),
and `EXCLUSIVE` locking mode must be set to create, read, and write WAL databases. and `EXCLUSIVE` locking mode must be set to create, read, and write WAL databases.
To use `EXCLUSIVE` locking mode with the To use `EXCLUSIVE` locking mode with the
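
Tying the paragraphs above together (an illustrative sketch, not part of the diff): when shared memory is unavailable, EXCLUSIVE locking mode must be set before a database is used in WAL mode, which amounts to two pragmas. With database/sql it is safest to limit the pool to one connection so the pragmas apply to the connection actually used.

```go
package demo

import "database/sql"

// enableWAL prepares a database for WAL on builds without shared-memory
// support, per the EXCLUSIVE locking-mode requirement described above.
func enableWAL(db *sql.DB) error {
	db.SetMaxOpenConns(1) // pragmas are per-connection
	if _, err := db.Exec(`PRAGMA locking_mode = EXCLUSIVE`); err != nil {
		return err
	}
	_, err := db.Exec(`PRAGMA journal_mode = WAL`)
	return err
}
```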
@ -67,7 +71,7 @@ to check if your build supports shared memory.
### Batch-Atomic Write ### Batch-Atomic Write
On 64-bit Linux, this package supports On Linux, this package may support
[batch-atomic writes](https://sqlite.org/cgi/src/technote/714) [batch-atomic writes](https://sqlite.org/cgi/src/technote/714)
on the F2FS filesystem. on the F2FS filesystem.
@ -86,27 +90,27 @@ The implementation is compatible with SQLite's
### Build Tags ### Build Tags
The VFS can be customized with a few build tags: The VFS can be customized with a few build tags:
- `sqlite3_flock` forces the use of BSD locks; it can be used on z/OS to enable locking, - `sqlite3_flock` forces the use of BSD locks.
and elsewhere to test BSD locks. - `sqlite3_dotlk` forces the use of dot-file locks.
- `sqlite3_nosys` prevents importing [`x/sys`](https://pkg.go.dev/golang.org/x/sys); - `sqlite3_nosys` prevents importing [`x/sys`](https://pkg.go.dev/golang.org/x/sys).
disables locking _and_ shared memory on all platforms.
- `sqlite3_noshm` disables shared memory on all platforms.
> [!IMPORTANT] > [!IMPORTANT]
> The default configuration of this package is compatible with the standard > The default configuration of this package is compatible with the standard
> [Unix and Windows SQLite VFSes](https://sqlite.org/vfs.html#multiple_vfses); > [Unix and Windows SQLite VFSes](https://sqlite.org/vfs.html#multiple_vfses);
> `sqlite3_flock` builds are compatible with the > `sqlite3_flock` builds are compatible with the
> [`unix-flock` VFS](https://sqlite.org/compile.html#enable_locking_style). > [`unix-flock` VFS](https://sqlite.org/compile.html#enable_locking_style);
> `sqlite3_dotlk` builds are compatible with the
> [`unix-dotfile` VFS](https://sqlite.org/compile.html#enable_locking_style).
> If incompatible file locking is used, accessing databases concurrently with > If incompatible file locking is used, accessing databases concurrently with
> _other_ SQLite libraries will eventually corrupt data. > _other_ SQLite libraries will eventually corrupt data.
### Custom VFSes ### Custom VFSes
- [`github.com/ncruces/go-sqlite3/vfs/adiantum`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/adiantum)
wraps a VFS to offer encryption at rest.
- [`github.com/ncruces/go-sqlite3/vfs/memdb`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/memdb) - [`github.com/ncruces/go-sqlite3/vfs/memdb`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/memdb)
implements an in-memory VFS. implements an in-memory VFS.
- [`github.com/ncruces/go-sqlite3/vfs/readervfs`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/readervfs) - [`github.com/ncruces/go-sqlite3/vfs/readervfs`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/readervfs)
implements a VFS for immutable databases. implements a VFS for immutable databases.
- [`github.com/ncruces/go-sqlite3/vfs/adiantum`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/adiantum)
wraps a VFS to offer encryption at rest.
- [`github.com/ncruces/go-sqlite3/vfs/xts`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/xts) - [`github.com/ncruces/go-sqlite3/vfs/xts`](https://pkg.go.dev/github.com/ncruces/go-sqlite3/vfs/xts)
wraps a VFS to offer encryption at rest. wraps a VFS to offer encryption at rest.

View file

@ -234,4 +234,8 @@ func (e _ErrorCode) Error() string {
_SHM_LOCK _ShmFlag = 2 _SHM_LOCK _ShmFlag = 2
_SHM_SHARED _ShmFlag = 4 _SHM_SHARED _ShmFlag = 4
_SHM_EXCLUSIVE _ShmFlag = 8 _SHM_EXCLUSIVE _ShmFlag = 8
_SHM_NLOCK = 8
_SHM_BASE = 120
_SHM_DMS = _SHM_BASE + _SHM_NLOCK
) )

View file

@ -35,10 +35,10 @@ func testSymlinks(path string) error {
func (vfsOS) Delete(path string, syncDir bool) error { func (vfsOS) Delete(path string, syncDir bool) error {
err := os.Remove(path) err := os.Remove(path)
if errors.Is(err, fs.ErrNotExist) {
return _IOERR_DELETE_NOENT
}
if err != nil { if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return _IOERR_DELETE_NOENT
}
return err return err
} }
if runtime.GOOS != "windows" && syncDir { if runtime.GOOS != "windows" && syncDir {
@ -151,6 +151,7 @@ func (f *vfsFile) Close() error {
if f.shm != nil { if f.shm != nil {
f.shm.Close() f.shm.Close()
} }
f.Unlock(LOCK_NONE)
return f.File.Close() return f.File.Close()
} }
@ -206,10 +207,10 @@ func (f *vfsFile) HasMoved() (bool, error) {
return false, err return false, err
} }
pi, err := os.Stat(f.Name()) pi, err := os.Stat(f.Name())
if errors.Is(err, fs.ErrNotExist) {
return true, nil
}
if err != nil { if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return true, nil
}
return false, err return false, err
} }
return !os.SameFile(fi, pi), nil return !os.SameFile(fi, pi), nil

View file

@ -1,4 +1,4 @@
//go:build (linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) && !sqlite3_nosys //go:build ((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk
package vfs package vfs

View file

@ -1,4 +1,4 @@
//go:build !(linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) || sqlite3_nosys //go:build !(((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk)
package vfs package vfs

View file

@ -78,19 +78,15 @@ type memDB struct {
// +checklocks:dataMtx // +checklocks:dataMtx
data []*[sectorSize]byte data []*[sectorSize]byte
// +checklocks:dataMtx // +checklocks:dataMtx
size int64 size int64
// +checklocks:lockMtx
shared int32
// +checklocks:lockMtx
reserved bool
// +checklocks:lockMtx
pending bool
// +checklocks:memoryMtx // +checklocks:memoryMtx
refs int refs int32
shared int32 // +checklocks:lockMtx
pending bool // +checklocks:lockMtx
reserved bool // +checklocks:lockMtx
lockMtx sync.Mutex lockMtx sync.Mutex
dataMtx sync.RWMutex dataMtx sync.RWMutex
@ -253,12 +249,12 @@ func (m *memFile) Unlock(lock vfs.LockLevel) error {
m.lockMtx.Lock() m.lockMtx.Lock()
defer m.lockMtx.Unlock() defer m.lockMtx.Unlock()
if m.pending && m.lock >= vfs.LOCK_PENDING { if m.lock >= vfs.LOCK_RESERVED {
m.pending = false
}
if m.reserved && m.lock >= vfs.LOCK_RESERVED {
m.reserved = false m.reserved = false
} }
if m.lock >= vfs.LOCK_PENDING {
m.pending = false
}
if lock < vfs.LOCK_SHARED { if lock < vfs.LOCK_SHARED {
m.shared-- m.shared--
} }

View file

@ -1,4 +1,4 @@
//go:build (freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) && !sqlite3_nosys //go:build ((freebsd || openbsd || netbsd || dragonfly || illumos) && !(sqlite3_dotlk || sqlite3_nosys)) || sqlite3_flock
package vfs package vfs

143
vendor/github.com/ncruces/go-sqlite3/vfs/os_dotlk.go generated vendored Normal file
View file

@ -0,0 +1,143 @@
//go:build sqlite3_dotlk
package vfs
import (
"errors"
"io/fs"
"os"
"sync"
)
var (
// +checklocks:vfsDotLocksMtx
vfsDotLocks = map[string]*vfsDotLocker{}
vfsDotLocksMtx sync.Mutex
)
type vfsDotLocker struct {
shared int // +checklocks:vfsDotLocksMtx
pending *os.File // +checklocks:vfsDotLocksMtx
reserved *os.File // +checklocks:vfsDotLocksMtx
}
func osGetSharedLock(file *os.File) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
err := os.Mkdir(name+".lock", 0777)
if errors.Is(err, fs.ErrExist) {
return _BUSY // Another process has the lock.
}
if err != nil {
return _IOERR_LOCK
}
locker = &vfsDotLocker{}
vfsDotLocks[name] = locker
}
if locker.pending != nil {
return _BUSY
}
locker.shared++
return _OK
}
func osGetReservedLock(file *os.File) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_LOCK
}
if locker.reserved != nil && locker.reserved != file {
return _BUSY
}
locker.reserved = file
return _OK
}
func osGetExclusiveLock(file *os.File, _ *LockLevel) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_LOCK
}
if locker.pending != nil && locker.pending != file {
return _BUSY
}
locker.pending = file
if locker.shared > 1 {
return _BUSY
}
return _OK
}
func osDowngradeLock(file *os.File, _ LockLevel) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_UNLOCK
}
if locker.reserved == file {
locker.reserved = nil
}
if locker.pending == file {
locker.pending = nil
}
return _OK
}
func osReleaseLock(file *os.File, state LockLevel) _ErrorCode {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return _IOERR_UNLOCK
}
if locker.shared == 1 {
err := os.Remove(name + ".lock")
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return _IOERR_UNLOCK
}
delete(vfsDotLocks, name)
}
if locker.reserved == file {
locker.reserved = nil
}
if locker.pending == file {
locker.pending = nil
}
locker.shared--
return _OK
}
func osCheckReservedLock(file *os.File) (bool, _ErrorCode) {
vfsDotLocksMtx.Lock()
defer vfsDotLocksMtx.Unlock()
name := file.Name()
locker := vfsDotLocks[name]
if locker == nil {
return false, _OK
}
return locker.reserved != nil, _OK
}

View file

@ -1,4 +1,4 @@
//go:build (linux || darwin) && !(sqlite3_flock || sqlite3_nosys) //go:build (linux || darwin) && !(sqlite3_flock || sqlite3_dotlk || sqlite3_nosys)
package vfs package vfs

View file

@ -1,4 +1,4 @@
//go:build !sqlite3_nosys //go:build !(sqlite3_dotlk || sqlite3_nosys)
package vfs package vfs

View file

@ -1,20 +1,7 @@
//go:build (darwin || linux) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_flock || sqlite3_noshm || sqlite3_nosys) //go:build ((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk
package vfs package vfs
import (
"context"
"io"
"os"
"sync"
"time"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/unix"
"github.com/ncruces/go-sqlite3/internal/util"
)
// SupportsSharedMemory is false on platforms that do not support shared memory. // SupportsSharedMemory is false on platforms that do not support shared memory.
// To use [WAL without shared-memory], you need to set [EXCLUSIVE locking mode]. // To use [WAL without shared-memory], you need to set [EXCLUSIVE locking mode].
// //
@ -22,12 +9,6 @@
// [EXCLUSIVE locking mode]: https://sqlite.org/pragma.html#pragma_locking_mode // [EXCLUSIVE locking mode]: https://sqlite.org/pragma.html#pragma_locking_mode
const SupportsSharedMemory = true const SupportsSharedMemory = true
const (
_SHM_NLOCK = 8
_SHM_BASE = 120
_SHM_DMS = _SHM_BASE + _SHM_NLOCK
)
func (f *vfsFile) SharedMemory() SharedMemory { return f.shm } func (f *vfsFile) SharedMemory() SharedMemory { return f.shm }
// NewSharedMemory returns a shared-memory WAL-index // NewSharedMemory returns a shared-memory WAL-index
@ -41,172 +22,5 @@ func NewSharedMemory(path string, flags OpenFlag) SharedMemory {
if flags&OPEN_MAIN_DB == 0 || flags&(OPEN_DELETEONCLOSE|OPEN_MEMORY) != 0 { if flags&OPEN_MAIN_DB == 0 || flags&(OPEN_DELETEONCLOSE|OPEN_MEMORY) != 0 {
return nil return nil
} }
return &vfsShm{ return &vfsShm{path: path}
path: path,
readOnly: flags&OPEN_READONLY != 0,
}
}
var _ blockingSharedMemory = &vfsShm{}
type vfsShm struct {
*os.File
path string
regions []*util.MappedRegion
readOnly bool
blocking bool
sync.Mutex
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.File == nil {
var flag int
if s.readOnly {
flag = unix.O_RDONLY
} else {
flag = unix.O_RDWR
}
f, err := os.OpenFile(s.path,
flag|unix.O_CREAT|unix.O_NOFOLLOW, 0666)
if err != nil {
return _CANTOPEN
}
s.File = f
}
// Dead man's switch.
if lock, rc := osTestLock(s.File, _SHM_DMS, 1); rc != _OK {
return _IOERR_LOCK
} else if lock == unix.F_WRLCK {
return _BUSY
} else if lock == unix.F_UNLCK {
if s.readOnly {
return _READONLY_CANTINIT
}
// Do not use a blocking lock here.
// If the lock cannot be obtained immediately,
// it means some other connection is truncating the file.
// And after it has done so, it will not release its lock,
// but only downgrade it to a shared lock.
// So no point in blocking here.
// The call below to obtain the shared DMS lock may use a blocking lock.
if rc := osWriteLock(s.File, _SHM_DMS, 1, 0); rc != _OK {
return rc
}
if err := s.Truncate(0); err != nil {
return _IOERR_SHMOPEN
}
}
if rc := osReadLock(s.File, _SHM_DMS, 1, time.Millisecond); rc != _OK {
return rc
}
return _OK
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
// Ensure size is a multiple of the OS page size.
if int(size)&(unix.Getpagesize()-1) != 0 {
return 0, _IOERR_SHMMAP
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
// Check if file is big enough.
o, err := s.Seek(0, io.SeekEnd)
if err != nil {
return 0, _IOERR_SHMSIZE
}
if n := (int64(id) + 1) * int64(size); n > o {
if !extend {
return 0, _OK
}
err := osAllocate(s.File, n)
if err != nil {
return 0, _IOERR_SHMSIZE
}
}
var prot int
if s.readOnly {
prot = unix.PROT_READ
} else {
prot = unix.PROT_READ | unix.PROT_WRITE
}
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, prot)
if err != nil {
return 0, _IOERR_SHMMAP
}
s.regions = append(s.regions, r)
if s.readOnly {
return r.Ptr, _READONLY
}
return r.Ptr, _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
// Argument check.
if n <= 0 || offset < 0 || offset+n > _SHM_NLOCK {
panic(util.AssertErr())
}
switch flags {
case
_SHM_LOCK | _SHM_SHARED,
_SHM_LOCK | _SHM_EXCLUSIVE,
_SHM_UNLOCK | _SHM_SHARED,
_SHM_UNLOCK | _SHM_EXCLUSIVE:
//
default:
panic(util.AssertErr())
}
if n != 1 && flags&_SHM_EXCLUSIVE == 0 {
panic(util.AssertErr())
}
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+int64(offset), int64(n))
case flags&_SHM_SHARED != 0:
return osReadLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
case flags&_SHM_EXCLUSIVE != 0:
return osWriteLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
default:
panic(util.AssertErr())
}
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.File == nil {
return
}
// Unmap regions.
for _, r := range s.regions {
r.Unmap()
}
clear(s.regions)
s.regions = s.regions[:0]
// Close the file.
if delete {
os.Remove(s.path)
}
s.Close()
s.File = nil
}
func (s *vfsShm) shmBarrier() {
s.Lock()
//lint:ignore SA2001 memory barrier.
s.Unlock()
}
func (s *vfsShm) shmEnableBlocking(block bool) {
s.blocking = block
} }

View file

@ -1,4 +1,4 @@
//go:build (freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_noshm || sqlite3_nosys) //go:build ((freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_dotlk || sqlite3_nosys)) || sqlite3_flock
package vfs package vfs
@ -14,44 +14,14 @@
"github.com/ncruces/go-sqlite3/internal/util" "github.com/ncruces/go-sqlite3/internal/util"
) )
// SupportsSharedMemory is false on platforms that do not support shared memory.
// To use [WAL without shared-memory], you need to set [EXCLUSIVE locking mode].
//
// [WAL without shared-memory]: https://sqlite.org/wal.html#noshm
// [EXCLUSIVE locking mode]: https://sqlite.org/pragma.html#pragma_locking_mode
const SupportsSharedMemory = true
const _SHM_NLOCK = 8
func (f *vfsFile) SharedMemory() SharedMemory { return f.shm }
// NewSharedMemory returns a shared-memory WAL-index
// backed by a file with the given path.
// It will return nil if shared-memory is not supported,
// or not appropriate for the given flags.
// Only [OPEN_MAIN_DB] databases may need a WAL-index.
// You must ensure all concurrent accesses to a database
// use shared-memory instances created with the same path.
func NewSharedMemory(path string, flags OpenFlag) SharedMemory {
if flags&OPEN_MAIN_DB == 0 || flags&(OPEN_DELETEONCLOSE|OPEN_MEMORY) != 0 {
return nil
}
return &vfsShm{
path: path,
readOnly: flags&OPEN_READONLY != 0,
}
}
type vfsShmFile struct { type vfsShmFile struct {
*os.File *os.File
info os.FileInfo info os.FileInfo
// +checklocks:vfsShmFilesMtx refs int // +checklocks:vfsShmFilesMtx
refs int
// +checklocks:lockMtx lock [_SHM_NLOCK]int16 // +checklocks:Mutex
lock [_SHM_NLOCK]int16 sync.Mutex
lockMtx sync.Mutex
} }
var ( var (
@ -62,10 +32,9 @@ type vfsShmFile struct {
type vfsShm struct { type vfsShm struct {
*vfsShmFile *vfsShmFile
path string path string
lock [_SHM_NLOCK]bool lock [_SHM_NLOCK]bool
regions []*util.MappedRegion regions []*util.MappedRegion
readOnly bool
} }
func (s *vfsShm) Close() error { func (s *vfsShm) Close() error {
@ -80,7 +49,7 @@ func (s *vfsShm) Close() error {
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK) s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count. // Decrease reference count.
if s.vfsShmFile.refs > 1 { if s.vfsShmFile.refs > 0 {
s.vfsShmFile.refs-- s.vfsShmFile.refs--
s.vfsShmFile = nil s.vfsShmFile = nil
return nil return nil
@ -97,7 +66,7 @@ func (s *vfsShm) Close() error {
panic(util.AssertErr()) panic(util.AssertErr())
} }
func (s *vfsShm) shmOpen() (rc _ErrorCode) { func (s *vfsShm) shmOpen() _ErrorCode {
if s.vfsShmFile != nil { if s.vfsShmFile != nil {
return _OK return _OK
} }
@ -128,34 +97,29 @@ func (s *vfsShm) shmOpen() (rc _ErrorCode) {
} }
} }
// Lock and truncate the file, if not readonly. // Lock and truncate the file.
// The lock is only released by closing the file. // The lock is only released by closing the file.
if s.readOnly { if rc := osLock(f, unix.LOCK_EX|unix.LOCK_NB, _IOERR_LOCK); rc != _OK {
rc = _READONLY_CANTINIT return rc
} else { }
if rc := osLock(f, unix.LOCK_EX|unix.LOCK_NB, _IOERR_LOCK); rc != _OK { if err := f.Truncate(0); err != nil {
return rc return _IOERR_SHMOPEN
}
if err := f.Truncate(0); err != nil {
return _IOERR_SHMOPEN
}
} }
// Add the new shared file. // Add the new shared file.
s.vfsShmFile = &vfsShmFile{ s.vfsShmFile = &vfsShmFile{
File: f, File: f,
info: fi, info: fi,
refs: 1,
} }
f = nil // Don't close the file. f = nil // Don't close the file.
for i, g := range vfsShmFiles { for i, g := range vfsShmFiles {
if g == nil { if g == nil {
vfsShmFiles[i] = s.vfsShmFile vfsShmFiles[i] = s.vfsShmFile
return rc return _OK
} }
} }
vfsShmFiles = append(vfsShmFiles, s.vfsShmFile) vfsShmFiles = append(vfsShmFiles, s.vfsShmFile)
return rc return _OK
} }
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) { func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
@ -177,32 +141,22 @@ func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, ext
if !extend { if !extend {
return 0, _OK return 0, _OK
} }
err := osAllocate(s.File, n) if osAllocate(s.File, n) != nil {
if err != nil {
return 0, _IOERR_SHMSIZE return 0, _IOERR_SHMSIZE
} }
} }
var prot int r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, false)
if s.readOnly {
prot = unix.PROT_READ
} else {
prot = unix.PROT_READ | unix.PROT_WRITE
}
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, prot)
if err != nil { if err != nil {
return 0, _IOERR_SHMMAP return 0, _IOERR_SHMMAP
} }
s.regions = append(s.regions, r) s.regions = append(s.regions, r)
if s.readOnly {
return r.Ptr, _READONLY
}
return r.Ptr, _OK return r.Ptr, _OK
} }
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode { func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.lockMtx.Lock() s.Lock()
defer s.lockMtx.Unlock() defer s.Unlock()
switch { switch {
case flags&_SHM_UNLOCK != 0: case flags&_SHM_UNLOCK != 0:
@ -224,7 +178,7 @@ func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
if s.lock[i] { if s.lock[i] {
panic(util.AssertErr()) panic(util.AssertErr())
} }
if s.vfsShmFile.lock[i] < 0 { if s.vfsShmFile.lock[i]+1 <= 0 {
return _BUSY return _BUSY
} }
} }
@ -261,8 +215,7 @@ func (s *vfsShm) shmUnmap(delete bool) {
for _, r := range s.regions { for _, r := range s.regions {
r.Unmap() r.Unmap()
} }
clear(s.regions) s.regions = nil
s.regions = s.regions[:0]
// Close the file. // Close the file.
if delete { if delete {
@ -272,7 +225,7 @@ func (s *vfsShm) shmUnmap(delete bool) {
} }
func (s *vfsShm) shmBarrier() { func (s *vfsShm) shmBarrier() {
s.lockMtx.Lock() s.Lock()
//lint:ignore SA2001 memory barrier. //lint:ignore SA2001 memory barrier.
s.lockMtx.Unlock() s.Unlock()
} }

84
vendor/github.com/ncruces/go-sqlite3/vfs/shm_copy.go generated vendored Normal file
View file

@ -0,0 +1,84 @@
//go:build (windows && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_dotlk
package vfs
import (
"unsafe"
"github.com/ncruces/go-sqlite3/internal/util"
)
const (
_WALINDEX_HDR_SIZE = 136
_WALINDEX_PGSZ = 32768
)
// This looks like a safe way of keeping the WAL-index in sync.
//
// The WAL-index file starts with a header,
// and the index doesn't meaningfully change if the header doesn't change.
//
// The header starts with two 48 byte, checksummed, copies of the same information,
// which are accessed independently between memory barriers.
// The checkpoint information that follows uses 4 byte aligned words.
//
// Finally, we have the WAL-index hash tables,
// which are only modified holding the exclusive WAL_WRITE_LOCK.
//
// Since all the data is either redundant+checksummed,
// 4 byte aligned, or modified under an exclusive lock,
// the copies below should correctly keep copies in sync.
//
// https://sqlite.org/walformat.html#the_wal_index_file_format
func (s *vfsShm) shmAcquire() {
if len(s.ptrs) == 0 || shmUnmodified(s.shadow[0][:], s.shared[0][:]) {
return
}
// Copies modified words from shared to private memory.
for id, p := range s.ptrs {
shared := shmPage(s.shared[id][:])
shadow := shmPage(s.shadow[id][:])
privat := shmPage(util.View(s.mod, p, _WALINDEX_PGSZ))
for i, shared := range shared {
if shadow[i] != shared {
shadow[i] = shared
privat[i] = shared
}
}
}
}
func (s *vfsShm) shmRelease() {
if len(s.ptrs) == 0 || shmUnmodified(s.shadow[0][:], util.View(s.mod, s.ptrs[0], _WALINDEX_HDR_SIZE)) {
return
}
// Copies modified words from private to shared memory.
for id, p := range s.ptrs {
shared := shmPage(s.shared[id][:])
shadow := shmPage(s.shadow[id][:])
privat := shmPage(util.View(s.mod, p, _WALINDEX_PGSZ))
for i, privat := range privat {
if shadow[i] != privat {
shadow[i] = privat
shared[i] = privat
}
}
}
}
func (s *vfsShm) shmBarrier() {
s.Lock()
s.shmAcquire()
s.shmRelease()
s.Unlock()
}
func shmPage(s []byte) *[_WALINDEX_PGSZ / 4]uint32 {
p := (*uint32)(unsafe.Pointer(unsafe.SliceData(s)))
return (*[_WALINDEX_PGSZ / 4]uint32)(unsafe.Slice(p, _WALINDEX_PGSZ/4))
}
func shmUnmodified(v1, v2 []byte) bool {
return *(*[_WALINDEX_HDR_SIZE]byte)(v1[:]) == *(*[_WALINDEX_HDR_SIZE]byte)(v2[:])
}

224
vendor/github.com/ncruces/go-sqlite3/vfs/shm_dotlk.go generated vendored Normal file
View file

@ -0,0 +1,224 @@
//go:build sqlite3_dotlk
package vfs
import (
"context"
"errors"
"io/fs"
"os"
"sync"
"github.com/ncruces/go-sqlite3/internal/util"
"github.com/tetratelabs/wazero/api"
)
type vfsShmBuffer struct {
shared [][_WALINDEX_PGSZ]byte
refs int // +checklocks:vfsShmBuffersMtx
lock [_SHM_NLOCK]int16 // +checklocks:Mutex
sync.Mutex
}
var (
// +checklocks:vfsShmBuffersMtx
vfsShmBuffers = map[string]*vfsShmBuffer{}
vfsShmBuffersMtx sync.Mutex
)
type vfsShm struct {
*vfsShmBuffer
mod api.Module
alloc api.Function
free api.Function
path string
shadow [][_WALINDEX_PGSZ]byte
ptrs []uint32
stack [1]uint64
lock [_SHM_NLOCK]bool
}
func (s *vfsShm) Close() error {
if s.vfsShmBuffer == nil {
return nil
}
vfsShmBuffersMtx.Lock()
defer vfsShmBuffersMtx.Unlock()
// Unlock everything.
s.shmLock(0, _SHM_NLOCK, _SHM_UNLOCK)
// Decrease reference count.
if s.vfsShmBuffer.refs > 0 {
s.vfsShmBuffer.refs--
s.vfsShmBuffer = nil
return nil
}
err := os.Remove(s.path)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return _IOERR_UNLOCK
}
delete(vfsShmBuffers, s.path)
s.vfsShmBuffer = nil
return nil
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.vfsShmBuffer != nil {
return _OK
}
vfsShmBuffersMtx.Lock()
defer vfsShmBuffersMtx.Unlock()
// Find a shared buffer, increase the reference count.
if g, ok := vfsShmBuffers[s.path]; ok {
s.vfsShmBuffer = g
g.refs++
return _OK
}
// Create a directory on disk to ensure only this process
// uses this path to register a shared memory.
err := os.Mkdir(s.path, 0777)
if errors.Is(err, fs.ErrExist) {
return _BUSY
}
if err != nil {
return _IOERR_LOCK
}
// Add the new shared buffer.
s.vfsShmBuffer = &vfsShmBuffer{}
vfsShmBuffers[s.path] = s.vfsShmBuffer
return _OK
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
if size != _WALINDEX_PGSZ {
return 0, _IOERR_SHMMAP
}
if s.mod == nil {
s.mod = mod
s.free = mod.ExportedFunction("sqlite3_free")
s.alloc = mod.ExportedFunction("sqlite3_malloc64")
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
s.Lock()
defer s.Unlock()
defer s.shmAcquire()
// Extend shared memory.
if int(id) >= len(s.shared) {
if !extend {
return 0, _OK
}
s.shared = append(s.shared, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shared)+1)...)
}
// Allocate shadow memory.
if int(id) >= len(s.shadow) {
s.shadow = append(s.shadow, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shadow)+1)...)
s.shadow[0][4] = 1 // force invalidation
}
// Allocate local memory.
for int(id) >= len(s.ptrs) {
s.stack[0] = uint64(size)
if err := s.alloc.CallWithStack(ctx, s.stack[:]); err != nil {
panic(err)
}
if s.stack[0] == 0 {
panic(util.OOMErr)
}
clear(util.View(s.mod, uint32(s.stack[0]), _WALINDEX_PGSZ))
s.ptrs = append(s.ptrs, uint32(s.stack[0]))
}
return s.ptrs[id], _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
s.Lock()
defer s.Unlock()
switch {
case flags&_SHM_LOCK != 0:
defer s.shmAcquire()
case flags&_SHM_EXCLUSIVE != 0:
s.shmRelease()
}
switch {
case flags&_SHM_UNLOCK != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
if s.vfsShmBuffer.lock[i] == 0 {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i] <= 0 {
s.vfsShmBuffer.lock[i] = 0
} else {
s.vfsShmBuffer.lock[i]--
}
s.lock[i] = false
}
}
case flags&_SHM_SHARED != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i]+1 <= 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmBuffer.lock[i]++
s.lock[i] = true
}
case flags&_SHM_EXCLUSIVE != 0:
for i := offset; i < offset+n; i++ {
if s.lock[i] {
panic(util.AssertErr())
}
if s.vfsShmBuffer.lock[i] != 0 {
return _BUSY
}
}
for i := offset; i < offset+n; i++ {
s.vfsShmBuffer.lock[i] = -1
s.lock[i] = true
}
default:
panic(util.AssertErr())
}
return _OK
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.vfsShmBuffer == nil {
return
}
defer s.Close()
s.Lock()
s.shmRelease()
defer s.Unlock()
for _, p := range s.ptrs {
s.stack[0] = uint64(p)
if err := s.free.CallWithStack(context.Background(), s.stack[:]); err != nil {
panic(err)
}
}
s.ptrs = nil
s.shadow = nil
}

168
vendor/github.com/ncruces/go-sqlite3/vfs/shm_ofd.go generated vendored Normal file
View file

@ -0,0 +1,168 @@
//go:build (linux || darwin) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_flock || sqlite3_dotlk || sqlite3_nosys)
package vfs
import (
"context"
"io"
"os"
"sync"
"time"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/unix"
"github.com/ncruces/go-sqlite3/internal/util"
)
type vfsShm struct {
*os.File
path string
regions []*util.MappedRegion
readOnly bool
blocking bool
sync.Mutex
}
var _ blockingSharedMemory = &vfsShm{}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.File == nil {
f, err := os.OpenFile(s.path,
unix.O_RDWR|unix.O_CREAT|unix.O_NOFOLLOW, 0666)
if err != nil {
f, err = os.OpenFile(s.path,
unix.O_RDONLY|unix.O_CREAT|unix.O_NOFOLLOW, 0666)
s.readOnly = true
}
if err != nil {
return _CANTOPEN
}
s.File = f
}
// Dead man's switch.
if lock, rc := osTestLock(s.File, _SHM_DMS, 1); rc != _OK {
return _IOERR_LOCK
} else if lock == unix.F_WRLCK {
return _BUSY
} else if lock == unix.F_UNLCK {
if s.readOnly {
return _READONLY_CANTINIT
}
// Do not use a blocking lock here.
// If the lock cannot be obtained immediately,
// it means some other connection is truncating the file.
// And after it has done so, it will not release its lock,
// but only downgrade it to a shared lock.
// So no point in blocking here.
// The call below to obtain the shared DMS lock may use a blocking lock.
if rc := osWriteLock(s.File, _SHM_DMS, 1, 0); rc != _OK {
return rc
}
if err := s.Truncate(0); err != nil {
return _IOERR_SHMOPEN
}
}
return osReadLock(s.File, _SHM_DMS, 1, time.Millisecond)
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
// Ensure size is a multiple of the OS page size.
if int(size)&(unix.Getpagesize()-1) != 0 {
return 0, _IOERR_SHMMAP
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
// Check if file is big enough.
o, err := s.Seek(0, io.SeekEnd)
if err != nil {
return 0, _IOERR_SHMSIZE
}
if n := (int64(id) + 1) * int64(size); n > o {
if !extend {
return 0, _OK
}
if s.readOnly || osAllocate(s.File, n) != nil {
return 0, _IOERR_SHMSIZE
}
}
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size, s.readOnly)
if err != nil {
return 0, _IOERR_SHMMAP
}
s.regions = append(s.regions, r)
if s.readOnly {
return r.Ptr, _READONLY
}
return r.Ptr, _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
// Argument check.
if n <= 0 || offset < 0 || offset+n > _SHM_NLOCK {
panic(util.AssertErr())
}
switch flags {
case
_SHM_LOCK | _SHM_SHARED,
_SHM_LOCK | _SHM_EXCLUSIVE,
_SHM_UNLOCK | _SHM_SHARED,
_SHM_UNLOCK | _SHM_EXCLUSIVE:
//
default:
panic(util.AssertErr())
}
if n != 1 && flags&_SHM_EXCLUSIVE == 0 {
panic(util.AssertErr())
}
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+int64(offset), int64(n))
case flags&_SHM_SHARED != 0:
return osReadLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
case flags&_SHM_EXCLUSIVE != 0:
return osWriteLock(s.File, _SHM_BASE+int64(offset), int64(n), timeout)
default:
panic(util.AssertErr())
}
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.File == nil {
return
}
// Unmap regions.
for _, r := range s.regions {
r.Unmap()
}
s.regions = nil
// Close the file.
if delete {
os.Remove(s.path)
}
s.Close()
s.File = nil
}
func (s *vfsShm) shmBarrier() {
s.Lock()
//lint:ignore SA2001 memory barrier.
s.Unlock()
}
func (s *vfsShm) shmEnableBlocking(block bool) {
s.blocking = block
}

View file

@ -1,4 +1,4 @@
//go:build !(darwin || linux || freebsd || openbsd || netbsd || dragonfly || illumos || sqlite3_flock) || !(386 || arm || amd64 || arm64 || riscv64 || ppc64le) || sqlite3_noshm || sqlite3_nosys //go:build !(((linux || darwin || windows || freebsd || openbsd || netbsd || dragonfly || illumos) && (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !sqlite3_nosys) || sqlite3_flock || sqlite3_dotlk)
package vfs package vfs

182
vendor/github.com/ncruces/go-sqlite3/vfs/shm_windows.go generated vendored Normal file
View file

@ -0,0 +1,182 @@
//go:build (386 || arm || amd64 || arm64 || riscv64 || ppc64le) && !(sqlite3_dotlk || sqlite3_nosys)
package vfs
import (
"context"
"io"
"os"
"sync"
"time"
"github.com/tetratelabs/wazero/api"
"golang.org/x/sys/windows"
"github.com/ncruces/go-sqlite3/internal/util"
"github.com/ncruces/go-sqlite3/util/osutil"
)
type vfsShm struct {
*os.File
mod api.Module
alloc api.Function
free api.Function
path string
regions []*util.MappedRegion
shared [][]byte
shadow [][_WALINDEX_PGSZ]byte
ptrs []uint32
stack [1]uint64
blocking bool
sync.Mutex
}
var _ blockingSharedMemory = &vfsShm{}
func (s *vfsShm) Close() error {
// Unmap regions.
for _, r := range s.regions {
r.Unmap()
}
s.regions = nil
// Close the file.
return s.File.Close()
}
func (s *vfsShm) shmOpen() _ErrorCode {
if s.File == nil {
f, err := osutil.OpenFile(s.path, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
return _CANTOPEN
}
s.File = f
}
// Dead man's switch.
if rc := osWriteLock(s.File, _SHM_DMS, 1, 0); rc == _OK {
err := s.Truncate(0)
osUnlock(s.File, _SHM_DMS, 1)
if err != nil {
return _IOERR_SHMOPEN
}
}
return osReadLock(s.File, _SHM_DMS, 1, time.Millisecond)
}
func (s *vfsShm) shmMap(ctx context.Context, mod api.Module, id, size int32, extend bool) (uint32, _ErrorCode) {
// Ensure size is a multiple of the OS page size.
if size != _WALINDEX_PGSZ || (windows.Getpagesize()-1)&_WALINDEX_PGSZ != 0 {
return 0, _IOERR_SHMMAP
}
if s.mod == nil {
s.mod = mod
s.free = mod.ExportedFunction("sqlite3_free")
s.alloc = mod.ExportedFunction("sqlite3_malloc64")
}
if rc := s.shmOpen(); rc != _OK {
return 0, rc
}
defer s.shmAcquire()
// Check if file is big enough.
o, err := s.Seek(0, io.SeekEnd)
if err != nil {
return 0, _IOERR_SHMSIZE
}
if n := (int64(id) + 1) * int64(size); n > o {
if !extend {
return 0, _OK
}
if osAllocate(s.File, n) != nil {
return 0, _IOERR_SHMSIZE
}
}
// Maps regions into memory.
for int(id) >= len(s.shared) {
r, err := util.MapRegion(ctx, mod, s.File, int64(id)*int64(size), size)
if err != nil {
return 0, _IOERR_SHMMAP
}
s.regions = append(s.regions, r)
s.shared = append(s.shared, r.Data)
}
// Allocate shadow memory.
if int(id) >= len(s.shadow) {
s.shadow = append(s.shadow, make([][_WALINDEX_PGSZ]byte, int(id)-len(s.shadow)+1)...)
s.shadow[0][4] = 1 // force invalidation
}
// Allocate local memory.
for int(id) >= len(s.ptrs) {
s.stack[0] = uint64(size)
if err := s.alloc.CallWithStack(ctx, s.stack[:]); err != nil {
panic(err)
}
if s.stack[0] == 0 {
panic(util.OOMErr)
}
clear(util.View(s.mod, uint32(s.stack[0]), _WALINDEX_PGSZ))
s.ptrs = append(s.ptrs, uint32(s.stack[0]))
}
return s.ptrs[id], _OK
}
func (s *vfsShm) shmLock(offset, n int32, flags _ShmFlag) _ErrorCode {
switch {
case flags&_SHM_LOCK != 0:
defer s.shmAcquire()
case flags&_SHM_EXCLUSIVE != 0:
s.shmRelease()
}
var timeout time.Duration
if s.blocking {
timeout = time.Millisecond
}
switch {
case flags&_SHM_UNLOCK != 0:
return osUnlock(s.File, _SHM_BASE+uint32(offset), uint32(n))
case flags&_SHM_SHARED != 0:
return osReadLock(s.File, _SHM_BASE+uint32(offset), uint32(n), timeout)
case flags&_SHM_EXCLUSIVE != 0:
return osWriteLock(s.File, _SHM_BASE+uint32(offset), uint32(n), timeout)
default:
panic(util.AssertErr())
}
}
func (s *vfsShm) shmUnmap(delete bool) {
if s.File == nil {
return
}
s.shmRelease()
// Free local memory.
for _, p := range s.ptrs {
s.stack[0] = uint64(p)
if err := s.free.CallWithStack(context.Background(), s.stack[:]); err != nil {
panic(err)
}
}
s.ptrs = nil
s.shadow = nil
s.shared = nil
// Close the file.
s.Close()
s.File = nil
if delete {
os.Remove(s.path)
}
}
func (s *vfsShm) shmEnableBlocking(block bool) {
s.blocking = block
}

7
vendor/modules.txt vendored
View file

@ -24,10 +24,9 @@ codeberg.org/gruf/go-fastcopy
# codeberg.org/gruf/go-fastpath/v2 v2.0.0 # codeberg.org/gruf/go-fastpath/v2 v2.0.0
## explicit; go 1.14 ## explicit; go 1.14
codeberg.org/gruf/go-fastpath/v2 codeberg.org/gruf/go-fastpath/v2
# codeberg.org/gruf/go-ffmpreg v0.4.2 # codeberg.org/gruf/go-ffmpreg v0.6.0
## explicit; go 1.22.0 ## explicit; go 1.22.0
codeberg.org/gruf/go-ffmpreg/embed/ffmpeg codeberg.org/gruf/go-ffmpreg/embed
codeberg.org/gruf/go-ffmpreg/embed/ffprobe
codeberg.org/gruf/go-ffmpreg/wasm codeberg.org/gruf/go-ffmpreg/wasm
# codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf # codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf
## explicit; go 1.21 ## explicit; go 1.21
@ -515,7 +514,7 @@ github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit ## explicit
github.com/munnerz/goautoneg github.com/munnerz/goautoneg
# github.com/ncruces/go-sqlite3 v0.20.0 # github.com/ncruces/go-sqlite3 v0.20.2
## explicit; go 1.21 ## explicit; go 1.21
github.com/ncruces/go-sqlite3 github.com/ncruces/go-sqlite3
github.com/ncruces/go-sqlite3/driver github.com/ncruces/go-sqlite3/driver