update go-structr v0.2.0 => v0.3.0 to fix possible hash collision issues (#2586)
parent aa8bbe6ad2
commit 81198fa2d0
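The collision fix in go-structr v0.3.0 is visible in the vendored index.go changes below: v0.2.0 keyed each index map only by the 64-bit hash sum of the key parts, so two different keys that hashed to the same sum shared one bucket; v0.3.0 keeps the raw key parts in each index_entry, verifies them on lookup (is_equal(entry.key, key)), and drops colliding entries. A minimal standalone sketch of that verify-on-lookup pattern follows; the types and helper names are hypothetical and are not code from go-structr itself.

```go
package main

import "fmt"

// entry pairs a cached value with the raw key parts it was stored
// under, so a lookup can verify the key and not just the hash sum.
type entry struct {
	key   []any
	value string
}

// index keys its map by a hash sum (here a uint64 supplied by the
// caller), the way go-structr keys its maps by an xxh3 checksum.
type index struct {
	data map[uint64]entry
}

func equalKey(a, b []any) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// get only returns a hit when both the sum and the raw key match,
// so a colliding key cannot silently return another key's value.
func (i *index) get(sum uint64, key []any) (string, bool) {
	e, ok := i.data[sum]
	if !ok || !equalKey(e.key, key) {
		return "", false
	}
	return e.value, true
}

func main() {
	idx := index{data: map[uint64]entry{
		42: {key: []any{"alice", "example.org"}, value: "cached row"},
	}}
	fmt.Println(idx.get(42, []any{"alice", "example.org"})) // cached row true
	fmt.Println(idx.get(42, []any{"bob", "example.org"}))   // collision detected: false
}
```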
go.mod: 4 lines changed

@@ -5,6 +5,7 @@ go 1.21
 toolchain go1.21.3
 
 require (
+codeberg.org/gruf/go-bytes v1.0.2
 codeberg.org/gruf/go-bytesize v1.0.2
 codeberg.org/gruf/go-byteutil v1.2.0
 codeberg.org/gruf/go-cache/v3 v3.5.7
@@ -18,7 +19,7 @@ require (
 codeberg.org/gruf/go-runners v1.6.2
 codeberg.org/gruf/go-sched v1.2.3
 codeberg.org/gruf/go-store/v2 v2.2.4
-codeberg.org/gruf/go-structr v0.2.0
+codeberg.org/gruf/go-structr v0.3.0
 codeberg.org/superseriousbusiness/exif-terminator v0.7.0
 github.com/DmitriyVTitov/size v1.5.0
 github.com/KimMachineGun/automemlimit v0.5.0
@@ -80,7 +81,6 @@ require (
 require (
 codeberg.org/gruf/go-atomics v1.1.0 // indirect
 codeberg.org/gruf/go-bitutil v1.1.0 // indirect
-codeberg.org/gruf/go-bytes v1.0.2 // indirect
 codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
 codeberg.org/gruf/go-maps v1.0.3 // indirect
 github.com/aymerick/douceur v0.2.0 // indirect
go.sum: 4 lines changed

@@ -68,8 +68,8 @@ codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk
 codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A=
 codeberg.org/gruf/go-store/v2 v2.2.4 h1:8HO1Jh2gg7boQKA3hsDAIXd9zwieu5uXwDXEcTOD9js=
 codeberg.org/gruf/go-store/v2 v2.2.4/go.mod h1:zI4VWe5CpXAktYMtaBMrgA5QmO0sQH53LBRvfn1huys=
-codeberg.org/gruf/go-structr v0.2.0 h1:9U9uWae4j//HxpztDjw4z07WJi+8F8gMrRPLDBZ/rw4=
+codeberg.org/gruf/go-structr v0.3.0 h1:qaQz40LVm6dWDDp0pGsHbsbO0+XbqsXZ9N5YgqMmG78=
-codeberg.org/gruf/go-structr v0.2.0/go.mod h1:iTMx2Jw2yekHdg4VVY9Clz5u8Suf9veGdk3sWwNmM4M=
+codeberg.org/gruf/go-structr v0.3.0/go.mod h1:v9TsGsCBNNSVm/qeOuiblAeIS72YyxEIUoRpW8j4xm8=
 codeberg.org/superseriousbusiness/exif-terminator v0.7.0 h1:Y6VApSXhKqExG0H2hZ2JelRK4xmWdjDQjn13CpEfzko=
 codeberg.org/superseriousbusiness/exif-terminator v0.7.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
vendor/codeberg.org/gruf/go-structr/README.md (generated, vendored): 2 lines changed

@@ -2,6 +2,8 @@
 
 A performant struct caching library with automated indexing by arbitrary combinations of fields, including support for negative results (errors!). An example use case is in database lookups.
 
+Under the hood, go-structr maintains a hashmap per index, where each hashmap is a hashmap keyed with either 32bit, 48bit or 64bit (default) hash checksum of the inputted raw index keys. The hash checksum size can be controlled by the following Go build-tags: `structr_32bit_hash` `structr_48bit_hash`
+
 Some example code of how you can use `go-structr` in your application:
 ```golang
 type Cached struct {
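The README excerpt above is cut off at the start of its example. As a stopgap, here is a small usage sketch assembled only from the API surface visible in this diff (Cache, Config, IndexConfig, Put, GetOne); the struct fields, MaxSize value and copy function below are illustrative assumptions, not the README's actual example.

```go
package main

import (
	"fmt"

	"codeberg.org/gruf/go-structr"
)

// Cached stands in for the struct type being indexed;
// the fields here are illustrative.
type Cached struct {
	Username string
	Domain   string
}

func main() {
	var cache structr.Cache[*Cached]

	// Assumed configuration: one index over two string fields.
	// Indices, MaxSize and CopyValue are the Config fields read
	// by Cache.Init in this diff; the values chosen are arbitrary.
	cache.Init(structr.Config[*Cached]{
		Indices: []structr.IndexConfig{
			{Fields: "Username,Domain"},
		},
		MaxSize: 64,
		CopyValue: func(c *Cached) *Cached {
			c2 := *c
			return &c2
		},
	})

	cache.Put(&Cached{Username: "alice", Domain: "example.org"})

	// Key parts must match the number and types of the index fields.
	if v, ok := cache.GetOne("Username,Domain", "alice", "example.org"); ok {
		fmt.Println(v.Username, v.Domain)
	}
}
```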
vendor/codeberg.org/gruf/go-structr/cache.go (generated, vendored): 440 lines changed

@@ -3,7 +3,6 @@
 import (
 "context"
 "errors"
-"reflect"
 "sync"
 )
 
@@ -63,12 +62,7 @@ type Cache[StructType any] struct {
 
 // keeps track of all indexed results,
 // in order of last recently used (LRU).
-lruList list[*result[StructType]]
+lruList list
 
-// memory pools of common types.
-llsPool []*list[*result[StructType]]
-resPool []*result[StructType]
-keyPool []*indexkey[StructType]
-
 // max cache size, imposes size
 // limit on the lruList in order
@@ -84,7 +78,6 @@ type Cache[StructType any] struct {
 // - Cache{}.lruList
 // - Index{}.data
 // - Cache{} hook fns
-// - Cache{} pools
 mutex sync.Mutex
 }
 
@@ -112,7 +105,7 @@ func (c *Cache[T]) Init(config Config[T]) {
 c.mutex.Lock()
 c.indices = make([]Index[T], len(config.Indices))
 for i, cfg := range config.Indices {
-c.indices[i].init(cfg, config.MaxSize)
+init_index(&c.indices[i], cfg, config.MaxSize)
 }
 c.ignore = config.IgnoreErr
 c.copy = config.CopyValue
@@ -133,26 +126,15 @@ func (c *Cache[T]) Index(name string) *Index[T] {
 
 // GetOne fetches one value from the cache stored under index, using key generated from key parts.
 // Note that given number of key parts MUST match expected number and types of the given index name.
-func (c *Cache[T]) GetOne(index string, keyParts ...any) (T, bool) {
+func (c *Cache[T]) GetOne(index string, key ...any) (T, bool) {
-// Get index with name.
+return c.GetOneBy(c.Index(index), key...)
-idx := c.Index(index)
-
-// Generate index key from provided parts.
-key, ok := idx.hasher.FromParts(keyParts...)
-if !ok {
-var zero T
-return zero, false
-}
-
-// Fetch one value for key.
-return c.GetOneBy(idx, key)
 }
 
 // GetOneBy fetches value from cache stored under index, using precalculated index key.
-func (c *Cache[T]) GetOneBy(index *Index[T], key uint64) (T, bool) {
+func (c *Cache[T]) GetOneBy(index *Index[T], key ...any) (T, bool) {
 if index == nil {
 panic("no index given")
-} else if !index.unique {
+} else if !is_unique(index.flags) {
 panic("cannot get one by non-unique index")
 }
 values := c.GetBy(index, key)
@@ -165,44 +147,18 @@ func (c *Cache[T]) GetOneBy(index *Index[T], key uint64) (T, bool) {
 
 // Get fetches values from the cache stored under index, using keys generated from given key parts.
 // Note that each number of key parts MUST match expected number and types of the given index name.
-func (c *Cache[T]) Get(index string, keysParts ...[]any) []T {
+func (c *Cache[T]) Get(index string, keys ...[]any) []T {
-// Get index with name.
+return c.GetBy(c.Index(index), keys...)
-idx := c.Index(index)
-
-// Preallocate expected keys slice length.
-keys := make([]uint64, 0, len(keysParts))
-
-// Acquire hasher.
-h := getHasher()
-
-for _, parts := range keysParts {
-h.Reset()
-
-// Generate key from provided parts into buffer.
-key, ok := idx.hasher.fromParts(h, parts...)
-if !ok {
-continue
-}
-
-// Append hash sum to keys.
-keys = append(keys, key)
-}
-
-// Done with h.
-putHasher(h)
-
-// Continue fetching values.
-return c.GetBy(idx, keys...)
 }
 
 // GetBy fetches values from the cache stored under index, using precalculated index keys.
-func (c *Cache[T]) GetBy(index *Index[T], keys ...uint64) []T {
+func (c *Cache[T]) GetBy(index *Index[T], keys ...[]any) []T {
 if index == nil {
 panic("no index given")
 }
 
-// Preallocate a slice of est. len.
+// Acquire hasher.
-values := make([]T, 0, len(keys))
+h := get_hasher()
 
 // Acquire lock.
 c.mutex.Lock()
@@ -213,40 +169,61 @@ func (c *Cache[T]) GetBy(index *Index[T], keys ...uint64) []T {
 panic("not initialized")
 }
 
-// Check index for all keys.
+// Preallocate expected ret slice.
+values := make([]T, 0, len(keys))
+
 for _, key := range keys {
 
-// Get indexed results.
+// Generate sum from provided key.
-list := index.data[key]
+sum, ok := index_hash(index, h, key)
+if !ok {
+continue
+}
 
-if list != nil {
+// Get indexed results list at key.
-// Concatenate all results with values.
+list := index_get(index, sum, key)
-list.rangefn(func(e *elem[*result[T]]) {
+if list == nil {
-if e.Value.err != nil {
+continue
-return
+}
-}
 
-// Append a copy of value.
+// Concatenate all *values* from non-err cached results.
-value := c.copy(e.Value.value)
+list_rangefn(list, func(e *list_elem) {
+entry := (*index_entry)(e.data)
+res := entry.result
+
+switch value := res.data.(type) {
+case T:
+// Append value COPY.
+value = c.copy(value)
 values = append(values, value)
 
-// Push to front of LRU list, USING
+case error:
-// THE RESULT'S LRU ENTRY, NOT THE
+// Don't bump
-// INDEX KEY ENTRY. VERY IMPORTANT!!
+// for errors.
-c.lruList.moveFront(&e.Value.entry)
+return
-})
+}
-}
 
+// Push to front of LRU list, USING
+// THE RESULT'S LRU ENTRY, NOT THE
+// INDEX KEY ENTRY. VERY IMPORTANT!!
+list_move_front(&c.lruList, &res.elem)
+})
 }
 
 // Done with lock.
 c.mutex.Unlock()
 
+// Done with h.
+hash_pool.Put(h)
+
 return values
 }
 
 // Put will insert the given values into cache,
 // calling any invalidate hook on each value.
 func (c *Cache[T]) Put(values ...T) {
+var z Hash
+
 // Acquire lock.
 c.mutex.Lock()
 
@@ -261,7 +238,7 @@ func (c *Cache[T]) Put(values ...T) {
 
 // Store all the passed values.
 for _, value := range values {
-c.store(nil, 0, value, nil)
+c.store_value(nil, z, nil, value)
 }
 
 // Done with lock.
@@ -279,23 +256,16 @@ func (c *Cache[T]) Put(values ...T) {
 // LoadOne fetches one result from the cache stored under index, using key generated from key parts.
 // In the case that no result is found, the provided load callback will be used to hydrate the cache.
 // Note that given number of key parts MUST match expected number and types of the given index name.
-func (c *Cache[T]) LoadOne(index string, load func() (T, error), keyParts ...any) (T, error) {
+func (c *Cache[T]) LoadOne(index string, load func() (T, error), key ...any) (T, error) {
-// Get index with name.
+return c.LoadOneBy(c.Index(index), load, key...)
-idx := c.Index(index)
-
-// Generate cache from from provided parts.
-key, _ := idx.hasher.FromParts(keyParts...)
-
-// Continue loading this result.
-return c.LoadOneBy(idx, load, key)
 }
 
 // LoadOneBy fetches one result from the cache stored under index, using precalculated index key.
 // In the case that no result is found, provided load callback will be used to hydrate the cache.
-func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64) (T, error) {
+func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key ...any) (T, error) {
 if index == nil {
 panic("no index given")
-} else if !index.unique {
+} else if !is_unique(index.flags) {
 panic("cannot get one by non-unique index")
 }
 
@@ -311,6 +281,15 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
 err error
 )
 
+// Acquire hasher.
+h := get_hasher()
+
+// Generate sum from provided key.
+sum, _ := index_hash(index, h, key)
+
+// Done with h.
+hash_pool.Put(h)
+
 // Acquire lock.
 c.mutex.Lock()
 
@@ -324,26 +303,26 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
 panic("not initialized")
 }
 
-// Get indexed results.
+// Get indexed list at hash key.
-list := index.data[key]
+list := index_get(index, sum, key)
 
-if ok = (list != nil && list.head != nil); ok {
+if ok = (list != nil); ok {
-e := list.head
+entry := (*index_entry)(list.head.data)
+res := entry.result
 
-// Extract val / err.
+switch data := res.data.(type) {
-val = e.Value.value
+case T:
-err = e.Value.err
+// Return value COPY.
+val = c.copy(data)
-if err == nil {
+case error:
-// We only ever ret
+// Return error.
-// a COPY of value.
+err = data
-val = c.copy(val)
 }
 
 // Push to front of LRU list, USING
 // THE RESULT'S LRU ENTRY, NOT THE
 // INDEX KEY ENTRY. VERY IMPORTANT!!
-c.lruList.moveFront(&e.Value.entry)
+list_move_front(&c.lruList, &res.elem)
 }
 
 // Done with lock.
@@ -370,7 +349,11 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
 // Note this handles copying of
 // the provided value, so it is
 // safe for us to return as-is.
-c.store(index, key, val, err)
+if err != nil {
+c.store_error(index, sum, key, err)
+} else {
+c.store_value(index, sum, key, val)
+}
 
 // Done with lock.
 c.mutex.Unlock()
@@ -384,7 +367,7 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
 // callback to hydrate the cache with any other values. Example usage here is that you may see which values are cached using 'get', and load
 // the remaining uncached values using 'load', to minimize database queries. Cached error results are not included or returned by this func.
 // Note that given number of key parts MUST match expected number and types of the given index name, in those provided to the get callback.
-func (c *Cache[T]) Load(index string, get func(load func(keyParts ...any) bool), load func() ([]T, error)) (values []T, err error) {
+func (c *Cache[T]) Load(index string, get func(load func(key ...any) bool), load func() ([]T, error)) (values []T, err error) {
 return c.LoadBy(c.Index(index), get, load)
 }
 
@@ -394,11 +377,14 @@ func (c *Cache[T]) Load(index string, get func(load func(keyParts ...any) bool),
 // to hydrate the cache with any other values. Example usage here is that you may see which values are cached using 'get', and load the
 // remaining uncached values using 'load', to minimize database queries. Cached error results are not included or returned by this func.
 // Note that given number of key parts MUST match expected number and types of the given index name, in those provided to the get callback.
-func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(keyParts ...any) bool), load func() ([]T, error)) (values []T, err error) {
+func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(key ...any) bool), load func() ([]T, error)) (values []T, err error) {
 if index == nil {
 panic("no index given")
 }
 
+// Acquire hasher.
+h := get_hasher()
+
 // Acquire lock.
 c.mutex.Lock()
 
@@ -417,58 +403,60 @@ func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(keyParts ...any) b
 }
 }()
 
-// Acquire hasher.
+// Pass loader to user func.
-h := getHasher()
+get(func(key ...any) bool {
 
-// Pass cache check to user func.
+// Generate sum from provided key.
-get(func(keyParts ...any) bool {
+sum, ok := index_hash(index, h, key)
-h.Reset()
-
-// Generate index key from provided key parts.
-key, ok := index.hasher.fromParts(h, keyParts...)
 if !ok {
 return false
 }
 
-// Get all indexed results.
+// Get indexed results at hash key.
-list := index.data[key]
+list := index_get(index, sum, key)
+if list == nil {
-if list != nil && list.len > 0 {
+return false
-// Value length before
-// any below appends.
-before := len(values)
-
-// Concatenate all results with values.
-list.rangefn(func(e *elem[*result[T]]) {
-if e.Value.err != nil {
-return
-}
-
-// Append a copy of value.
-value := c.copy(e.Value.value)
-values = append(values, value)
-
-// Push to front of LRU list, USING
-// THE RESULT'S LRU ENTRY, NOT THE
-// INDEX KEY ENTRY. VERY IMPORTANT!!
-c.lruList.moveFront(&e.Value.entry)
-})
-
-// Only if values changed did
-// we actually find anything.
-return len(values) != before
 }
 
-return false
+// Value length before
-})
+// any below appends.
+before := len(values)
+
-// Done with h.
+// Concatenate all *values* from non-err cached results.
-putHasher(h)
+list_rangefn(list, func(e *list_elem) {
+entry := (*index_entry)(e.data)
+res := entry.result
+
+switch value := res.data.(type) {
+case T:
+// Append value COPY.
+value = c.copy(value)
+values = append(values, value)
+
+case error:
+// Don't bump
+// for errors.
+return
+}
+
+// Push to front of LRU list, USING
+// THE RESULT'S LRU ENTRY, NOT THE
+// INDEX KEY ENTRY. VERY IMPORTANT!!
+list_move_front(&c.lruList, &res.elem)
+})
+
+// Only if values changed did
+// we actually find anything.
+return len(values) != before
+})
+
 // Done with lock.
 c.mutex.Unlock()
 unlocked = true
 
+// Done with h.
+hash_pool.Put(h)
+
 // Load uncached values.
 uncached, err := load()
 if err != nil {
@@ -514,26 +502,29 @@ func (c *Cache[T]) Store(value T, store func() error) error {
 }
 
 // Invalidate generates index key from parts and invalidates all stored under it.
-func (c *Cache[T]) Invalidate(index string, keyParts ...any) {
+func (c *Cache[T]) Invalidate(index string, key ...any) {
-// Get index with name.
+c.InvalidateBy(c.Index(index), key...)
-idx := c.Index(index)
-
-// Generate cache from from provided parts.
-key, ok := idx.hasher.FromParts(keyParts...)
-if !ok {
-return
-}
-
-// Continue invalidation.
-c.InvalidateBy(idx, key)
 }
 
 // InvalidateBy invalidates all results stored under index key.
-func (c *Cache[T]) InvalidateBy(index *Index[T], key uint64) {
+func (c *Cache[T]) InvalidateBy(index *Index[T], key ...any) {
 if index == nil {
 panic("no index given")
 }
 
+// Acquire hasher.
+h := get_hasher()
+
+// Generate sum from provided key.
+sum, ok := index_hash(index, h, key)
+
+// Done with h.
+hash_pool.Put(h)
+
+if !ok {
+return
+}
+
 var values []T
 
 // Acquire lock.
@@ -544,9 +535,13 @@ func (c *Cache[T]) InvalidateBy(index *Index[T], key uint64) {
 
 // Delete all results under key from index, collecting
 // value results and dropping them from all their indices.
-index_delete(c, index, key, func(del *result[T]) {
+index_delete(c, index, sum, key, func(del *result) {
-if del.err == nil {
+switch value := del.data.(type) {
-values = append(values, del.value)
+case T:
+// Append value COPY.
+value = c.copy(value)
+values = append(values, value)
+case error:
 }
 c.delete(del)
 })
@@ -592,7 +587,8 @@ func (c *Cache[T]) Trim(perc float64) {
 }
 
 // Drop oldest from cache.
-c.delete(oldest.Value)
+res := (*result)(oldest.data)
+c.delete(res)
 }
 
 // Done with lock.
@@ -602,16 +598,6 @@ func (c *Cache[T]) Trim(perc float64) {
 // Clear empties the cache by calling .Trim(0).
 func (c *Cache[T]) Clear() { c.Trim(0) }
 
-// Clean drops unused items from its memory pools.
-// Useful to free memory if cache has downsized.
-func (c *Cache[T]) Clean() {
-c.mutex.Lock()
-c.llsPool = nil
-c.resPool = nil
-c.keyPool = nil
-c.mutex.Unlock()
-}
-
 // Len returns the current length of cache.
 func (c *Cache[T]) Len() int {
 c.mutex.Lock()
@@ -628,91 +614,93 @@ func (c *Cache[T]) Cap() int {
 return m
 }
 
-// store will store the given value / error result in the cache, storing it under the
+func (c *Cache[T]) store_value(index *Index[T], hash Hash, key []any, value T) {
-// already provided index + key if provided, else generating keys from provided value.
-func (c *Cache[T]) store(index *Index[T], key uint64, value T, err error) {
 // Acquire new result.
 res := result_acquire(c)
 
 if index != nil {
-// Append result to the provided
+// Append result to the provided index
-// precalculated key and its index.
+// with precalculated key / its hash.
-index_append(c, index, key, res)
+index_append(c, index, hash, key, res)
 
-} else if err != nil {
-
-// This is an error result without
-// an index provided, nothing we
-// can do here so release result.
-result_release(c, res)
-return
 }
 
-// Set and check the result error.
+// Create COPY of value.
-if res.err = err; res.err == nil {
+value = c.copy(value)
+res.data = value
 
-// This is value result, we need to
+// Acquire hasher.
-// store it under all other indices
+h := get_hasher()
-// other than the provided.
-//
-// Create COPY of value.
-res.value = c.copy(value)
-
-// Get reflected value of incoming
+for i := range c.indices {
-// value, used during cache key gen.
+// Get current index ptr.
-rvalue := reflect.ValueOf(value)
+idx := &(c.indices[i])
 
-// Acquire hasher.
+if idx == index {
-h := getHasher()
+// Already stored under
+// this index, ignore.
-for i := range c.indices {
+continue
-// Get current index ptr.
-idx := &(c.indices[i])
-
-if idx == index {
-// Already stored under
-// this index, ignore.
-continue
-}
-
-// Generate hash from reflect value,
-// (this ignores zero value keys).
-h.Reset() // reset buf first
-key, ok := idx.hasher.fromRValue(h, rvalue)
-if !ok {
-continue
-}
-
-// Append result to index at key.
-index_append(c, idx, key, res)
 }
 
-// Done with h.
+// Get key and hash sum for this index.
-putHasher(h)
+key, sum, ok := index_key(idx, h, value)
+if !ok {
+continue
+}
+
+// Append result to index at key.
+index_append(c, idx, sum, key, res)
 }
 
+// Done with h.
+hash_pool.Put(h)
+
 if c.lruList.len > c.maxSize {
 // Cache has hit max size!
 // Drop the oldest element.
-res := c.lruList.tail.Value
+ptr := c.lruList.tail.data
+res := (*result)(ptr)
+c.delete(res)
+}
+}
+
+func (c *Cache[T]) store_error(index *Index[T], hash Hash, key []any, err error) {
+if index == nil {
+// nothing we
+// can do here.
+return
+}
+
+// Acquire new result.
+res := result_acquire(c)
+res.data = err
+
+// Append result to the provided index
+// with precalculated key / its hash.
+index_append(c, index, hash, key, res)
+
+if c.lruList.len > c.maxSize {
+// Cache has hit max size!
+// Drop the oldest element.
+ptr := c.lruList.tail.data
+res := (*result)(ptr)
 c.delete(res)
 }
 }
 
 // delete will delete the given result from the cache, deleting
 // it from all indices it is stored under, and main LRU list.
-func (c *Cache[T]) delete(res *result[T]) {
+func (c *Cache[T]) delete(res *result) {
-for len(res.keys) != 0 {
+for len(res.indexed) != 0 {
 
-// Pop indexkey at end of list.
+// Pop last indexed entry from list.
-ikey := res.keys[len(res.keys)-1]
+entry := res.indexed[len(res.indexed)-1]
-res.keys = res.keys[:len(res.keys)-1]
+res.indexed = res.indexed[:len(res.indexed)-1]
 
-// Drop this result from list at key.
+// Drop entry from index.
-index_deleteOne(c, ikey.index, ikey)
+index_delete_entry(c, entry)
 
-// Release ikey to pool.
+// Release to memory pool.
-indexkey_release(c, ikey)
+index_entry_release(entry)
 }
 
 // Release res to pool.
vendor/codeberg.org/gruf/go-structr/debug.go (generated, vendored): 41 lines changed, file deleted

@@ -1,41 +0,0 @@
-package structr
-
-// String returns a useful debugging repr of result.
-// func (r *result[T]) String() string {
-// keysbuf := getBuf()
-// keysbuf.B = append(keysbuf.B, '[')
-// for i := range r.keys {
-// keysbuf.B = strconv.AppendQuote(keysbuf.B, r.keys[i].key)
-// keysbuf.B = append(keysbuf.B, ',')
-// }
-// if len(keysbuf.B) > 0 {
-// keysbuf.B = keysbuf.B[:len(keysbuf.B)-1]
-// }
-// keysbuf.B = append(keysbuf.B, ']')
-// str := fmt.Sprintf("{value=%v err=%v keys=%s}", r.value, r.err, keysbuf.B)
-// putBuf(keysbuf)
-// return str
-// }
-
-// String returns a useful debugging repr of index.
-// func (i *Index[T]) String() string {
-// databuf := getBuf()
-// for key, values := range i.data {
-// databuf.WriteString("key")
-// databuf.B = strconv.AppendQuote(databuf.B, key)
-// databuf.B = append(databuf.B, '=')
-// fmt.Fprintf(databuf, "%v", values)
-// databuf.B = append(databuf.B, ' ')
-// }
-// if len(i.data) > 0 {
-// databuf.B = databuf.B[:len(databuf.B)-1]
-// }
-// str := fmt.Sprintf("{name=%s data={%s}}", i.name, databuf.B)
-// putBuf(databuf)
-// return str
-// }
-
-// String returns a useful debugging repr of indexkey.
-// func (i *indexkey[T]) String() string {
-// return i.index.name + "[" + strconv.Quote(i.key) + "]"
-// }
vendor/codeberg.org/gruf/go-structr/hash.go (generated, vendored): 74 lines changed

@@ -2,11 +2,50 @@
 
 import (
 "reflect"
+"sync"
 "unsafe"
 
 "github.com/zeebo/xxh3"
 )
 
+var hash_pool sync.Pool
+
+func get_hasher() *xxh3.Hasher {
+v := hash_pool.Get()
+if v == nil {
+v = new(xxh3.Hasher)
+}
+return v.(*xxh3.Hasher)
+}
+
+func hash_sum(fields []structfield, h *xxh3.Hasher, key []any) (Hash, bool) {
+if len(key) != len(fields) {
+panicf("incorrect number key parts: want=%d received=%d",
+len(key),
+len(fields),
+)
+}
+var zero bool
+h.Reset()
+for i, part := range key {
+zero = fields[i].hasher(h, part) || zero
+}
+// See: https://github.com/Cyan4973/xxHash/issues/453#issuecomment-696838445
+//
+// In order to extract 32-bit from a good 64-bit hash result,
+// there are many possible choices, which are all valid.
+// I would typically grab the lower 32-bit and call it a day.
+//
+// Grabbing any other 32-bit (the upper part for example) is fine too.
+//
+// xoring higher and lower bits makes more sense whenever the produced hash offers dubious quality.
+// FNV, for example, has poor mixing in its lower bits, so it's better to mix with the higher bits.
+//
+// XXH3 already performs significant output mixing before returning the data,
+// so it's not beneficial to add another xorfold stage.
+return uint64ToHash(h.Sum64()), zero
+}
+
 func hasher(t reflect.Type) func(*xxh3.Hasher, any) bool {
 switch t.Kind() {
 case reflect.Int,
@@ -137,13 +176,13 @@ func hasher(t reflect.Type) func(*xxh3.Hasher, any) bool {
 }
 
 func hash8bit(h *xxh3.Hasher, a any) bool {
-u := *(*uint8)(iface_value(a))
+u := *(*uint8)(data_ptr(a))
 _, _ = h.Write([]byte{u})
 return u == 0
 }
 
 func hash8bitptr(h *xxh3.Hasher, a any) bool {
-u := (*uint8)(iface_value(a))
+u := (*uint8)(data_ptr(a))
 if u == nil {
 _, _ = h.Write([]byte{
 0,
@@ -159,13 +198,13 @@ func hash8bitptr(h *xxh3.Hasher, a any) bool {
 }
 
 func hash8bitslice(h *xxh3.Hasher, a any) bool {
-b := *(*[]byte)(iface_value(a))
+b := *(*[]byte)(data_ptr(a))
 _, _ = h.Write(b)
 return b == nil
 }
 
 func hash16bit(h *xxh3.Hasher, a any) bool {
-u := *(*uint16)(iface_value(a))
+u := *(*uint16)(data_ptr(a))
 _, _ = h.Write([]byte{
 byte(u),
 byte(u >> 8),
@@ -174,7 +213,7 @@ func hash16bit(h *xxh3.Hasher, a any) bool {
 }
 
 func hash16bitptr(h *xxh3.Hasher, a any) bool {
-u := (*uint16)(iface_value(a))
+u := (*uint16)(data_ptr(a))
 if u == nil {
 _, _ = h.Write([]byte{
 0,
@@ -191,7 +230,7 @@ func hash16bitptr(h *xxh3.Hasher, a any) bool {
 }
 
 func hash16bitslice(h *xxh3.Hasher, a any) bool {
-u := *(*[]uint16)(iface_value(a))
+u := *(*[]uint16)(data_ptr(a))
 for i := range u {
 _, _ = h.Write([]byte{
 byte(u[i]),
@@ -202,7 +241,7 @@ func hash16bitslice(h *xxh3.Hasher, a any) bool {
 }
 
 func hash32bit(h *xxh3.Hasher, a any) bool {
-u := *(*uint32)(iface_value(a))
+u := *(*uint32)(data_ptr(a))
 _, _ = h.Write([]byte{
 byte(u),
 byte(u >> 8),
@@ -213,7 +252,7 @@ func hash32bit(h *xxh3.Hasher, a any) bool {
 }
 
 func hash32bitptr(h *xxh3.Hasher, a any) bool {
-u := (*uint32)(iface_value(a))
+u := (*uint32)(data_ptr(a))
 if u == nil {
 _, _ = h.Write([]byte{
 0,
@@ -232,7 +271,7 @@ func hash32bitptr(h *xxh3.Hasher, a any) bool {
 }
 
 func hash32bitslice(h *xxh3.Hasher, a any) bool {
-u := *(*[]uint32)(iface_value(a))
+u := *(*[]uint32)(data_ptr(a))
 for i := range u {
 _, _ = h.Write([]byte{
 byte(u[i]),
@@ -245,7 +284,7 @@ func hash32bitslice(h *xxh3.Hasher, a any) bool {
 }
 
 func hash64bit(h *xxh3.Hasher, a any) bool {
-u := *(*uint64)(iface_value(a))
+u := *(*uint64)(data_ptr(a))
 _, _ = h.Write([]byte{
 byte(u),
 byte(u >> 8),
@@ -260,7 +299,7 @@ func hash64bit(h *xxh3.Hasher, a any) bool {
 }
 
 func hash64bitptr(h *xxh3.Hasher, a any) bool {
-u := (*uint64)(iface_value(a))
+u := (*uint64)(data_ptr(a))
 if u == nil {
 _, _ = h.Write([]byte{
 0,
@@ -283,7 +322,7 @@ func hash64bitptr(h *xxh3.Hasher, a any) bool {
 }
 
 func hash64bitslice(h *xxh3.Hasher, a any) bool {
-u := *(*[]uint64)(iface_value(a))
+u := *(*[]uint64)(data_ptr(a))
 for i := range u {
 _, _ = h.Write([]byte{
 byte(u[i]),
@@ -300,13 +339,13 func hash64bitslice(h *xxh3.Hasher, a any) bool {
 }
 
 func hashstring(h *xxh3.Hasher, a any) bool {
-s := *(*string)(iface_value(a))
+s := *(*string)(data_ptr(a))
 _, _ = h.WriteString(s)
 return s == ""
 }
 
 func hashstringptr(h *xxh3.Hasher, a any) bool {
-s := (*string)(iface_value(a))
+s := (*string)(data_ptr(a))
 if s == nil {
 _, _ = h.Write([]byte{
 0,
@@ -322,7 +361,7 @@ func hashstringptr(h *xxh3.Hasher, a any) bool {
 }
 
 func hashstringslice(h *xxh3.Hasher, a any) bool {
-s := *(*[]string)(iface_value(a))
+s := *(*[]string)(data_ptr(a))
 for i := range s {
 _, _ = h.WriteString(s[i])
 }
@@ -363,8 +402,3 @@ func hashjsonmarshaler(h *xxh3.Hasher, a any) bool {
 _, _ = h.Write(b)
 return b == nil
 }
-
-func iface_value(a any) unsafe.Pointer {
-type eface struct{ _, v unsafe.Pointer }
-return (*eface)(unsafe.Pointer(&a)).v
-}
vendor/codeberg.org/gruf/go-structr/hash_32.go (generated, vendored, new file): 14 lines added

@@ -0,0 +1,14 @@
+//go:build structr_32bit_hash
+// +build structr_32bit_hash
+
+package structr
+
+// Hash is the current compiler
+// flag defined cache key hash
+// checksum type. Here; uint32.
+type Hash uint32
+
+// uint64ToHash converts uint64 to currently Hash type.
+func uint64ToHash(u uint64) Hash {
+return Hash(u >> 32)
+}
vendor/codeberg.org/gruf/go-structr/hash_48.go (generated, vendored, new file): 21 lines added

@@ -0,0 +1,21 @@
+//go:build structr_48bit_hash
+// +build structr_48bit_hash
+
+package structr
+
+// Hash is the current compiler
+// flag defined cache key hash
+// checksum type. Here; uint48.
+type Hash [6]byte
+
+// uint64ToHash converts uint64 to currently Hash type.
+func uint64ToHash(u uint64) Hash {
+return Hash{
+0: byte(u),
+1: byte(u >> 8),
+2: byte(u >> 16),
+3: byte(u >> 24),
+4: byte(u >> 32),
+5: byte(u >> 40),
+}
+}
vendor/codeberg.org/gruf/go-structr/hash_64.go (generated, vendored, new file): 14 lines added

@@ -0,0 +1,14 @@
+//go:build !structr_32bit_hash && !structr_48bit_hash
+// +build !structr_32bit_hash,!structr_48bit_hash
+
+package structr
+
+// Hash is the current compiler
+// flag defined cache key hash
+// checksum type. Here; uint64.
+type Hash uint64
+
+// uint64ToHash converts uint64 to currently Hash type.
+func uint64ToHash(u uint64) Hash {
+return Hash(u)
+}
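The three new files above choose the width of the cache's Hash checksum at build time via the structr_32bit_hash and structr_48bit_hash build tags (64-bit is the default). For reference, a standalone sketch of what each variant keeps from the full 64-bit xxh3 sum; the function names below are made up for illustration and simply mirror the uint64ToHash implementations shown above.

```go
package main

import "fmt"

// What each build-tag variant of uint64ToHash keeps from the full
// 64-bit xxh3 sum (names are illustrative; the real conversions live
// in hash_32.go, hash_48.go and hash_64.go above).
func to32(u uint64) uint32 { return uint32(u >> 32) } // upper 32 bits
func to64(u uint64) uint64 { return u }               // full sum

func to48(u uint64) [6]byte { // low 48 bits, little-endian
	return [6]byte{
		byte(u), byte(u >> 8), byte(u >> 16),
		byte(u >> 24), byte(u >> 32), byte(u >> 40),
	}
}

func main() {
	const sum = 0x0123456789abcdef
	fmt.Printf("32-bit: %x\n48-bit: %x\n64-bit: %x\n", to32(sum), to48(sum), to64(sum))
}
```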
vendor/codeberg.org/gruf/go-structr/hasher.go (generated, vendored): 176 lines changed, file deleted

@@ -1,176 +0,0 @@
-package structr
-
-import (
-"reflect"
-"strings"
-
-"github.com/zeebo/xxh3"
-)
-
-// Hasher provides hash checksumming for a configured
-// index, based on an arbitrary combination of generic
-// paramter struct type's fields. This provides hashing
-// both by input of the fields separately, or passing
-// an instance of the generic paramter struct type.
-//
-// Supported field types by the hasher include:
-// - ~int
-// - ~int8
-// - ~int16
-// - ~int32
-// - ~int64
-// - ~float32
-// - ~float64
-// - ~string
-// - slices / ptrs of the above
-type Hasher[StructType any] struct {
-
-// fields contains our representation
-// of struct fields contained in the
-// creation of sums by this hasher.
-fields []structfield
-
-// zero specifies whether zero
-// value fields are permitted.
-zero bool
-}
-
-// NewHasher returns a new initialized Hasher for the receiving generic
-// parameter type, comprising of the given field strings, and whether to
-// allow zero values to be incldued within generated hash checksum values.
-func NewHasher[T any](fields []string, allowZero bool) Hasher[T] {
-var h Hasher[T]
-
-// Preallocate expected struct field slice.
-h.fields = make([]structfield, len(fields))
-
-// Get the reflected struct ptr type.
-t := reflect.TypeOf((*T)(nil)).Elem()
-
-for i, fieldName := range fields {
-// Split name to account for nesting.
-names := strings.Split(fieldName, ".")
-
-// Look for a usable struct field from type.
-sfield, ok := findField(t, names, allowZero)
-if !ok {
-panicf("failed finding field: %s", fieldName)
-}
-
-// Set parsed struct field.
-h.fields[i] = sfield
-}
-
-// Set config flags.
-h.zero = allowZero
-
-return h
-}
-
-// FromParts generates hash checksum (used as index key) from individual key parts.
-func (h *Hasher[T]) FromParts(parts ...any) (sum uint64, ok bool) {
-hh := getHasher()
-sum, ok = h.fromParts(hh, parts...)
-putHasher(hh)
-return
-
-}
-
-func (h *Hasher[T]) fromParts(hh *xxh3.Hasher, parts ...any) (sum uint64, ok bool) {
-if len(parts) != len(h.fields) {
-// User must provide correct number of parts for key.
-panicf("incorrect number key parts: want=%d received=%d",
-len(parts),
-len(h.fields),
-)
-}
-
-if h.zero {
-// Zero values are permitted,
-// mangle all values and ignore
-// zero value return booleans.
-for i, part := range parts {
-
-// Write mangled part to hasher.
-_ = h.fields[i].hasher(hh, part)
-}
-} else {
-// Zero values are NOT permitted.
-for i, part := range parts {
-
-// Write mangled field to hasher.
-z := h.fields[i].hasher(hh, part)
-
-if z {
-// The value was zero for
-// this type, return early.
-return 0, false
-}
-}
-}
-
-return hh.Sum64(), true
-}
-
-// FromValue generates hash checksum (used as index key) from a value, via reflection.
-func (h *Hasher[T]) FromValue(value T) (sum uint64, ok bool) {
-rvalue := reflect.ValueOf(value)
-hh := getHasher()
-sum, ok = h.fromRValue(hh, rvalue)
-putHasher(hh)
-return
-}
-
-func (h *Hasher[T]) fromRValue(hh *xxh3.Hasher, rvalue reflect.Value) (uint64, bool) {
-// Follow any ptrs leading to value.
-for rvalue.Kind() == reflect.Pointer {
-rvalue = rvalue.Elem()
-}
-
-if h.zero {
-// Zero values are permitted,
-// mangle all values and ignore
-// zero value return booleans.
-for i := range h.fields {
-
-// Get the reflect value's field at idx.
-fv := rvalue.FieldByIndex(h.fields[i].index)
-fi := fv.Interface()
-
-// Write mangled field to hasher.
-_ = h.fields[i].hasher(hh, fi)
-}
-} else {
-// Zero values are NOT permitted.
-for i := range h.fields {
-
-// Get the reflect value's field at idx.
-fv := rvalue.FieldByIndex(h.fields[i].index)
-fi := fv.Interface()
-
-// Write mangled field to hasher.
-z := h.fields[i].hasher(hh, fi)
-
-if z {
-// The value was zero for
-// this type, return early.
-return 0, false
-}
-}
-}
-
-return hh.Sum64(), true
-}
-
-type structfield struct {
-// index is the reflected index
-// of this field (this takes into
-// account struct nesting).
-index []int
-
-// hasher is the relevant function
-// for hashing value of structfield
-// into the supplied hashbuf, where
-// return value indicates if zero.
-hasher func(*xxh3.Hasher, any) bool
-}
403
vendor/codeberg.org/gruf/go-structr/index.go
generated
vendored
403
vendor/codeberg.org/gruf/go-structr/index.go
generated
vendored
|
@ -1,7 +1,12 @@
|
||||||
package structr
|
package structr
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/zeebo/xxh3"
|
||||||
)
|
)
|
||||||
|
|
||||||
// IndexConfig defines config variables
|
// IndexConfig defines config variables
|
||||||
|
@ -13,6 +18,18 @@ type IndexConfig struct {
|
||||||
// keys for this index. Nested fields should
|
// keys for this index. Nested fields should
|
||||||
// be specified using periods. An example:
|
// be specified using periods. An example:
|
||||||
// "Username,Favorites.Color"
|
// "Username,Favorites.Color"
|
||||||
|
//
|
||||||
|
// Field types supported include:
|
||||||
|
// - ~int
|
||||||
|
// - ~int8
|
||||||
|
// - ~int16
|
||||||
|
// - ~int32
|
||||||
|
// - ~int64
|
||||||
|
// - ~float32
|
||||||
|
// - ~float64
|
||||||
|
// - ~string
|
||||||
|
// - slices of above
|
||||||
|
// - ptrs of above
|
||||||
Fields string
|
Fields string
|
||||||
|
|
||||||
// Multiple indicates whether to accept multiple
|
// Multiple indicates whether to accept multiple
|
||||||
|
@ -32,12 +49,12 @@ type IndexConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Index is an exposed Cache internal model, used to
|
// Index is an exposed Cache internal model, used to
|
||||||
// generate keys and store struct results by the init
|
// extract struct keys, generate hash checksums for them
|
||||||
// defined key generation configuration. This model is
|
// and store struct results by the init defined config.
|
||||||
// exposed to provide faster lookups in the case that
|
// This model is exposed to provide faster lookups in the
|
||||||
-// you would like to manually provide the used index
-// via the Cache.___By() series of functions, or access
-// the underlying index key generator.
+// case that you would like to manually provide the used
+// index via the Cache.___By() series of functions, or
+// access the underlying index key generator.
 type Index[StructType any] struct {

     // name is the actual name of this
@@ -45,68 +62,168 @@ type Index[StructType any] struct {
     // string value of contained fields.
     name string

-    // struct field key hasher.
-    hasher Hasher[StructType]
+    // backing data store of the index, containing
+    // the cached results contained within wrapping
+    // index_entry{} which also contains the exact
+    // key each result is stored under. the hash map
+    // only keys by the xxh3 hash checksum for speed.
+    data map[Hash]*list //[*index_entry[StructType]]

-    // backing in-memory data store of
-    // generated index keys to result lists.
-    data map[uint64]*list[*result[StructType]]
+    // struct fields encompassed by
+    // keys (+ hashes) of this index.
+    fields []structfield

-    // whether to allow
-    // multiple results
-    // per index key.
-    unique bool
+    // index flags:
+    // - 1 << 0 = unique
+    // - 1 << 1 = allow zero
+    flags uint8
 }

-// init initializes this index with the given configuration.
-func (i *Index[T]) init(config IndexConfig, max int) {
-    fields := strings.Split(config.Fields, ",")
+// Key returns the configured fields as key, and hash sum of key.
+func (i *Index[T]) Key(value T) ([]any, Hash, bool) {
+    h := get_hasher()
+    key, sum, ok := index_key(i, h, value)
+    hash_pool.Put(h)
+    return key, sum, ok
+}
+
+func is_unique(f uint8) bool {
+    const mask = uint8(1) << 0
+    return f&mask != 0
+}
+
+func set_is_unique(f *uint8) {
+    const mask = uint8(1) << 0
+    (*f) |= mask
+}
+
+func allow_zero(f uint8) bool {
+    const mask = uint8(1) << 1
+    return f&mask != 0
+}
+
+func set_allow_zero(f *uint8) {
+    const mask = uint8(1) << 1
+    (*f) |= mask
+}
+
+func init_index[T any](i *Index[T], config IndexConfig, max int) {
+    // Set name from the raw
+    // struct fields string.
     i.name = config.Fields
-    i.hasher = NewHasher[T](fields, config.AllowZero)
-    i.unique = !config.Multiple
-    i.data = make(map[uint64]*list[*result[T]], max+1)
+
+    // Set struct flags.
+    if config.AllowZero {
+        set_allow_zero(&i.flags)
+    }
+    if !config.Multiple {
+        set_is_unique(&i.flags)
+    }
+
+    // Split to get the containing struct fields.
+    fields := strings.Split(config.Fields, ",")
+
+    // Preallocate expected struct field slice.
+    i.fields = make([]structfield, len(fields))
+
+    // Get the reflected struct ptr type.
+    t := reflect.TypeOf((*T)(nil)).Elem()
+
+    for x, fieldName := range fields {
+        // Split name to account for nesting.
+        names := strings.Split(fieldName, ".")
+
+        // Look for usable struct field.
+        i.fields[x] = find_field(t, names)
+    }
+
+    // Initialize index_entry list store.
+    i.data = make(map[Hash]*list, max+1)
 }

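The hunk above collapses the old Index booleans (the unique flag, plus the hasher's allow-zero option) into a single flags byte, set through set_is_unique / set_allow_zero during init_index. A minimal, standalone sketch of that bit-flag pattern follows; it uses plain Go with illustrative names (flagUnique, flagAllowZero) rather than the vendored API:

package main

import "fmt"

// Illustrative sketch only, not part of go-structr.
const (
    flagUnique    uint8 = 1 << 0 // mirrors "1 << 0 = unique"
    flagAllowZero uint8 = 1 << 1 // mirrors "1 << 1 = allow zero"
)

func main() {
    var flags uint8

    // Equivalent of set_is_unique(&flags) and set_allow_zero(&flags).
    flags |= flagUnique
    flags |= flagAllowZero

    // Equivalent of is_unique(flags) and allow_zero(flags).
    fmt.Println(flags&flagUnique != 0)    // true
    fmt.Println(flags&flagAllowZero != 0) // true
}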
-// Hasher returns the hash checksummer associated with this index.
-func (i *Index[T]) Hasher() *Hasher[T] {
-    return &i.hasher
+func index_key[T any](i *Index[T], h *xxh3.Hasher, value T) ([]any, Hash, bool) {
+    key := extract_fields(value, i.fields)
+    sum, zero := hash_sum(i.fields, h, key)
+    if zero && !allow_zero(i.flags) {
+        var zero Hash
+        return nil, zero, false
+    }
+    return key, sum, true
 }

-func index_append[T any](c *Cache[T], i *Index[T], key uint64, res *result[T]) {
-    // Acquire + setup indexkey.
-    ikey := indexkey_acquire(c)
-    ikey.entry.Value = res
-    ikey.key = key
-    ikey.index = i
+func index_hash[T any](i *Index[T], h *xxh3.Hasher, key []any) (Hash, bool) {
+    sum, zero := hash_sum(i.fields, h, key)
+    if zero && !allow_zero(i.flags) {
+        var zero Hash
+        return zero, false
+    }
+    return sum, true
+}

-    // Append to result's indexkeys.
-    res.keys = append(res.keys, ikey)
+func index_get[T any](i *Index[T], hash Hash, key []any) *list {
+    l := i.data[hash]
+    if l == nil {
+        return nil
+    }
+    entry := (*index_entry)(l.head.data)
+    if !is_equal(entry.key, key) {
+        return l
+    }
+    return l
+}

+func index_append[T any](c *Cache[T], i *Index[T], hash Hash, key []any, res *result) {
     // Get list at key.
-    l := i.data[key]
+    l := i.data[hash]

     if l == nil {

         // Allocate new list.
-        l = list_acquire(c)
-        i.data[key] = l
+        l = list_acquire()
+        i.data[hash] = l

-    } else if i.unique {
+    } else if entry := (*index_entry)(l.head.data); //nocollapse
+        !is_equal(entry.key, key) {

-        // Remove currently
-        // indexed result.
-        old := l.head
-        l.remove(old)
+        // Collision! Drop all.
+        delete(i.data, hash)
+
+        // Iterate entries in list.
+        for x := 0; x < l.len; x++ {
+
+            // Pop current head.
+            list_remove(l, l.head)
+
+            // Extract result.
+            res := entry.result
+
+            // Drop index entry from res.
+            result_drop_index(res, i)
+            if len(res.indexed) == 0 {
+
+                // Old res now unused,
+                // release to mem pool.
+                result_release(c, res)
+            }
+        }
+
+        return
+
+    } else if is_unique(i.flags) {
+
+        // Remove current
+        // indexed entry.
+        list_remove(l, l.head)

         // Get ptr to old
-        // result before we
+        // entry before we
         // release to pool.
-        res := old.Value
+        res := entry.result

         // Drop this index's key from
         // old res now not indexed here.
-        result_dropIndex(c, res, i)
-        if len(res.keys) == 0 {
+        result_drop_index(res, i)
+        if len(res.indexed) == 0 {

             // Old res now unused,
             // release to mem pool.
@@ -114,100 +231,162 @@ func index_append[T any](c *Cache[T], i *Index[T], key uint64, res *result[T]) {
         }
     }

-    // Add result indexkey to
-    // front of results list.
-    l.pushFront(&ikey.entry)
+    // Acquire + setup index entry.
+    entry := index_entry_acquire()
+    entry.index = unsafe.Pointer(i)
+    entry.result = res
+    entry.key = key
+    entry.hash = hash
+
+    // Append to result's indexed entries.
+    res.indexed = append(res.indexed, entry)
+
+    // Add index entry to index list.
+    list_push_front(l, &entry.elem)
 }

-func index_deleteOne[T any](c *Cache[T], i *Index[T], ikey *indexkey[T]) {
-    // Get list at key.
-    l := i.data[ikey.key]
-    if l == nil {
-        return
-    }
-
-    // Remove from list.
-    l.remove(&ikey.entry)
-    if l.len == 0 {
-
-        // Remove list from map.
-        delete(i.data, ikey.key)
-
-        // Release list to pool.
-        list_release(c, l)
-    }
-}
-
-func index_delete[T any](c *Cache[T], i *Index[T], key uint64, fn func(*result[T])) {
+func index_delete[T any](c *Cache[T], i *Index[T], hash Hash, key []any, fn func(*result)) {
     if fn == nil {
         panic("nil fn")
     }

-    // Get list at key.
-    l := i.data[key]
+    // Get list at hash.
+    l := i.data[hash]
     if l == nil {
         return
     }

-    // Delete data at key.
-    delete(i.data, key)
+    entry := (*index_entry)(l.head.data)

-    // Iterate results in list.
+    // Check contains expected key for hash.
+    if !is_equal(entry.key, key) {
+        return
+    }
+
+    // Delete data at hash.
+    delete(i.data, hash)
+
+    // Iterate entries in list.
     for x := 0; x < l.len; x++ {

         // Pop current head.
-        res := l.head.Value
-        l.remove(l.head)
+        entry := (*index_entry)(l.head.data)
+        list_remove(l, l.head)

-        // Delete index's key
-        // from result tracking.
-        result_dropIndex(c, res, i)
+        // Extract result.
+        res := entry.result

         // Call hook.
         fn(res)
+
+        // Drop index entry from res.
+        result_drop_index(res, i)
     }

-    // Release list to pool.
-    list_release(c, l)
+    // Release to pool.
+    list_release(l)
 }

-type indexkey[T any] struct {
-    // linked list entry the related
-    // result is stored under in the
-    // Index.data[key] linked list.
-    entry elem[*result[T]]
+func index_delete_entry[T any](c *Cache[T], entry *index_entry) {
+    // Get from entry.
+    i := (*Index[T])(entry.index)

-    // key is the generated index key
-    // the related result is indexed
-    // under, in the below index.
-    key uint64
+    // Get list at hash sum.
+    l := i.data[entry.hash]
+    if l == nil {
+        return

-    // index is the index that the
-    // related result is indexed in.
-    index *Index[T]
-}
-
-func indexkey_acquire[T any](c *Cache[T]) *indexkey[T] {
-    var ikey *indexkey[T]
-
-    if len(c.keyPool) == 0 {
-        // Allocate new key.
-        ikey = new(indexkey[T])
-    } else {
-        // Pop result from pool slice.
-        ikey = c.keyPool[len(c.keyPool)-1]
-        c.keyPool = c.keyPool[:len(c.keyPool)-1]
     }

-    return ikey
+    // Remove entry from list.
+    list_remove(l, &entry.elem)
+    if l.len == 0 {
+
+        // Remove list from map.
+        delete(i.data, entry.hash)
+
+        // Release to pool.
+        list_release(l)
+    }
+
+    // Extract result.
+    res := entry.result
+
+    // Drop index entry from res.
+    result_drop_index(res, i)
 }

-func indexkey_release[T any](c *Cache[T], ikey *indexkey[T]) {
-    // Reset indexkey.
-    ikey.entry.Value = nil
-    ikey.key = 0
-    ikey.index = nil
+var entry_pool sync.Pool

-    // Release indexkey to memory pool.
-    c.keyPool = append(c.keyPool, ikey)
+type index_entry struct {
+    // elem contains the list element
+    // appended to each per-hash list
+    // within the Index{} type. the
+    // contained value is a self-ref.
+    elem list_elem
+
+    // index is the Index{} this
+    // index_entry{} is stored in.
+    index unsafe.Pointer
+
+    // result is the actual
+    // underlying result stored
+    // within the index. this
+    // also contains a ref to
+    // this *index_entry in order
+    // to track indices each result
+    // is currently stored under.
+    result *result
+
+    // key contains the actual
+    // key this item was stored
+    // under, used for collision
+    // check.
+    key []any
+
+    // hash contains computed
+    // hash checksum of .key.
+    hash Hash
+}
+
+func index_entry_acquire() *index_entry {
+    // Acquire from pool.
+    v := entry_pool.Get()
+    if v == nil {
+        v = new(index_entry)
+    }
+
+    // Cast index_entry value.
+    entry := v.(*index_entry)
+
+    // Set index list elem entry on itself.
+    entry.elem.data = unsafe.Pointer(entry)
+
+    return entry
+}
+
+func index_entry_release(entry *index_entry) {
+    var zero Hash
+
+    // Reset index entry.
+    entry.elem.data = nil
+    entry.index = nil
+    entry.result = nil
+    entry.key = nil
+    entry.hash = zero
+
+    // Release to pool.
+    entry_pool.Put(entry)
+}
+
+// is_equal returns whether 2 key slices are equal.
+func is_equal(k1, k2 []any) bool {
+    if len(k1) != len(k2) {
+        return false
+    }
+    for i := range k1 {
+        if k1[i] != k2[i] {
+            return false
+        }
+    }
+    return true
 }
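The core of the v0.2.0 => v0.3.0 bump is visible in index.go above: the index map is keyed only by an xxh3 checksum, so each index_entry now also carries the exact key slice it was stored under, and is_equal is checked before a hash match is trusted (a mismatch is treated as a collision and the entries are dropped). A minimal, self-contained sketch of that guard follows; entry, isEqual and collidingHash are illustrative names, not the library's API:

package main

import "fmt"

// Illustrative sketch only, not part of go-structr.
// entry pairs a hash-map value with the exact key it was stored
// under, so a checksum collision can be detected on lookup.
type entry struct {
    key   []any
    value string
}

// isEqual mirrors the is_equal helper in the diff above.
func isEqual(k1, k2 []any) bool {
    if len(k1) != len(k2) {
        return false
    }
    for i := range k1 {
        if k1[i] != k2[i] {
            return false
        }
    }
    return true
}

func main() {
    // Pretend two different keys hashed to the same checksum.
    const collidingHash = uint64(42)
    m := map[uint64]*entry{
        collidingHash: {key: []any{"alice", "example.org"}, value: "cached"},
    }

    lookupKey := []any{"bob", "example.org"}
    if e, ok := m[collidingHash]; ok && !isEqual(e.key, lookupKey) {
        fmt.Println("hash collision: treat as a miss and drop the entry")
    }
}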
vendor/codeberg.org/gruf/go-structr/list.go (generated, vendored): 67 changed lines
@@ -1,49 +1,55 @@
 package structr

-// elem represents an element
+import (
+    "sync"
+    "unsafe"
+)
+
+var list_pool sync.Pool
+
+// elem represents an elem
 // in a doubly-linked list.
-type elem[T any] struct {
-    next *elem[T]
-    prev *elem[T]
-    Value T
+type list_elem struct {
+    next *list_elem
+    prev *list_elem
+
+    // data is a ptr to the
+    // value this linked list
+    // element is embedded-in.
+    data unsafe.Pointer
 }

 // list implements a doubly-linked list, where:
 // - head = index 0 (i.e. the front)
 // - tail = index n-1 (i.e. the back)
-type list[T any] struct {
-    head *elem[T]
-    tail *elem[T]
+type list struct {
+    head *list_elem
+    tail *list_elem
     len int
 }

-func list_acquire[T any](c *Cache[T]) *list[*result[T]] {
-    var l *list[*result[T]]
-
-    if len(c.llsPool) == 0 {
-        // Allocate new list.
-        l = new(list[*result[T]])
-    } else {
-        // Pop list from pool slice.
-        l = c.llsPool[len(c.llsPool)-1]
-        c.llsPool = c.llsPool[:len(c.llsPool)-1]
+func list_acquire() *list {
+    // Acquire from pool.
+    v := list_pool.Get()
+    if v == nil {
+        v = new(list)
     }

-    return l
+    // Cast list value.
+    return v.(*list)
 }

-func list_release[T any](c *Cache[T], l *list[*result[T]]) {
+func list_release(l *list) {
     // Reset list.
     l.head = nil
     l.tail = nil
     l.len = 0

-    // Release list to memory pool.
-    c.llsPool = append(c.llsPool, l)
+    // Release to pool.
+    list_pool.Put(l)
 }

-// pushFront pushes new 'elem' to front of list.
-func (l *list[T]) pushFront(elem *elem[T]) {
+func list_push_front(l *list, elem *list_elem) {
     if l.len == 0 {
         // Set new tail + head
         l.head = elem
@@ -71,14 +77,12 @@ func (l *list[T]) pushFront(elem *elem[T]) {
     l.len++
 }

-// moveFront calls remove() on elem, followed by pushFront().
-func (l *list[T]) moveFront(elem *elem[T]) {
-    l.remove(elem)
-    l.pushFront(elem)
+func list_move_front(l *list, elem *list_elem) {
+    list_remove(l, elem)
+    list_push_front(l, elem)
 }

-// remove removes the 'elem' from the list.
-func (l *list[T]) remove(elem *elem[T]) {
+func list_remove(l *list, elem *list_elem) {
     if l.len <= 1 {
         // Drop elem's links
         elem.next = nil
@@ -117,8 +121,7 @@ func (l *list[T]) remove(elem *elem[T]) {
     l.len--
 }

-// rangefn ranges all the elements in the list, passing each to fn.
-func (l *list[T]) rangefn(fn func(*elem[T])) {
+func list_rangefn(l *list, fn func(*list_elem)) {
     if fn == nil {
         panic("nil fn")
     }
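list.go above drops the generic elem[T] in favour of a non-generic list_elem carrying an unsafe.Pointer back-reference to whatever struct embeds it, which is what makes casts like (*index_entry)(l.head.data) elsewhere in the diff possible. A small standalone sketch of that intrusive-list idea follows; listElem and node are illustrative names only, not the vendored types:

package main

import (
    "fmt"
    "unsafe"
)

// Illustrative sketch only, not part of go-structr.
// An intrusive list element stores an untyped back-pointer to the
// struct that embeds it, so one list implementation can hold any
// node type without generics.
type listElem struct {
    next, prev *listElem
    data       unsafe.Pointer // points back at the embedding struct
}

type node struct {
    elem  listElem
    value string
}

func main() {
    n := &node{value: "hello"}
    n.elem.data = unsafe.Pointer(n) // self-reference, as index_entry / result do

    // Recover the embedding struct from the element, as the cache
    // does with (*index_entry)(l.head.data).
    got := (*node)(n.elem.data)
    fmt.Println(got.value) // hello
}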
vendor/codeberg.org/gruf/go-structr/result.go (generated, vendored): 92 changed lines
@@ -1,75 +1,77 @@
 package structr

-type result[T any] struct {
-    // linked list entry this result is
+import (
+    "sync"
+    "unsafe"
+)
+
+var result_pool sync.Pool
+
+type result struct {
+    // linked list elem this result is
     // stored under in Cache.lruList.
-    entry elem[*result[T]]
+    elem list_elem

-    // keys tracks the indices
-    // result is stored under.
-    keys []*indexkey[T]
+    // indexed stores the indices
+    // this result is stored under.
+    indexed []*index_entry

-    // cached value.
-    value T
-
-    // cached error.
-    err error
+    // cached data (we maintain
+    // the type data here using
+    // an interface as any one
+    // instance can be T / error).
+    data interface{}
 }

-func result_acquire[T any](c *Cache[T]) *result[T] {
-    var res *result[T]
-
-    if len(c.resPool) == 0 {
-        // Allocate new result.
-        res = new(result[T])
-    } else {
-        // Pop result from pool slice.
-        res = c.resPool[len(c.resPool)-1]
-        c.resPool = c.resPool[:len(c.resPool)-1]
+func result_acquire[T any](c *Cache[T]) *result {
+    // Acquire from pool.
+    v := result_pool.Get()
+    if v == nil {
+        v = new(result)
     }

-    // Push to front of LRU list.
-    c.lruList.pushFront(&res.entry)
-    res.entry.Value = res
+    // Cast result value.
+    res := v.(*result)
+
+    // Push result elem to front of LRU list.
+    list_push_front(&c.lruList, &res.elem)
+    res.elem.data = unsafe.Pointer(res)

     return res
 }

-func result_release[T any](c *Cache[T], res *result[T]) {
-    // Remove from the LRU list.
-    c.lruList.remove(&res.entry)
-    res.entry.Value = nil
-
-    var zero T
+func result_release[T any](c *Cache[T], res *result) {
+    // Remove result elem from LRU list.
+    list_remove(&c.lruList, &res.elem)
+    res.elem.data = nil

     // Reset all result fields.
-    res.keys = res.keys[:0]
-    res.value = zero
-    res.err = nil
+    res.indexed = res.indexed[:0]
+    res.data = nil

-    // Release result to memory pool.
-    c.resPool = append(c.resPool, res)
+    // Release to pool.
+    result_pool.Put(res)
 }

-func result_dropIndex[T any](c *Cache[T], res *result[T], index *Index[T]) {
-    for i := 0; i < len(res.keys); i++ {
+func result_drop_index[T any](res *result, index *Index[T]) {
+    for i := 0; i < len(res.indexed); i++ {

-        if res.keys[i].index != index {
+        if res.indexed[i].index != unsafe.Pointer(index) {
             // Prof. Obiwan:
             // this is not the index
             // we are looking for.
             continue
         }

-        // Get index key ptr.
-        ikey := res.keys[i]
+        // Get index entry ptr.
+        entry := res.indexed[i]

-        // Move all index keys down + reslice.
-        copy(res.keys[i:], res.keys[i+1:])
-        res.keys = res.keys[:len(res.keys)-1]
+        // Move all index entries down + reslice.
+        copy(res.indexed[i:], res.indexed[i+1:])
+        res.indexed = res.indexed[:len(res.indexed)-1]

-        // Release ikey to memory pool.
-        indexkey_release(c, ikey)
+        // Release to memory pool.
+        index_entry_release(entry)

         return
     }
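result.go (and likewise list.go and index.go) above swap the old per-Cache free-list slices (c.resPool, c.llsPool, c.keyPool) for package-level sync.Pool instances. A minimal sketch of that acquire / reset / release pattern follows; resultPool, pooledResult and the helper names are illustrative, not the vendored identifiers:

package main

import (
    "fmt"
    "sync"
)

// Illustrative sketch only, not part of go-structr.
var resultPool sync.Pool

type pooledResult struct{ data any }

func acquireResult() *pooledResult {
    v := resultPool.Get()
    if v == nil {
        v = new(pooledResult)
    }
    return v.(*pooledResult)
}

func releaseResult(r *pooledResult) {
    r.data = nil // reset before returning to the pool
    resultPool.Put(r)
}

func main() {
    r := acquireResult()
    r.data = "cached value"
    fmt.Println(r.data)
    releaseResult(r)
}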
vendor/codeberg.org/gruf/go-structr/runtime.go (generated, vendored, new file): 134 added lines
@@ -0,0 +1,134 @@
+package structr
+
+import (
+    "fmt"
+    "reflect"
+    "unicode"
+    "unicode/utf8"
+    "unsafe"
+
+    "github.com/modern-go/reflect2"
+    "github.com/zeebo/xxh3"
+)
+
+type structfield struct {
+    // _type is the runtime type pointer
+    // underlying the struct field type.
+    // used for repacking our own erfaces.
+    _type reflect2.Type
+
+    // offset is the offset in memory
+    // of this struct field from the
+    // outer-most value ptr location.
+    offset uintptr
+
+    // hasher is the relevant function
+    // for hashing value of structfield
+    // into the supplied hashbuf, where
+    // return value indicates if zero.
+    hasher func(*xxh3.Hasher, any) bool
+}
+
+// find_field will search for a struct field with given set of names,
+// where names is a len > 0 slice of names account for struct nesting.
+func find_field(t reflect.Type, names []string) (sfield structfield) {
+    var (
+        // is_exported returns whether name is exported
+        // from a package; can be func or struct field.
+        is_exported = func(name string) bool {
+            r, _ := utf8.DecodeRuneInString(name)
+            return unicode.IsUpper(r)
+        }
+
+        // pop_name pops the next name from
+        // the provided slice of field names.
+        pop_name = func() string {
+            name := names[0]
+            names = names[1:]
+            if !is_exported(name) {
+                panicf("field is not exported: %s", name)
+            }
+            return name
+        }
+
+        // field is the iteratively searched
+        // struct field value in below loop.
+        field reflect.StructField
+    )
+
+    switch {
+    // The only 2 types we support are
+    // structs, and ptrs to a struct.
+    case t.Kind() == reflect.Struct:
+    case t.Kind() == reflect.Pointer &&
+        t.Elem().Kind() == reflect.Struct:
+        t = t.Elem()
+    default:
+        panic("index only support struct{} and *struct{}")
+    }
+
+    for len(names) > 0 {
+        var ok bool
+
+        // Pop next name.
+        name := pop_name()
+
+        // Check for valid struct type.
+        if t.Kind() != reflect.Struct {
+            panicf("field %s is not struct: %s", t, name)
+        }
+
+        // Look for next field by name.
+        field, ok = t.FieldByName(name)
+        if !ok {
+            panicf("unknown field: %s", name)
+        }
+
+        // Increment total field offset.
+        sfield.offset += field.Offset
+
+        // Set the next type.
+        t = field.Type
+    }
+
+    // Get field type as reflect2.
+    sfield._type = reflect2.Type2(t)
+
+    // Find hasher for type.
+    sfield.hasher = hasher(t)
+
+    return
+}
+
+// extract_fields extracts given structfields from the provided value type,
+// this is done using predetermined struct field memory offset locations.
+func extract_fields[T any](value T, fields []structfield) []any {
+    // Get ptr to raw value data.
+    ptr := unsafe.Pointer(&value)
+
+    // If this is a pointer type deref the value ptr.
+    if reflect.TypeOf(value).Kind() == reflect.Pointer {
+        ptr = *(*unsafe.Pointer)(ptr)
+    }
+
+    // Prepare slice of field ifaces.
+    ifaces := make([]any, len(fields))
+
+    for i := 0; i < len(fields); i++ {
+        // Manually access field at memory offset and pack eface.
+        ptr := unsafe.Pointer(uintptr(ptr) + fields[i].offset)
+        ifaces[i] = fields[i]._type.UnsafeIndirect(ptr)
+    }
+
+    return ifaces
+}
+
+// data_ptr returns the runtime data ptr associated with value.
+func data_ptr(a any) unsafe.Pointer {
+    return (*struct{ t, v unsafe.Pointer })(unsafe.Pointer(&a)).v
+}
+
+// panicf provides a panic with string formatting.
+func panicf(format string, args ...any) {
+    panic(fmt.Sprintf(format, args...))
+}
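runtime.go above precomputes each indexed struct field's byte offset in find_field, then reads the field straight out of a cached value in extract_fields. The vendored code does this through reflect2; the sketch below shows the same offset-based access using only the standard library, with a hypothetical account struct standing in for a cached value type:

package main

import (
    "fmt"
    "reflect"
    "unsafe"
)

// Illustrative sketch only, not part of go-structr.
type account struct {
    ID       string
    Username string
}

func main() {
    // Precompute the field's offset once, as find_field does per index.
    t := reflect.TypeOf(account{})
    f, _ := t.FieldByName("Username")
    offset := f.Offset

    // Read the field directly through the offset, as extract_fields does.
    v := account{ID: "01ABC", Username: "gruf"}
    ptr := unsafe.Pointer(&v)
    field := (*string)(unsafe.Add(ptr, offset))
    fmt.Println(*field) // gruf
}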
vendor/codeberg.org/gruf/go-structr/test.sh (generated, vendored, new file): 5 added lines
@@ -0,0 +1,5 @@
+#!/bin/sh
+set -e
+go test -v -tags=structr_32bit_hash .
+go test -v -tags=structr_48bit_hash .
+go test -v -tags=structr_64bit_hash .
vendor/codeberg.org/gruf/go-structr/util.go (generated, vendored, file deleted): 98 removed lines
@@ -1,98 +0,0 @@
-package structr
-
-import (
-    "fmt"
-    "reflect"
-    "sync"
-    "unicode"
-    "unicode/utf8"
-
-    "github.com/zeebo/xxh3"
-)
-
-// findField will search for a struct field with given set of names, where names is a len > 0 slice of names account for nesting.
-func findField(t reflect.Type, names []string, allowZero bool) (sfield structfield, ok bool) {
-    var (
-        // isExported returns whether name is exported
-        // from a package; can be func or struct field.
-        isExported = func(name string) bool {
-            r, _ := utf8.DecodeRuneInString(name)
-            return unicode.IsUpper(r)
-        }
-
-        // popName pops the next name from
-        // the provided slice of field names.
-        popName = func() string {
-            // Pop next name.
-            name := names[0]
-            names = names[1:]
-
-            // Ensure valid name.
-            if !isExported(name) {
-                panicf("field is not exported: %s", name)
-            }
-
-            return name
-        }
-
-        // field is the iteratively searched-for
-        // struct field value in below loop.
-        field reflect.StructField
-    )
-
-    for len(names) > 0 {
-        // Pop next name.
-        name := popName()
-
-        // Follow any ptrs leading to field.
-        for t.Kind() == reflect.Pointer {
-            t = t.Elem()
-        }
-
-        if t.Kind() != reflect.Struct {
-            // The end type after following ptrs must be struct.
-            panicf("field %s is not struct (ptr): %s", t, name)
-        }
-
-        // Look for next field by name.
-        field, ok = t.FieldByName(name)
-        if !ok {
-            return
-        }
-
-        // Append next set of indices required to reach field.
-        sfield.index = append(sfield.index, field.Index...)
-
-        // Set the next type.
-        t = field.Type
-    }
-
-    // Get final type hash func.
-    sfield.hasher = hasher(t)
-
-    return
-}
-
-// panicf provides a panic with string formatting.
-func panicf(format string, args ...any) {
-    panic(fmt.Sprintf(format, args...))
-}
-
-// hashPool provides a memory pool of xxh3
-// hasher objects used indexing field vals.
-var hashPool sync.Pool
-
-// gethashbuf fetches hasher from memory pool.
-func getHasher() *xxh3.Hasher {
-    v := hashPool.Get()
-    if v == nil {
-        v = new(xxh3.Hasher)
-    }
-    return v.(*xxh3.Hasher)
-}
-
-// putHasher replaces hasher in memory pool.
-func putHasher(h *xxh3.Hasher) {
-    h.Reset()
-    hashPool.Put(h)
-}
vendor/modules.txt (vendored): 2 changed lines
@@ -56,7 +56,7 @@ codeberg.org/gruf/go-sched
 ## explicit; go 1.19
 codeberg.org/gruf/go-store/v2/storage
 codeberg.org/gruf/go-store/v2/util
-# codeberg.org/gruf/go-structr v0.2.0
+# codeberg.org/gruf/go-structr v0.3.0
 ## explicit; go 1.21
 codeberg.org/gruf/go-structr
 # codeberg.org/superseriousbusiness/exif-terminator v0.7.0