package structr

import (
	"context"
	"errors"
	"reflect"
	"sync"
	"unsafe"
)

// DefaultIgnoreErr is the default function used to
// ignore (i.e. not cache) incoming error results during
// Load() calls. By default ignores context pkg errors.
func DefaultIgnoreErr(err error) bool {
	return errors.Is(err, context.Canceled) ||
		errors.Is(err, context.DeadlineExceeded)
}

// CacheConfig defines config vars
// for initializing a struct cache.
type CacheConfig[StructType any] struct {

	// Indices defines indices to create
	// in the Cache for the receiving
	// generic struct type parameter.
	Indices []IndexConfig

	// MaxSize defines the maximum number
	// of items allowed in the Cache at
	// one time, before old items start
	// getting evicted.
	MaxSize int

	// IgnoreErr defines which errors to
	// ignore (i.e. not cache) returned
	// from load function callback calls.
	// This may be left as nil, in which
	// case DefaultIgnoreErr will be used.
	IgnoreErr func(error) bool

	// Copy provides a means of copying
	// cached values, to ensure returned values
	// do not share memory with those in cache.
	Copy func(StructType) StructType

	// Invalidate is called when cache values
	// (NOT errors) are invalidated, either
	// as the values passed to Put() / Store(),
	// or as the values indexed under keys
	// passed to Invalidate().
	Invalidate func(StructType)
}
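
// The following is an illustrative configuration sketch, not part of this
// package: the User type, its fields, and the index field specifications
// are hypothetical assumptions, shown only to demonstrate how the pieces
// of a CacheConfig fit together.
//
//	type User struct {
//		ID       uint64
//		Username string
//	}
//
//	cfg := CacheConfig[*User]{
//		Indices: []IndexConfig{
//			{Fields: "ID"},       // lookup by ID (field spec assumed)
//			{Fields: "Username"}, // lookup by username (field spec assumed)
//		},
//		MaxSize: 1000,
//		Copy: func(u *User) *User {
//			u2 := new(User)
//			*u2 = *u // shallow copy suffices for this flat struct
//			return u2
//		},
//	}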

// Cache provides a structure cache with automated
// indexing and lookups by any initialization-defined
// combination of fields. This also supports caching
// of negative results (errors!) returned by LoadOne().
type Cache[StructType any] struct {

	// indices used in storing passed struct
	// types by user defined sets of fields.
	indices []Index

	// keeps track of all indexed items,
	// in order of least recently used (LRU).
	lru list

	// max cache size, imposes size
	// limit on the lru list in order
	// to evict old entries.
	maxSize int

	// hook functions.
	ignore  func(error) bool
	copy    func(StructType) StructType
	invalid func(StructType)

	// protective mutex, guards:
	// - Cache{}.lru
	// - Index{}.data
	// - Cache{} hook fns
	mutex sync.Mutex
}

// Init initializes the cache with given configuration
// including struct fields to index, and necessary fns.
func (c *Cache[T]) Init(config CacheConfig[T]) {
	t := reflect.TypeOf((*T)(nil)).Elem()

	if len(config.Indices) == 0 {
		panic("no indices provided")
	}

	if config.IgnoreErr == nil {
		config.IgnoreErr = DefaultIgnoreErr
	}

	if config.Copy == nil {
		panic("copy function must be provided")
	}

	if config.MaxSize < 2 {
		panic("minimum cache size is 2 for LRU to work")
	}

	// Safely copy over
	// provided config.
	c.mutex.Lock()
	c.indices = make([]Index, len(config.Indices))
	for i, cfg := range config.Indices {
		c.indices[i].ptr = unsafe.Pointer(c)
		c.indices[i].init(t, cfg, config.MaxSize)
	}
	c.ignore = config.IgnoreErr
	c.copy = config.Copy
	c.invalid = config.Invalidate
	c.maxSize = config.MaxSize
	c.mutex.Unlock()
}
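
// Illustrative initialization sketch, continuing the hypothetical User
// example above: a Cache must be Init'd before use, and Init panics if no
// indices are given, no Copy function is set, or MaxSize is below 2.
//
//	var users Cache[*User]
//	users.Init(cfg) // cfg as sketched under CacheConfig above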

// Index selects index with given name from cache, else panics.
func (c *Cache[T]) Index(name string) *Index {
	for i := range c.indices {
		if c.indices[i].name == name {
			return &c.indices[i]
		}
	}
	panic("unknown index: " + name)
}
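
// Illustrative lookup sketch (index name hypothetical): the returned *Index
// belongs to this cache instance and may be fetched once and reused across
// later Get / Load / Invalidate calls.
//
//	idIndex := users.Index("ID") // panics if no "ID" index was configured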

// GetOne fetches one value from the cache stored under index, using precalculated index key.
func (c *Cache[T]) GetOne(index *Index, key Key) (T, bool) {
	values := c.Get(index, key)
	if len(values) == 0 {
		var zero T
		return zero, false
	}
	return values[0], true
}

// Get fetches values from the cache stored under index, using precalculated index keys.
func (c *Cache[T]) Get(index *Index, keys ...Key) []T {
	if index == nil {
		panic("no index given")
	} else if index.ptr != unsafe.Pointer(c) {
		panic("invalid index for cache")
	}

	// Preallocate expected ret slice.
	values := make([]T, 0, len(keys))

	// Acquire lock.
	c.mutex.Lock()
	defer c.mutex.Unlock()

	// Check cache init.
	if c.copy == nil {
		panic("not initialized")
	}

	for i := range keys {
		// Concatenate all *values* from cached items.
		index.get(keys[i], func(item *indexed_item) {
			if value, ok := item.data.(T); ok {

				// Append value COPY.
				value = c.copy(value)
				values = append(values, value)

				// Push to front of LRU list, USING
				// THE ITEM'S LRU ENTRY, NOT THE
				// INDEX KEY ENTRY. VERY IMPORTANT!!
				c.lru.move_front(&item.elem)
			}
		})
	}

	return values
}
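
// Illustrative read sketch, continuing the hypothetical example above. Key
// construction is assumed here: the Index.Key(...) helper is defined outside
// this file, so treat the call below as an assumption rather than a given.
// Returned values are copies made via the configured Copy function.
//
//	key := idIndex.Key(uint64(42)) // assumed Key constructor on Index
//	user, ok := users.GetOne(idIndex, key)
//	batch := users.Get(idIndex, key /*, further keys... */)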

// Put will insert the given values into cache,
// calling any invalidate hook on each value.
func (c *Cache[T]) Put(values ...T) {
	// Acquire lock.
	c.mutex.Lock()

	// Wrap unlock to only do once.
	unlock := once(c.mutex.Unlock)
	defer unlock()

	// Check cache init.
	if c.copy == nil {
		panic("not initialized")
	}

	// Store all passed values.
	for i := range values {
		c.store_value(
			nil,
			Key{},
			values[i],
		)
	}

	// Get func ptrs.
	invalid := c.invalid

	// Done with
	// the lock.
	unlock()

	if invalid != nil {
		// Pass all invalidated values
		// to given user hook (if set).
		for _, value := range values {
			invalid(value)
		}
	}
}
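
// Illustrative insert sketch (hypothetical User example): Put copies each
// value into the cache, indexes it under every configured index, and then
// calls the Invalidate hook (if set) with the original values.
//
//	users.Put(&User{ID: 42, Username: "alice"})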

// LoadOne fetches one result from the cache stored under index, using precalculated index key.
// In the case that no result is found, provided load callback will be used to hydrate the cache.
func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, error) {
	if index == nil {
		panic("no index given")
	} else if index.ptr != unsafe.Pointer(c) {
		panic("invalid index for cache")
	} else if !is_unique(index.flags) {
		panic("cannot get one by non-unique index")
	}

	var (
		// whether an item was found
		// (and so val / err are set).
		ok bool

		// separate value / error ptrs
		// as the item is liable to
		// change outside of lock.
		val T
		err error
	)

	// Acquire lock.
	c.mutex.Lock()

	// Wrap unlock to only do once.
	unlock := once(c.mutex.Unlock)
	defer unlock()

	// Check init'd.
	if c.copy == nil ||
		c.ignore == nil {
		panic("not initialized")
	}

	// Get item indexed at key.
	item := index.get_one(key)

	if ok = (item != nil); ok {
		var is bool

		if val, is = item.data.(T); is {
			// Set value COPY.
			val = c.copy(val)

			// Push to front of LRU list, USING
			// THE ITEM'S LRU ENTRY, NOT THE
			// INDEX KEY ENTRY. VERY IMPORTANT!!
			c.lru.move_front(&item.elem)

		} else {

			// Attempt to return error.
			err, _ = item.data.(error)
		}
	}

	// Get func ptrs.
	ignore := c.ignore

	// Done with
	// the lock.
	unlock()

	if ok {
		// item found!
		return val, err
	}

	// Load new result.
	val, err = load()

	// Check for ignored
	// (transient) errors.
	if ignore(err) {
		return val, err
	}

	// Acquire lock.
	c.mutex.Lock()

	// Index this new loaded item.
	// Note this handles copying of
	// the provided value, so it is
	// safe for us to return as-is.
	if err != nil {
		c.store_error(index, key, err)
	} else {
		c.store_value(index, key, val)
	}

	// Done with lock.
	c.mutex.Unlock()

	return val, err
}
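
// Illustrative read-through sketch; loadUserFromDB is a hypothetical helper,
// and key is built as in the Get sketch above. On a miss the callback result
// is cached, including non-ignored errors, so repeated misses for the same
// key need not keep hitting the database.
//
//	user, err := users.LoadOne(idIndex, key, func() (*User, error) {
//		return loadUserFromDB(42) // hypothetical database lookup
//	})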

// Load fetches values from the cache stored under index, using precalculated index keys. The cache will attempt to
// fill results with values stored under keys, passing keys with uncached results to the provided load callback to
// further hydrate the cache with missing results. Cached error results are not included or returned by this function.
func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error)) ([]T, error) {
	if index == nil {
		panic("no index given")
	} else if index.ptr != unsafe.Pointer(c) {
		panic("invalid index for cache")
	}

	// Preallocate expected ret slice.
	values := make([]T, 0, len(keys))

	// Acquire lock.
	c.mutex.Lock()

	// Wrap unlock to only do once.
	unlock := once(c.mutex.Unlock)
	defer unlock()

	// Check init'd.
	if c.copy == nil {
		panic("not initialized")
	}

	for i := 0; i < len(keys); {
		// Value length before
		// any below appends.
		before := len(values)

		// Concatenate all *values* from cached items.
		index.get(keys[i], func(item *indexed_item) {
			if value, ok := item.data.(T); ok {

				// Append value COPY.
				value = c.copy(value)
				values = append(values, value)

				// Push to front of LRU list, USING
				// THE ITEM'S LRU ENTRY, NOT THE
				// INDEX KEY ENTRY. VERY IMPORTANT!!
				c.lru.move_front(&item.elem)
			}
		})

		// Only if values changed did
		// we actually find anything.
		if len(values) != before {

			// We found values at key,
			// drop key from the slice.
			copy(keys[i:], keys[i+1:])
			keys = keys[:len(keys)-1]
			continue
		}

		// Iter
		i++
	}

	// Done with
	// the lock.
	unlock()

	// Load uncached values.
	uncached, err := load(keys)
	if err != nil {
		return nil, err
	}

	// Acquire lock.
	c.mutex.Lock()

	// Store all uncached values.
	for i := range uncached {
		c.store_value(
			nil,
			Key{},
			uncached[i],
		)
	}

	// Done with lock.
	c.mutex.Unlock()

	// Append uncached to return values.
	values = append(values, uncached...)

	return values, nil
}
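
// Illustrative batch sketch; loadUsersFromDB is a hypothetical helper, and
// keys is a []Key built as in the Get sketch above. Only the keys that
// missed the cache are passed to the callback, and its results are cached
// before being appended to the returned slice.
//
//	results, err := users.Load(idIndex, keys, func(missing []Key) ([]*User, error) {
//		return loadUsersFromDB(missing) // hypothetical bulk database lookup
//	})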

// Store will call the given store callback and, on success,
// pass the provided value to the Put() function. On error the
// value is not cached, but is still passed to the stored
// invalidate hook.
func (c *Cache[T]) Store(value T, store func() error) error {
	// Store value.
	err := store()
	if err != nil {

		// Get func ptrs.
		c.mutex.Lock()
		invalid := c.invalid
		c.mutex.Unlock()

		// On error don't store
		// value, but still pass
		// to invalidate hook.
		if invalid != nil {
			invalid(value)
		}

		return err
	}

	// Store value.
	c.Put(value)

	return nil
}
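
// Illustrative write-through sketch; insertUserIntoDB is a hypothetical
// helper. Wrapping the database write keeps cache and store consistent,
// since the value is only cached when the write succeeds.
//
//	err := users.Store(user, func() error {
//		return insertUserIntoDB(user) // hypothetical database insert
//	})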

// Invalidate invalidates all results stored under index keys.
func (c *Cache[T]) Invalidate(index *Index, keys ...Key) {
	if index == nil {
		panic("no index given")
	} else if index.ptr != unsafe.Pointer(c) {
		panic("invalid index for cache")
	}

	// Acquire lock.
	c.mutex.Lock()

	// Preallocate expected ret slice.
	values := make([]T, 0, len(keys))

	for _, key := range keys {
		// Delete all items under key from index, collecting
		// value items and dropping them from all their indices.
		index.delete(key, func(item *indexed_item) {

			if value, ok := item.data.(T); ok {
				// No need to copy, as item
				// being deleted from cache.
				values = append(values, value)
			}

			// Delete cached.
			c.delete(item)
		})
	}

	// Get func ptrs.
	invalid := c.invalid

	// Done with lock.
	c.mutex.Unlock()

	if invalid != nil {
		// Pass all invalidated values
		// to given user hook (if set).
		for _, value := range values {
			invalid(value)
		}
	}
}
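
// Illustrative invalidation sketch (key built as in the Get sketch above):
// deleting by one index drops the entry from every index it was stored
// under, and the Invalidate hook (if set) receives each dropped value.
//
//	users.Invalidate(idIndex, key)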

// Trim will truncate the cache to ensure it
// stays within given percentage of MaxSize.
func (c *Cache[T]) Trim(perc float64) {
	// Acquire lock.
	c.mutex.Lock()

	// Calculate number of cache items to drop.
	max := (perc / 100) * float64(c.maxSize)
	diff := c.lru.len - int(max)
	if diff <= 0 {

		// Trim not needed.
		c.mutex.Unlock()
		return
	}

	// Iterate over 'diff' items
	// from back (oldest) of cache.
	for i := 0; i < diff; i++ {

		// Get oldest LRU elem.
		oldest := c.lru.tail
		if oldest == nil {

			// reached
			// end.
			break
		}

		// Drop oldest item from cache.
		item := (*indexed_item)(oldest.data)
		c.delete(item)
	}

	// Done with lock.
	c.mutex.Unlock()
}
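
// Illustrative maintenance sketch: Trim(80) evicts least recently used items
// until the cache holds at most 80% of MaxSize, which suits a periodic
// background sweep; Trim(0), i.e. Clear(), empties the cache entirely.
//
//	users.Trim(80)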

// Clear empties the cache by calling .Trim(0).
func (c *Cache[T]) Clear() { c.Trim(0) }

// Len returns the current length of cache.
func (c *Cache[T]) Len() int {
	c.mutex.Lock()
	l := c.lru.len
	c.mutex.Unlock()
	return l
}

// Debug returns debug stats about cache.
func (c *Cache[T]) Debug() map[string]any {
	m := make(map[string]any)
	c.mutex.Lock()
	m["lru"] = c.lru.len
	indices := make(map[string]any)
	m["indices"] = indices
	for i := range c.indices {
		var n uint64
		for _, list := range c.indices[i].data {
			n += uint64(list.len)
		}
		indices[c.indices[i].name] = n
	}
	c.mutex.Unlock()
	return m
}

// Cap returns the maximum capacity (size) of cache.
func (c *Cache[T]) Cap() int {
	c.mutex.Lock()
	m := c.maxSize
	c.mutex.Unlock()
	return m
}

func (c *Cache[T]) store_value(index *Index, key Key, value T) {
	// Alloc new index item.
	item := new_indexed_item()
	if cap(item.indexed) < len(c.indices) {

		// Preallocate item indices slice to prevent Go auto
		// allocating overly large slices we don't need.
		item.indexed = make([]*index_entry, 0, len(c.indices))
	}

	// Create COPY of value.
	value = c.copy(value)
	item.data = value

	if index != nil {
		// Append item to index.
		index.append(key, item)
	}

	// Get ptr to value data.
	ptr := unsafe.Pointer(&value)

	// Acquire key buf.
	buf := new_buffer()

	for i := range c.indices {
		// Get current index ptr.
		idx := &(c.indices[i])
		if idx == index {

			// Already stored under
			// this index, ignore.
			continue
		}

		// Extract fields comprising index key.
		parts := extract_fields(ptr, idx.fields)
		if parts == nil {
			continue
		}

		// Calculate index key.
		key := idx.key(buf, parts)
		if key.Zero() {
			continue
		}

		// Append item to index.
		idx.append(key, item)
	}

	// Add item to main lru list.
	c.lru.push_front(&item.elem)

	// Done with buf.
	free_buffer(buf)

	if c.lru.len > c.maxSize {
		// Cache has hit max size!
		// Drop the oldest element.
		ptr := c.lru.tail.data
		item := (*indexed_item)(ptr)
		c.delete(item)
	}
}

func (c *Cache[T]) store_error(index *Index, key Key, err error) {
	if index == nil {
		// nothing we
		// can do here.
		return
	}

	// Alloc new index item.
	item := new_indexed_item()
	if cap(item.indexed) < len(c.indices) {

		// Preallocate item indices slice to prevent Go auto
		// allocating overly large slices we don't need.
		item.indexed = make([]*index_entry, 0, len(c.indices))
	}

	// Set error val.
	item.data = err

	// Append item to index.
	index.append(key, item)

	// Add item to main lru list.
	c.lru.push_front(&item.elem)

	if c.lru.len > c.maxSize {
		// Cache has hit max size!
		// Drop the oldest element.
		ptr := c.lru.tail.data
		item := (*indexed_item)(ptr)
		c.delete(item)
	}
}

func (c *Cache[T]) delete(item *indexed_item) {
	for len(item.indexed) != 0 {
		// Pop last indexed entry from list.
		entry := item.indexed[len(item.indexed)-1]
		item.indexed = item.indexed[:len(item.indexed)-1]

		// Drop index_entry from index.
		entry.index.delete_entry(entry)
	}

	// Drop entry from lru list.
	c.lru.remove(&item.elem)

	// Free now-unused item.
	free_indexed_item(item)
}