package result

import (
	"context"
	"reflect"
	"time"
	_ "unsafe"

	"codeberg.org/gruf/go-cache/v3/ttl"
	"codeberg.org/gruf/go-errors/v2"
)

// Lookup represents a struct object lookup method in the cache.
type Lookup struct {
	// Name is a period ('.') separated string
	// of struct fields this Key encompasses.
	Name string

	// AllowZero indicates whether to accept and cache
	// under zero value keys; otherwise they are ignored.
	AllowZero bool

	// Multi allows specifying a key capable of storing
	// multiple results. Note this only supports invalidate.
	Multi bool

	// TODO: support toggling case sensitive lookups.
	// CaseSensitive bool
}
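
// Illustrative sketch (not part of the package API): for a hypothetical
// cached User struct with exported ID and Username fields, lookups could
// be declared as:
//
//	lookups := []Lookup{
//		{Name: "ID"},
//		{Name: "Username"},
//	}
//
// A period-separated name (per the Name docs above) combines multiple
// struct fields into a single composite lookup key.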

// Cache provides a means of caching value structures, along with
// the results of attempting to load them. An example use case of this
// cache would be in wrapping a database, allowing caching of sql.ErrNoRows.
type Cache[Value any] struct {
	cache   ttl.Cache[int64, result[Value]] // underlying result cache
	invalid func(Value)                     // store unwrapped invalidate callback.
	lookups structKeys                      // pre-determined struct lookups
	ignore  func(error) bool                // determines cacheable errors
	copy    func(Value) Value               // copies a Value type
	next    int64                           // update key counter
}

// New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] {
	var z Value

	// Determine generic type
	t := reflect.TypeOf(z)

	// Iteratively deref pointer type
	for t.Kind() == reflect.Pointer {
		t = t.Elem()
	}

	// Ensure that this is a struct type
	if t.Kind() != reflect.Struct {
		panic("generic parameter type must be struct (or ptr to)")
	}

	// Allocate new cache object
	c := &Cache[Value]{copy: copy}
	c.lookups = make([]structKey, len(lookups))

	for i, lookup := range lookups {
		// Create keyed field info for lookup
		c.lookups[i] = newStructKey(lookup, t)
	}

	// Create and initialize underlying cache
	c.cache.Init(0, cap, 0)
	c.SetEvictionCallback(nil)
	c.SetInvalidateCallback(nil)
	c.IgnoreErrors(nil)
	return c
}
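
// A minimal construction sketch, assuming the hypothetical User type from
// above and caller-chosen capacity / TTL values (all names here are
// illustrative, not part of this package):
//
//	cache := New([]Lookup{
//		{Name: "ID"},
//		{Name: "Username"},
//	}, func(u *User) *User {
//		u2 := new(User)
//		*u2 = *u // copy must return an independent value
//		return u2
//	}, 1000)
//
//	cache.SetTTL(5*time.Minute, false)
//	_ = cache.Start(time.Minute) // background eviction sweep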

// Start will start the cache background eviction routine with given sweep frequency. If already
// running or freq <= 0 is provided, this is a no-op. This will block until the eviction routine has started.
func (c *Cache[Value]) Start(freq time.Duration) bool {
	return c.cache.Start(freq)
}

// Stop will stop the cache background eviction routine. If not running this
// is a no-op. This will block until the eviction routine has stopped.
func (c *Cache[Value]) Stop() bool {
	return c.cache.Stop()
}

// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items
// in the cache; this will simply add the change in TTL to their current expiry time.
func (c *Cache[Value]) SetTTL(ttl time.Duration, update bool) {
	c.cache.SetTTL(ttl, update)
}

// SetEvictionCallback sets the eviction callback to the provided hook.
func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
	if hook == nil {
		// Ensure non-nil hook.
		hook = func(Value) {}
	}
	c.cache.SetEvictionCallback(func(pkey int64, res result[Value]) {
		c.cache.Lock()
		for _, key := range res.Keys {
			// Delete key->pkey lookup
			pkeys := key.info.pkeys
			delete(pkeys, key.key)
		}
		c.cache.Unlock()

		if res.Error != nil {
			// Skip error hooks
			return
		}

		// Call user hook.
		hook(res.Value)
	})
}
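
// For example (illustrative only), an eviction hook might be used to
// observe or release evicted values:
//
//	cache.SetEvictionCallback(func(u *User) {
//		log.Printf("evicted user %d", u.ID)
//	})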

// SetInvalidateCallback sets the invalidate callback to the provided hook.
func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
	if hook == nil {
		// Ensure non-nil hook.
		hook = func(Value) {}
	}

	// Store hook.
	c.invalid = hook

	c.cache.SetInvalidateCallback(func(pkey int64, res result[Value]) {
		c.cache.Lock()
		for _, key := range res.Keys {
			// Delete key->pkey lookup
			pkeys := key.info.pkeys
			delete(pkeys, key.key)
		}
		c.cache.Unlock()

		if res.Error != nil {
			// Skip error hooks
			return
		}

		// Call user hook.
		hook(res.Value)
	})
}

// IgnoreErrors allows setting a function hook to determine which error types should (or should not) be cached.
func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) {
	if ignore == nil {
		ignore = func(err error) bool {
			return errors.Comparable(
				err,
				context.Canceled,
				context.DeadlineExceeded,
			)
		}
	}
	c.cache.Lock()
	c.ignore = ignore
	c.cache.Unlock()
}
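
// By default the context errors above are treated as uncacheable. As a
// sketch of a custom hook (assuming the database wrapping use case from
// the Cache docs, and the standard library errors.Is), a caller could
// cache only sql.ErrNoRows and skip caching every other error:
//
//	cache.IgnoreErrors(func(err error) bool {
//		// Returning true means "do not cache this error".
//		return !errors.Is(err, sql.ErrNoRows)
//	})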

// Load will attempt to load an existing result from the cache for the given lookup
// and key parts, else calling the provided load function and caching the result.
func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) {
	var (
		zero Value
		res  result[Value]
		ok   bool
	)

	// Get lookup key info by name.
	keyInfo := c.lookups.get(lookup)
	if !keyInfo.unique {
		panic("non-unique lookup does not support load: " + lookup)
	}

	// Generate cache key string.
	ckey := keyInfo.genKey(keyParts)

	// Acquire cache lock
	c.cache.Lock()

	// Look for primary cache key
	pkeys := keyInfo.pkeys[ckey]

	if ok = (len(pkeys) > 0); ok {
		var entry *ttl.Entry[int64, result[Value]]

		// Fetch the result for primary key
		entry, ok = c.cache.Cache.Get(pkeys[0])

		if ok {
			// Since the invalidation / eviction hooks acquire a mutex
			// lock separately, and only at this point are the pkeys
			// updated, there is a chance that a primary key may return
			// no matching entry. Hence we have to check for it here.
			res = entry.Value
		}
	}

	// Done with lock
	c.cache.Unlock()

	if !ok {
		// Generate fresh result.
		value, err := load()

		if err != nil {
			if c.ignore(err) {
				// don't cache this error type
				return zero, err
			}

			// Store error result.
			res.Error = err

			// This load returned an error, only
			// store this item under provided key.
			res.Keys = []cacheKey{{
				info: keyInfo,
				key:  ckey,
			}}
		} else {
			// Store value result.
			res.Value = value

			// This was a successful load, generate keys.
			res.Keys = c.lookups.generate(res.Value)
		}

		var evict func()

		// Acquire cache lock.
		c.cache.Lock()
		defer func() {
			c.cache.Unlock()
			if evict != nil {
				evict()
			}
		}()

		// Store result in cache.
		evict = c.store(res)
	}

	// Catch and return error
	if res.Error != nil {
		return zero, res.Error
	}

	// Return a copy of value from cache
	return c.copy(res.Value), nil
}
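
// A typical call site sketch (illustrative; "db" and its method are
// hypothetical), loading by a unique "ID" lookup:
//
//	user, err := cache.Load("ID", func() (*User, error) {
//		return db.GetUserByID(ctx, id)
//	}, id)
//
// On a cache hit the load function is never called; on a miss its value
// (or cacheable error) is stored before a copy is returned.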

// Store will call the given store function, and on success store the value in the cache as a positive result.
func (c *Cache[Value]) Store(value Value, store func() error) error {
	// Attempt to store this value.
	if err := store(); err != nil {
		return err
	}

	// Prepare cached result.
	result := result[Value]{
		Keys:  c.lookups.generate(value),
		Value: c.copy(value),
		Error: nil,
	}

	var evict func()

	// Acquire cache lock.
	c.cache.Lock()
	defer func() {
		c.cache.Unlock()
		if evict != nil {
			evict()
		}
	}()

	// Store result in cache.
	evict = c.store(result)

	// Call invalidate.
	c.invalid(value)

	return nil
}
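
// Sketch of a write path (illustrative; "db.PutUser" is hypothetical):
//
//	if err := cache.Store(user, func() error {
//		return db.PutUser(ctx, user)
//	}); err != nil {
//		return err
//	}
//
// The value is only cached (and the invalidate callback fired) when the
// store function returns nil.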

// Has checks the cache for a positive result under the given lookup and key parts.
func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
	var res result[Value]
	var ok bool

	// Get lookup key info by name.
	keyInfo := c.lookups.get(lookup)
	if !keyInfo.unique {
		panic("non-unique lookup does not support has: " + lookup)
	}

	// Generate cache key string.
	ckey := keyInfo.genKey(keyParts)

	// Acquire cache lock
	c.cache.Lock()

	// Look for primary key for cache key
	pkeys := keyInfo.pkeys[ckey]

	if ok = (len(pkeys) > 0); ok {
		var entry *ttl.Entry[int64, result[Value]]

		// Fetch the result for primary key
		entry, ok = c.cache.Cache.Get(pkeys[0])

		if ok {
			// Since the invalidation / eviction hooks acquire a mutex
			// lock separately, and only at this point are the pkeys
			// updated, there is a chance that a primary key may return
			// no matching entry. Hence we have to check for it here.
			res = entry.Value
		}
	}

	// Done with lock
	c.cache.Unlock()

	// Check for non-error result.
	return ok && (res.Error == nil)
}

// Invalidate will invalidate any result from the cache found under given lookup and key parts.
func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
	// Get lookup key info by name.
	keyInfo := c.lookups.get(lookup)

	// Generate cache key string.
	ckey := keyInfo.genKey(keyParts)

	// Look for primary key for cache key
	c.cache.Lock()
	pkeys := keyInfo.pkeys[ckey]
	delete(keyInfo.pkeys, ckey)
	c.cache.Unlock()

	// Invalidate all primary keys.
	c.cache.InvalidateAll(pkeys...)
}
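
// For example (illustrative), dropping any cached result for a username:
//
//	cache.Invalidate("Username", "alice")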

// Clear empties the cache, calling the invalidate callback.
func (c *Cache[Value]) Clear() { c.cache.Clear() }

// store will cache this result under all of its required cache keys.
func (c *Cache[Value]) store(res result[Value]) (evict func()) {
	// Get primary key
	pnext := c.next
	c.next++
	if pnext > c.next {
		panic("cache primary key overflow")
	}

	for _, key := range res.Keys {
		// Look for cache primary keys.
		pkeys := key.info.pkeys[key.key]

		if key.info.unique && len(pkeys) > 0 {
			for _, conflict := range pkeys {
				// Get the overlapping result with this key.
				entry, _ := c.cache.Cache.Get(conflict)

				// From conflicting entry, drop this key, this
				// will prevent eviction cleanup key confusion.
				entry.Value.Keys.drop(key.info.name)

				if len(entry.Value.Keys) == 0 {
					// We just over-wrote the only lookup key for
					// this value, so we drop its primary key too.
					c.cache.Cache.Delete(conflict)
				}
			}

			// Drop existing.
			pkeys = pkeys[:0]
		}

		// Store primary key lookup.
		pkeys = append(pkeys, pnext)
		key.info.pkeys[key.key] = pkeys
	}

	// Store main entry under primary key, using evict hook if needed
	c.cache.Cache.SetWithHook(pnext, &ttl.Entry[int64, result[Value]]{
		Expiry: c.expiry(),
		Key:    pnext,
		Value:  res,
	}, func(_ int64, item *ttl.Entry[int64, result[Value]]) {
		evict = func() { c.cache.Evict(item.Key, item.Value) }
	})

	return evict
}

//go:linkname runtime_nanotime runtime.nanotime
func runtime_nanotime() uint64

// expiry returns the next expiry time to use for an entry,
// which is equivalent to time.Now().Add(ttl), or zero if disabled.
func (c *Cache[Value]) expiry() uint64 {
	if ttl := c.cache.TTL; ttl > 0 {
		return runtime_nanotime() +
			uint64(c.cache.TTL)
	}
	return 0
}

type result[Value any] struct {
	// keys accessible under
	Keys cacheKeys

	// cached value
	Value Value

	// cached error
	Error error
}