package structr

import (
	"reflect"
	"strings"
	"sync"
	"unsafe"

	"codeberg.org/gruf/go-byteutil"

	"github.com/dolthub/swiss"
)

// IndexConfig defines config variables
// for initializing a struct index.
type IndexConfig struct {

	// Fields should contain a comma-separated
	// list of struct fields used when generating
	// keys for this index. Nested fields should
	// be specified using periods. An example:
	// "Username,Favorites.Color"
	//
	// Note that nested fields where the nested
	// struct field is a ptr are supported, but
	// nil ptr values in nesting will result in
	// that particular value NOT being indexed.
	// e.g. with "Favorites.Color" if *Favorites
	// is nil then it will not be indexed.
	//
	// Field types supported include any of those
	// supported by the `go-mangler` library.
	Fields string

	// Multiple indicates whether to accept multiple
	// possible values for any single index key. The
	// default behaviour is to only accept one value
	// and overwrite existing on any write operation.
	Multiple bool

	// AllowZero indicates whether to accept zero
	// value fields in index keys. i.e. whether to
	// index structs for this set of field values
	// IF any one of those field values is the zero
	// value for that type. The default behaviour
	// is to skip indexing structs for this lookup
	// when any of the indexing fields are zero.
	AllowZero bool
}
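
// Illustrative index configurations (a sketch only; the struct fields
// named here are hypothetical and not part of this package):
//
//	_ = IndexConfig{Fields: "ID"}                                       // unique: one value per key
//	_ = IndexConfig{Fields: "Username,Favorites.Color", Multiple: true} // many values per key
//	_ = IndexConfig{Fields: "DisplayName", AllowZero: true}             // zero values also indexed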

// Index is an exposed Cache internal model, used to
// extract struct keys, generate hash checksums for them
// and store struct results by the init defined config.
// This model is exposed to provide faster lookups in the
// case that you would like to manually provide the used
// index via the Cache.___By() series of functions, or
// access the underlying index key generator.
type Index struct {

	// ptr is a pointer to
	// the source Cache/Queue
	// index is attached to.
	ptr unsafe.Pointer

	// name is the actual name of this
	// index, which is the unparsed
	// string value of contained fields.
	name string

	// backing data store of the index, containing
	// the cached results contained within wrapping
	// index_entry{} which also contains the exact
	// key each result is stored under. the hash map
	// is keyed by the generated key string.
	data *swiss.Map[string, *list]

	// struct fields encompassed by
	// keys (+ hashes) of this index.
	fields []struct_field

	// index flags:
	// - 1 << 0 = unique
	// - 1 << 1 = allow zero
	flags uint8
}
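
// Rough shape of the backing store (purely illustrative; the key
// strings shown are made up, real keys are built from go-mangler
// output joined by '.' separators):
//
//	data: "kim.red."   -> list{ index_entry{key, item}, ... }
//	      "tobi.blue." -> list{ index_entry{key, item} }
//
// i.e. each generated key string maps to a doubly-linked list of
// entries stored under that key (at most one entry when unique).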

// Name returns the receiving Index name.
func (i *Index) Name() string {
	return i.name
}

// Key generates Key{} from given parts for
// the type of lookup this Index uses in cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Key(parts ...any) Key {
	buf := new_buffer()
	key := i.key(buf, parts)
	free_buffer(buf)
	return key
}
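
// A sketch of generating a lookup key for an index on
// "Username,Favorites.Color" (idx here is hypothetical; this
// panics if the number or types of parts do not match the fields):
//
//	key := idx.Key("kim", "red")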

// Keys generates []Key{} from given (multiple) parts
// for the type of lookup this Index uses in the cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Keys(parts ...[]any) []Key {
	keys := make([]Key, 0, len(parts))
	buf := new_buffer()
	for _, parts := range parts {
		key := i.key(buf, parts)
		if key.Zero() {
			continue
		}
		keys = append(keys, key)
	}
	free_buffer(buf)
	return keys
}
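
// As above, but generating several keys at once (illustrative only);
// note that part sets producing a zero Key{} are silently dropped:
//
//	keys := idx.Keys(
//		[]any{"kim", "red"},
//		[]any{"tobi", "blue"},
//	)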

// init will initialize the index with given struct type, config and capacity.
func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
	switch {
	// The only 2 types we support are
	// structs, and ptrs to a struct.
	case t.Kind() == reflect.Struct:
	case t.Kind() == reflect.Pointer &&
		t.Elem().Kind() == reflect.Struct:
	default:
		panic("index only supports struct{} and *struct{}")
	}

	// Set name from the raw
	// struct fields string.
	i.name = cfg.Fields

	// Set struct flags.
	if cfg.AllowZero {
		set_allow_zero(&i.flags)
	}
	if !cfg.Multiple {
		set_is_unique(&i.flags)
	}

	// Split to get containing struct fields.
	fields := strings.Split(cfg.Fields, ",")

	// Preallocate expected struct field slice.
	i.fields = make([]struct_field, len(fields))
	for x, name := range fields {

		// Split name to account for nesting.
		names := strings.Split(name, ".")

		// Look for usable struct field.
		i.fields[x] = find_field(t, names)
	}

	// Initialize index_entry list store.
	i.data = swiss.NewMap[string, *list](uint32(cap))
}

// get_one will fetch one indexed item under key.
func (i *Index) get_one(key Key) *indexed_item {
	// Get list stored under key.
	l, _ := i.data.Get(key.key)
	if l == nil {
		return nil
	}

	// Extract entry from first list elem.
	entry := (*index_entry)(l.head.data)

	// Check contains expected key.
	if !entry.key.Equal(key) {
		return nil
	}

	return entry.item
}

// get will fetch all indexed items under key, passing each to hook.
func (i *Index) get(key Key, hook func(*indexed_item)) {
	if hook == nil {
		panic("nil hook")
	}

	// Get list stored under key.
	l, _ := i.data.Get(key.key)
	if l == nil {
		return
	}

	// Extract entry from first list elem.
	entry := (*index_entry)(l.head.data)

	// Check contains expected key.
	if !entry.key.Equal(key) {
		return
	}

	// Iterate all entries in list.
	l.rangefn(func(elem *list_elem) {

		// Extract element entry + item.
		entry := (*index_entry)(elem.data)
		item := entry.item

		// Pass to hook.
		hook(item)
	})
}
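
// e.g. (internal, purely illustrative): collecting every item stored
// under a key by way of the hook callback:
//
//	var items []*indexed_item
//	idx.get(key, func(item *indexed_item) {
//		items = append(items, item)
//	})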

// key uses the given buffer to generate a Key{} from the raw parts,
// mangling each part into the buffer to build the final key string.
func (i *Index) key(buf *byteutil.Buffer, parts []any) Key {
	if len(parts) != len(i.fields) {
		panicf("incorrect number of key parts: want=%d received=%d",
			len(i.fields),
			len(parts),
		)
	}
	buf.B = buf.B[:0]
	if !allow_zero(i.flags) {
		for x, field := range i.fields {
			before := len(buf.B)
			buf.B = field.mangle(buf.B, parts[x])
			if string(buf.B[before:]) == field.zerostr {
				return Key{}
			}
			buf.B = append(buf.B, '.')
		}
	} else {
		for x, field := range i.fields {
			buf.B = field.mangle(buf.B, parts[x])
			buf.B = append(buf.B, '.')
		}
	}
	return Key{
		raw: parts,
		key: string(buf.B),
	}
}
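
// A rough sketch of the layout produced above (mangled output shown
// as <p0>, <p1> since the exact bytes depend on go-mangler): the key
// string is each mangled part followed by a '.' separator, so for an
// index over two fields:
//
//	key.key == "<p0>.<p1>."
//
// and, unless the allow-zero flag is set, any zero-valued part
// short-circuits to an empty Key{} so the struct is simply not
// indexed under this lookup.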

// append will append the given index entry to the appropriate
// doubly-linked-list in the index hashmap. this handles the case
// of key collisions and overwriting 'unique' entries.
func (i *Index) append(key Key, item *indexed_item) {
	// Look for existing.
	l, _ := i.data.Get(key.key)

	if l == nil {

		// Allocate new.
		l = new_list()
		i.data.Put(key.key, l)

	} else if is_unique(i.flags) {

		// Remove head.
		elem := l.head
		l.remove(elem)

		// Drop index from inner item.
		e := (*index_entry)(elem.data)
		e.item.drop_index(e)

		// Free unused entry.
		free_index_entry(e)
	}

	// Prepare new index entry.
	entry := new_index_entry()
	entry.item = item
	entry.key = key
	entry.index = i

	// Add ourselves to item's index tracker.
	item.indexed = append(item.indexed, entry)

	// Add entry to index list.
	l.push_front(&entry.elem)
}

// delete will remove all indexed items under key, passing each to hook.
func (i *Index) delete(key Key, hook func(*indexed_item)) {
	if hook == nil {
		panic("nil hook")
	}

	// Get list stored under key.
	l, _ := i.data.Get(key.key)
	if l == nil {
		return
	}

	// Extract entry from first list elem.
	entry := (*index_entry)(l.head.data)

	// Check contains expected key.
	if !entry.key.Equal(key) {
		return
	}

	// Delete data stored under key.
	i.data.Delete(key.key)

	// Iterate entries in list.
	for x := 0; x < l.len; x++ {

		// Pop list head.
		elem := l.head
		l.remove(elem)

		// Extract element entry + item.
		entry := (*index_entry)(elem.data)
		item := entry.item

		// Drop index from item.
		item.drop_index(entry)

		// Free now-unused entry.
		free_index_entry(entry)

		// Pass to hook.
		hook(item)
	}

	// Release list.
	free_list(l)
}

// delete_entry deletes the given index entry.
func (i *Index) delete_entry(entry *index_entry) {
	// Get list stored under key.
	l, _ := i.data.Get(entry.key.key)
	if l == nil {
		return
	}

	// Remove list entry.
	l.remove(&entry.elem)

	if l.len == 0 {
		// Remove entry list from map.
		i.data.Delete(entry.key.key)

		// Release list.
		free_list(l)
	}

	// Drop this index from item.
	entry.item.drop_index(entry)
}

// compact will reduce the size of underlying
// index map if the cap vastly exceeds len.
func (i *Index) compact() {

	// Maximum load factor before
	// 'swiss' allocates new hmap:
	// maxLoad = 7 / 8
	//
	// So we compact once more than
	// maxLoad/2 (i.e. 7/16) of the map's
	// capacity is sitting empty, dropping
	// the now-unused buckets.
	len := i.data.Count()
	cap := i.data.Capacity()
	if cap-len > (cap*7)/(8*2) {

		// Create a new map only as big as required.
		data := swiss.NewMap[string, *list](uint32(len))
		i.data.Iter(func(k string, v *list) (stop bool) {
			data.Put(k, v)
			return false
		})

		// Set new map.
		i.data = data
	}
}
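
// Worked example of the threshold above (numbers purely illustrative):
// with cap = 1024 and len = 100 the empty space is 924, and the cutoff
// is (1024*7)/16 = 448; since 924 > 448 the map is rebuilt at size 100.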

// index_entry represents a single entry
// in an Index{}, where it will be accessible
// by Key{} pointing to a containing list{}.
type index_entry struct {

	// list elem that entry is stored
	// within, under containing index.
	// elem.data is ptr to index_entry.
	elem list_elem

	// generated key string
	// + raw key part data
	key Key

	// index this is stored in.
	index *Index

	// underlying indexed item.
	item *indexed_item
}

var index_entry_pool sync.Pool

// new_index_entry returns a new prepared index_entry.
func new_index_entry() *index_entry {
	v := index_entry_pool.Get()
	if v == nil {
		v = new(index_entry)
	}
	entry := v.(*index_entry)
	ptr := unsafe.Pointer(entry)
	entry.elem.data = ptr
	return entry
}

// free_index_entry releases the index_entry.
func free_index_entry(entry *index_entry) {
	entry.elem.data = nil
	entry.key = Key{}
	entry.index = nil
	entry.item = nil
	index_entry_pool.Put(entry)
}

func is_unique(f uint8) bool {
	const mask = uint8(1) << 0
	return f&mask != 0
}

func set_is_unique(f *uint8) {
	const mask = uint8(1) << 0
	(*f) |= mask
}

func allow_zero(f uint8) bool {
	const mask = uint8(1) << 1
	return f&mask != 0
}

func set_allow_zero(f *uint8) {
	const mask = uint8(1) << 1
	(*f) |= mask
}
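
// How the flag bits combine (values shown for reference only):
//
//	flags == 0b01 // unique only (Multiple: false)
//	flags == 0b10 // allow zero only (AllowZero: true, Multiple: true)
//	flags == 0b11 // unique + allow zero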