[chore] pull in latest go-cache, go-runners versions (#1306)

Signed-off-by: kim <grufwub@gmail.com>

kim 2023-01-06 10:16:09 +00:00 committed by GitHub
parent 0dbe6c514f
commit adbc87700a
23 changed files with 329 additions and 865 deletions

go.mod

@@ -5,13 +5,13 @@ go 1.19
require (
codeberg.org/gruf/go-bytesize v1.0.2
codeberg.org/gruf/go-byteutil v1.0.2
codeberg.org/gruf/go-cache/v3 v3.2.0
codeberg.org/gruf/go-cache/v3 v3.2.2
codeberg.org/gruf/go-debug v1.2.0
codeberg.org/gruf/go-errors/v2 v2.0.2
codeberg.org/gruf/go-kv v1.5.2
codeberg.org/gruf/go-logger/v2 v2.2.1
codeberg.org/gruf/go-mutexes v1.1.4
codeberg.org/gruf/go-runners v1.3.1
codeberg.org/gruf/go-runners v1.4.0
codeberg.org/gruf/go-store/v2 v2.0.10
github.com/abema/go-mp4 v0.9.0
github.com/buckket/go-blurhash v1.1.0
@@ -69,12 +69,11 @@ require (
codeberg.org/gruf/go-fastpath v1.0.3 // indirect
codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
codeberg.org/gruf/go-hashenc v1.0.2 // indirect
codeberg.org/gruf/go-mangler v1.1.1 // indirect
codeberg.org/gruf/go-mangler v1.2.2 // indirect
codeberg.org/gruf/go-maps v1.0.3 // indirect
codeberg.org/gruf/go-pools v1.1.0 // indirect
codeberg.org/gruf/go-sched v1.1.1 // indirect
codeberg.org/gruf/go-sched v1.2.0 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b // indirect
github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413 // indirect

go.sum

@@ -69,8 +69,8 @@ codeberg.org/gruf/go-bytesize v1.0.2/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacp
codeberg.org/gruf/go-byteutil v1.0.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
codeberg.org/gruf/go-byteutil v1.0.2 h1:OesVyK5VKWeWdeDR00zRJ+Oy8hjXx1pBhn7WVvcZWVE=
codeberg.org/gruf/go-byteutil v1.0.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
codeberg.org/gruf/go-cache/v3 v3.2.0 h1:pHJhS3SqufVnA2bxgzQpBh9Mfsljqulx2ynpy6thTE8=
codeberg.org/gruf/go-cache/v3 v3.2.0/go.mod h1:d4xafgOjVE+4+82WjIqqJl8NQusXkgUHbkTuXoeB3fA=
codeberg.org/gruf/go-cache/v3 v3.2.2 h1:hq6/RITgpcArjzbYSyo3uFxfIw7wW3KqAQjEaN7dj58=
codeberg.org/gruf/go-cache/v3 v3.2.2/go.mod h1:+Eje6nCvN8QF71VyYjMWMnkdv6t1kHnCO/SvyC4K12Q=
codeberg.org/gruf/go-debug v1.2.0 h1:WBbTMnK1ArFKUmgv04aO2JiC/daTOB8zQGi521qb7OU=
codeberg.org/gruf/go-debug v1.2.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
@@ -89,18 +89,18 @@ codeberg.org/gruf/go-kv v1.5.2 h1:B0RkAXLUXYn3Za1NzTXOcUvAc+JUC2ZadTMkCUDa0mc=
codeberg.org/gruf/go-kv v1.5.2/go.mod h1:al6ASW/2CbGqz2YcM8B00tvWnVi1bU1CH3HYs5tZxo4=
codeberg.org/gruf/go-logger/v2 v2.2.1 h1:RP2u059EQKTBFV3cN8X6xDxNk2RkzqdgXGKflKqB7Oc=
codeberg.org/gruf/go-logger/v2 v2.2.1/go.mod h1:m/vBfG5jNUmYXI8Hg9aVSk7Pn8YgEBITQB/B/CzdRss=
codeberg.org/gruf/go-mangler v1.1.1 h1:Ci56Le8PKrfESTNYjIZu3AoqAf/O2mX8BTWC6EuN7HA=
codeberg.org/gruf/go-mangler v1.1.1/go.mod h1:z6nL/uyp1AnEFPMD7YO3J/kQTY6fBPlIjwhqBMyPExo=
codeberg.org/gruf/go-mangler v1.2.2 h1:fisdWXa6dW4p1uYdbz5Of3R4lDDFPuRqKavGI9O03Rc=
codeberg.org/gruf/go-mangler v1.2.2/go.mod h1:X/7URkFhLBAVKkTxmqF11Oxw3A6pSSxgPeHssQaiq28=
codeberg.org/gruf/go-maps v1.0.3 h1:VDwhnnaVNUIy5O93CvkcE2IZXnMB1+IJjzfop9V12es=
codeberg.org/gruf/go-maps v1.0.3/go.mod h1:D5LNDxlC9rsDuVQVM6JObaVGAdHB6g2dTdOdkh1aXWA=
codeberg.org/gruf/go-mutexes v1.1.4 h1:HWaIZavPL92SBJxNOlIXAmAT5CB2hAs72/lBN31jnzM=
codeberg.org/gruf/go-mutexes v1.1.4/go.mod h1:1j/6/MBeBQUedAtAtysLLnBKogfOZAxdym0E3wlaBD8=
codeberg.org/gruf/go-pools v1.1.0 h1:LbYP24eQLl/YI1fSU2pafiwhGol1Z1zPjRrMsXpF88s=
codeberg.org/gruf/go-pools v1.1.0/go.mod h1:ZMYpt/DjQWYC3zFD3T97QWSFKs62zAUGJ/tzvgB9D68=
codeberg.org/gruf/go-runners v1.3.1 h1:d/OQMMMiA6yPaDSbSr0/Jc+lucWmm7AiAZjWffpNKVQ=
codeberg.org/gruf/go-runners v1.3.1/go.mod h1:rl0EdZNozkRMb21DAtOL5L4oTfmslYQdZgq2RMMc/H4=
codeberg.org/gruf/go-sched v1.1.1 h1:YtLSQhpypzuD3HTup5oF7LLWB79gTL4nqW06kH4Vwks=
codeberg.org/gruf/go-sched v1.1.1/go.mod h1:SRcdP/5qim+EBT3n3r4aUra1C30yPqV4OJOXuqvgdQM=
codeberg.org/gruf/go-runners v1.4.0 h1:977nVjigAdH95+VAB/a6tyBJOKk99e60h+mfHzBs/n8=
codeberg.org/gruf/go-runners v1.4.0/go.mod h1:kUM6GYL7dC+f9Sc/XuwdvB/mB4FuI4fJFb150ADMsmw=
codeberg.org/gruf/go-sched v1.2.0 h1:utZl/7srVcbh30rFw42LC2/cMtak4UZRxtIOt/5riNA=
codeberg.org/gruf/go-sched v1.2.0/go.mod h1:v4ueWq+fAtAw9JYt4aFXvadI1YoOqofgHQgszRYuslA=
codeberg.org/gruf/go-store/v2 v2.0.10 h1:/2iZ4j29A//EhM3XziJP6SxtdIcaAyPmJEv31+6XD8g=
codeberg.org/gruf/go-store/v2 v2.0.10/go.mod h1:KMRE173S6W2sGhuIa4jY/OPIO65F9++7rmWTfZ4xTeY=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
@@ -108,7 +108,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/abema/go-mp4 v0.9.0 h1:WFkzn0J8uYTQ2MIWfgCaFHRB3VDkird5JncIjuuKjGI=
github.com/abema/go-mp4 v0.9.0/go.mod h1:vPl9t5ZK7K0x68jh12/+ECWBCXoWuIDtNgPtU2f04ws=
@@ -122,7 +121,6 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3
github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
@@ -542,7 +540,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=


@@ -56,8 +56,8 @@ func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Va
c.lookups = make([]structKey, len(lookups))
for i, lookup := range lookups {
// Generate keyed field info for lookup
c.lookups[i] = genStructKey(lookup, t)
// Create keyed field info for lookup
c.lookups[i] = newStructKey(lookup, t)
}
// Create and initialize underlying cache
@@ -159,7 +159,7 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
keyInfo := c.lookups.get(lookup)
// Generate cache key string.
ckey := genKey(keyParts...)
ckey := keyInfo.genKey(keyParts)
// Acquire cache lock
c.cache.Lock()
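As context for the genKey change here, a minimal sketch of how this Load path is driven from caller code; the User type, the "ID" lookup, and the loader below are illustrative only, and TTL/eviction setup is omitted:

```
package main

import (
	"fmt"

	"codeberg.org/gruf/go-cache/v3/result"
)

type User struct {
	ID       int64
	Username string
}

func main() {
	// One Lookup per indexable field; the copy fn must return a safe copy.
	cache := result.New([]result.Lookup{{Name: "ID"}}, func(u *User) *User {
		u2 := *u
		return &u2
	}, 100)

	// Load builds the cache key from keyParts via the lookup's
	// per-field manglers (keyInfo.genKey above), calling the
	// loader only on a cache miss.
	user, err := cache.Load("ID", func() (*User, error) {
		return &User{ID: 42, Username: "kim"}, nil
	}, int64(42))
	fmt.Println(user, err)
}
```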
@@ -248,17 +248,17 @@ func (c *Cache[Value]) Store(value Value, store func() error) error {
func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
var res result[Value]
// Get lookup key type by name.
keyType := c.lookups.get(lookup)
// Get lookup key info by name.
keyInfo := c.lookups.get(lookup)
// Generate cache key string.
ckey := genKey(keyParts...)
ckey := keyInfo.genKey(keyParts)
// Acquire cache lock
c.cache.Lock()
// Look for primary key for cache key
pkey, ok := keyType.pkeys[ckey]
pkey, ok := keyInfo.pkeys[ckey]
if ok {
// Fetch the result for primary key
@@ -275,15 +275,15 @@ func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
// Invalidate will invalidate any result from the cache found under given lookup and key parts.
func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
// Get lookup key type by name.
keyType := c.lookups.get(lookup)
// Get lookup key info by name.
keyInfo := c.lookups.get(lookup)
// Generate cache key string.
ckey := genKey(keyParts...)
ckey := keyInfo.genKey(keyParts)
// Look for primary key for cache key
c.cache.Lock()
pkey, ok := keyType.pkeys[ckey]
pkey, ok := keyInfo.pkeys[ckey]
c.cache.Unlock()
if !ok {


@@ -1,6 +1,7 @@
package result
import (
"fmt"
"reflect"
"strings"
"sync"
@@ -51,10 +52,10 @@ func (sk structKeys) generate(a any) []cacheKey {
buf.B = buf.B[:0]
// Append each field value to buffer.
for _, idx := range sk[i].fields {
fv := v.Field(idx)
for _, field := range sk[i].fields {
fv := v.Field(field.index)
fi := fv.Interface()
buf.B = mangler.Append(buf.B, fi)
buf.B = field.mangle(buf.B, fi)
buf.B = append(buf.B, '.')
}
@@ -123,17 +124,58 @@ type structKey struct {
// fields is a slice of runtime struct field
// indices, of the fields encompassed by this key.
fields []int
fields []structField
// pkeys is a lookup of stored struct key values
// to the primary cache lookup key (int64).
pkeys map[string]int64
}
// genStructKey will generate a structKey{} information object for user-given lookup
type structField struct {
// index is the reflect index of this struct field.
index int
// mangle is the mangler function for
// serializing values of this struct field.
mangle mangler.Mangler
}
// genKey generates a cache key string for given key parts (i.e. serializes them using "go-mangler").
func (sk structKey) genKey(parts []any) string {
// Check for the expected no. of key parts.
if len(parts) != len(sk.fields) {
panic(fmt.Sprintf("incorrect no. key parts provided: want=%d received=%d", len(sk.fields), len(parts)))
}
// Acquire byte buffer
buf := getBuf()
defer putBuf(buf)
buf.Reset()
// Encode each key part
for i, part := range parts {
buf.B = sk.fields[i].mangle(buf.B, part)
buf.B = append(buf.B, '.')
}
// Drop last '.'
buf.Truncate(1)
// Return string copy
return string(buf.B)
}
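To illustrate the key format genKey builds: each part is serialized by its field's mangler and joined with '.', with the final separator dropped (that is what buf.Truncate(1) does above). A rough standalone sketch of that join, using strconv as a stand-in for the real manglers:

```
package main

import (
	"fmt"
	"strconv"
)

func main() {
	parts := []int64{42, 7}
	buf := make([]byte, 0, 32)
	for _, p := range parts {
		buf = strconv.AppendInt(buf, p, 10) // stand-in for field.mangle
		buf = append(buf, '.')
	}
	buf = buf[:len(buf)-1] // drop last '.'
	fmt.Println(string(buf)) // prints "42.7"
}
```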
// newStructKey will generate a structKey{} information object for user-given lookup
// key information, and the receiving generic parameter's type information. Panics on error.
func genStructKey(lk Lookup, t reflect.Type) structKey {
var zeros []any
func newStructKey(lk Lookup, t reflect.Type) structKey {
var (
sk structKey
zeros []any
)
// Set the lookup name
sk.name = lk.Name
// Split dot-separated lookup to get
// the individual struct field names
@@ -142,8 +184,8 @@ func genStructKey(lk Lookup, t reflect.Type) structKey {
panic("no key fields specified")
}
// Pre-allocate slice of expected length
fields := make([]int, len(names))
// Allocate the mangler and field indices slice.
sk.fields = make([]structField, len(names))
for i, name := range names {
// Get field info for given name
@@ -158,60 +200,30 @@ func genStructKey(lk Lookup, t reflect.Type) structKey {
}
// Set the runtime field index
fields[i] = ft.Index[0]
sk.fields[i].index = ft.Index[0]
// Allocate new instance of field
v := reflect.New(ft.Type)
v = v.Elem()
// Fetch mangler for field type.
sk.fields[i].mangle = mangler.Get(ft.Type)
if !lk.AllowZero {
// Append the zero value interface
zeros = append(zeros, v.Interface())
}
}
var zvalue string
if len(zeros) > 0 {
// Generate zero value string
zvalue = genKey(zeros...)
sk.zero = sk.genKey(zeros)
}
return structKey{
name: lk.Name,
zero: zvalue,
fields: fields,
pkeys: make(map[string]int64),
}
}
// Allocate primary lookup map
sk.pkeys = make(map[string]int64)
// genKey generates a cache key for given key values.
func genKey(parts ...any) string {
if len(parts) == 0 {
// Panic to prevent annoying usecase
// where user forgets to pass lookup
// and instead only passes a key part,
// e.g. cache.Get("key")
// which then always returns false.
panic("no key parts provided")
}
// Acquire byte buffer
buf := getBuf()
defer putBuf(buf)
buf.Reset()
// Encode each key part
for _, part := range parts {
buf.B = mangler.Append(buf.B, part)
buf.B = append(buf.B, '.')
}
// Drop last '.'
buf.Truncate(1)
// Return string copy
return string(buf.B)
return sk
}
// isExported checks whether function name is exported.


@@ -6,15 +6,15 @@
"codeberg.org/gruf/go-sched"
)
// scheduler is the global cache runtime scheduler
// for handling regular cache evictions.
// scheduler is the global cache runtime
// scheduler for handling cache evictions.
var scheduler sched.Scheduler
// schedule will add the given sweep routine to the global scheduler, and start the global scheduler.
func schedule(sweep func(time.Time), freq time.Duration) func() {
if !scheduler.Running() {
// ensure running
_ = scheduler.Start()
// ensure sched running
_ = scheduler.Start(nil)
}
return scheduler.Schedule(sched.NewJob(sweep).Every(freq))
}


@@ -19,8 +19,6 @@ pkg: codeberg.org/gruf/go-mangler
cpu: 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz
BenchmarkMangle
BenchmarkMangle-8 723278 1593 ns/op 1168 B/op 120 allocs/op
BenchmarkMangleHash
BenchmarkMangleHash-8 405380 2788 ns/op 4496 B/op 214 allocs/op
BenchmarkJSON
BenchmarkJSON-8 199360 6116 ns/op 4243 B/op 142 allocs/op
BenchmarkBinary


@@ -118,20 +118,16 @@ func loadReflect(t reflect.Type) (Mangler, rMangler) {
reflect.Uintptr:
return mangle_platform_int, nil
case reflect.Int8,
reflect.Uint8:
case reflect.Int8, reflect.Uint8:
return mangle_8bit, nil
case reflect.Int16,
reflect.Uint16:
case reflect.Int16, reflect.Uint16:
return mangle_16bit, nil
case reflect.Int32,
reflect.Uint32:
case reflect.Int32, reflect.Uint32:
return mangle_32bit, nil
case reflect.Int64,
reflect.Uint64:
case reflect.Int64, reflect.Uint64:
return mangle_64bit, nil
case reflect.Float32:
@@ -214,20 +210,16 @@ func loadReflectKnownPtr(et reflect.Type) Mangler {
reflect.Uintptr:
return mangle_platform_int_ptr
case reflect.Int8,
reflect.Uint8:
case reflect.Int8, reflect.Uint8:
return mangle_8bit_ptr
case reflect.Int16,
reflect.Uint16:
case reflect.Int16, reflect.Uint16:
return mangle_16bit_ptr
case reflect.Int32,
reflect.Uint32:
case reflect.Int32, reflect.Uint32:
return mangle_32bit_ptr
case reflect.Int64,
reflect.Uint64:
case reflect.Int64, reflect.Uint64:
return mangle_64bit_ptr
case reflect.Float32:
@@ -261,20 +253,16 @@ func loadReflectKnownSlice(et reflect.Type) Mangler {
reflect.Uintptr:
return mangle_platform_int_slice
case reflect.Int8,
reflect.Uint8:
case reflect.Int8, reflect.Uint8:
return mangle_8bit_slice
case reflect.Int16,
reflect.Uint16:
case reflect.Int16, reflect.Uint16:
return mangle_16bit_slice
case reflect.Int32,
reflect.Uint32:
case reflect.Int32, reflect.Uint32:
return mangle_32bit_slice
case reflect.Int64,
reflect.Uint64:
case reflect.Int64, reflect.Uint64:
return mangle_64bit_slice
case reflect.Float32:
@@ -305,7 +293,7 @@ func loadReflectArray(et reflect.Type) rMangler {
return nil
}
// loadReflectMap ...
// loadReflectMap loads an rMangler function for a map of given key and value types.
func loadReflectMap(kt, vt reflect.Type) rMangler {
var kmng, vmng rMangler


@@ -3,15 +3,13 @@
import (
"encoding/binary"
"reflect"
"sync"
"unsafe"
"github.com/cespare/xxhash"
"github.com/cornelk/hashmap"
)
var (
// manglers is a map of runtime type ptrs => Mangler functions.
manglers = hashmap.New[uintptr, Mangler]()
manglers = sync.Map{}
// bin is a short-hand for our chosen byteorder encoding.
bin = binary.LittleEndian
@@ -36,12 +34,38 @@ type Mangled interface {
type rMangler func(buf []byte, value reflect.Value) []byte
// Get will fetch the Mangler function for given runtime type.
func Get(t reflect.Type) (Mangler, bool) {
if t == nil {
return nil, false
}
// Note that the returned mangler will be a no-op in the case
// that an incorrect type is passed as the value argument.
func Get(t reflect.Type) Mangler {
var mng Mangler
// Get raw runtime type ptr
uptr := uintptr(iface_value(t))
return manglers.Get(uptr)
// Look for a cached mangler
v, ok := manglers.Load(uptr)
if !ok {
// Load mangler function
mng = loadMangler(nil, t)
} else {
// cast cached value
mng = v.(Mangler)
}
return func(buf []byte, value any) []byte {
// Type check passed value against original arg type.
if vt := reflect.TypeOf(value); vt != t {
return buf
}
// First write the type ptr (this adds
// a unique prefix for each runtime type).
buf = mangle_platform_int(buf, uptr)
// Finally, mangle value
return mng(buf, value)
}
}
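A hedged sketch of the reworked Get API: fetch a Mangler for a reflect.Type once and reuse it; per the note above, a value of the wrong runtime type leaves the buffer untouched:

```
package main

import (
	"fmt"
	"reflect"

	"codeberg.org/gruf/go-mangler"
)

func main() {
	// Fetch (and internally cache) the mangler for uint64.
	mng := mangler.Get(reflect.TypeOf(uint64(0)))

	buf := mng(nil, uint64(42)) // type-prefixed mangled output
	fmt.Println(len(buf) > 0)   // true

	buf2 := mng(nil, "wrong type") // no-op: wrong runtime type
	fmt.Println(len(buf2))         // 0
}
```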
// Register will register the given Mangler function for use with vars of given runtime type. This allows
@@ -57,17 +81,19 @@ func Register(t reflect.Type, m Mangler) {
uptr := uintptr(iface_value(t))
// Ensure this is a unique encoder
if _, ok := manglers.Get(uptr); ok {
if _, ok := manglers.Load(uptr); ok {
panic("already registered mangler for type: " + t.String())
}
// Cache this encoder func
manglers.Set(uptr, m)
manglers.Store(uptr, m)
}
// Append will append the mangled form of input value 'a' to buffer 'b'.
// See mangler.String() for more information on mangled output.
func Append(b []byte, a any) []byte {
var mng Mangler
// Get reflect type of 'a'
t := reflect.TypeOf(a)
@@ -75,12 +101,15 @@
uptr := uintptr(iface_value(t))
// Look for a cached mangler
mng, ok := manglers.Get(uptr)
v, ok := manglers.Load(uptr)
if !ok {
// Load mangler into cache
mng = loadMangler(a, t)
manglers.Set(uptr, mng)
mng = loadMangler(nil, t)
manglers.Store(uptr, mng)
} else {
// cast cached value
mng = v.(Mangler)
}
// First write the type ptr (this adds
@@ -123,10 +152,3 @@ func String(a any) string {
b := Append(make([]byte, 0, 32), a)
return *(*string)(unsafe.Pointer(&b))
}
// Hash returns the xxHash digest of the result of mangler.Append(nil, 'a').
func Hash(a any) uint64 {
b := make([]byte, 0, 32)
b = Append(b, a)
return xxhash.Sum64(b)
}
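Hash() is removed here (go-mangler no longer depends on xxhash, hence the vendored xxhash files deleted further down). A caller that still needs a digest can hash the mangled bytes itself; the FNV choice below is a hypothetical stand-in, not part of go-mangler:

```
package main

import (
	"fmt"
	"hash/fnv"

	"codeberg.org/gruf/go-mangler"
)

func main() {
	// Mangle the value, then hash the resulting bytes ourselves.
	b := mangler.Append(make([]byte, 0, 32), uint64(42))

	h := fnv.New64a()
	_, _ = h.Write(b)
	fmt.Println(h.Sum64())
}
```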


@@ -12,6 +12,11 @@
return ctx
}()
// Closed returns an always closed context.
func Closed() context.Context {
return closedctx
}
// ContextWithCancel returns a new context.Context impl with cancel.
func ContextWithCancel() (context.Context, context.CancelFunc) {
ctx := make(cancelctx)
@@ -41,3 +46,18 @@ func (ctx cancelctx) Err() error {
func (cancelctx) Value(key interface{}) interface{} {
return nil
}
func (ctx cancelctx) String() string {
var state string
select {
case <-ctx:
state = "closed"
default:
state = "open"
}
return "cancelctx{state:" + state + "}"
}
func (ctx cancelctx) GoString() string {
return "runners." + ctx.String()
}
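A small sketch of the additions: Closed() hands back an always-closed context, and the new Stringer/GoStringer impls make the state visible in logs; the printed output follows the GoString above:

```
package main

import (
	"fmt"

	"codeberg.org/gruf/go-runners"
)

func main() {
	ctx := runners.Closed()

	select {
	case <-ctx.Done():
		fmt.Println("always closed") // selected immediately
	default:
		fmt.Println("unreachable")
	}

	fmt.Printf("%#v\n", ctx) // e.g. "runners.cancelctx{state:closed}"
}
```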


@@ -2,8 +2,12 @@
import (
"context"
"fmt"
"os"
"runtime"
"sync"
"codeberg.org/gruf/go-errors/v2"
)
// WorkerFunc represents a function processable by a worker in WorkerPool. Note
@@ -26,17 +30,22 @@ func (pool *WorkerPool) Start(workers int, queue int) bool {
return false
}
if workers < 1 {
// Use $GOMAXPROCS as default worker count
if workers <= 0 {
// Use $GOMAXPROCS as default.
workers = runtime.GOMAXPROCS(0)
}
if queue < 0 {
// Set a reasonable queue default
queue = workers * 2
// Use reasonable queue default.
queue = workers * 10
}
// Allocate pool queue of given size
// Allocate pool queue of given size.
//
// This MUST be set BEFORE we return and NOT in
// the launched goroutine, or there is a risk that
// the pool may appear as closed for a short time
// until the main goroutine has been entered.
fns := make(chan WorkerFunc, queue)
pool.fns = fns
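As a usage sketch of these defaults, the 0 and -1 arguments below deliberately trigger the GOMAXPROCS and workers*10 fallbacks:

```
package main

import (
	"context"
	"fmt"

	"codeberg.org/gruf/go-runners"
)

func main() {
	var pool runners.WorkerPool

	// workers <= 0 => runtime.GOMAXPROCS(0), queue < 0 => workers * 10.
	if !pool.Start(0, -1) {
		panic("pool already running")
	}

	pool.Enqueue(func(ctx context.Context) {
		fmt.Println("hello from worker")
	})

	// Stop blocks until queued funcs drain and workers return.
	_ = pool.Stop()
}
```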
@@ -53,50 +62,49 @@ func (pool *WorkerPool) Start(workers int, queue int) bool {
// Start goroutine worker functions
for i := 0; i < workers; i++ {
go func() {
// Trigger start / stop
wait.Add(1)
go func() {
defer wait.Done()
// Keep workers running on panic
for !workerstart(ctx, fns) {
// Run worker function.
for !worker_run(ctx, fns) {
// retry on panic
}
}()
}
// Set GC finalizer to stop pool on dealloc
// Set GC finalizer to stop pool on dealloc.
runtime.SetFinalizer(pool, func(pool *WorkerPool) {
pool.svc.Stop()
_ = pool.svc.Stop()
})
// Wait on ctx
<-ctx.Done()
// Stop all workers
close(pool.fns)
// Drain function queue.
//
// All functions in the queue MUST be
// run, so we pass them a closed context.
//
// This mainly allows us to block until
// the function queue is empty, as worker
// functions will also continue draining in
// the background with the (now) closed ctx.
for !drain_queue(fns) {
// retry on panic
}
// Now the queue is empty, we can
// safely close the channel signalling
// all of the workers to return.
close(fns)
wait.Wait()
}()
return true
}
// workerstart is the main worker runner routine, accepting functions from 'fns' until it is closed.
func workerstart(ctx context.Context, fns <-chan WorkerFunc) bool {
// Recover and drop any panic
defer func() { recover() }()
for {
// Wait on next func
fn, ok := <-fns
if !ok {
return true
}
// Run with ctx
fn(ctx)
}
}
// Stop will stop the WorkerPool management loop, blocking until stopped.
func (pool *WorkerPool) Stop() bool {
return pool.svc.Stop()
@@ -124,22 +132,24 @@ func (pool *WorkerPool) Enqueue(fn WorkerFunc) {
// EnqueueCtx is functionally identical to WorkerPool.Enqueue() but returns early in the
// case that caller provided <-ctx.Done() is closed, WITHOUT running the WorkerFunc.
func (pool *WorkerPool) EnqueueCtx(ctx context.Context, fn WorkerFunc) {
func (pool *WorkerPool) EnqueueCtx(ctx context.Context, fn WorkerFunc) bool {
// Check valid fn
if fn == nil {
return
return false
}
select {
// Caller ctx cancelled
case <-ctx.Done():
return false
// Pool ctx cancelled
case <-pool.svc.Done():
fn(closedctx)
return false
// Placed fn in queue
case pool.fns <- fn:
return true
}
}
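With the new bool return, callers can tell whether fn was actually queued; a hedged sketch:

```
package main

import (
	"context"
	"fmt"
	"time"

	"codeberg.org/gruf/go-runners"
)

func main() {
	var pool runners.WorkerPool
	pool.Start(1, 1)
	defer pool.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// false => caller ctx or pool ctx was cancelled; fn was NOT queued.
	if !pool.EnqueueCtx(ctx, func(ctx context.Context) {
		fmt.Println("ran in worker")
	}) {
		fmt.Println("enqueue failed")
	}
}
```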
@@ -167,5 +177,54 @@ func (pool *WorkerPool) EnqueueNow(fn WorkerFunc) bool {
// Queue returns the number of currently queued WorkerFuncs.
func (pool *WorkerPool) Queue() int {
return len(pool.fns)
var l int
pool.svc.While(func() {
l = len(pool.fns)
})
return l
}
// worker_run is the main worker routine, accepting functions from 'fns' until it is closed.
func worker_run(ctx context.Context, fns <-chan WorkerFunc) bool {
defer func() {
// Recover and drop any panic
if r := recover(); r != nil {
const msg = "worker_run: recovered panic: %v\n\n%s\n"
fmt.Fprintf(os.Stderr, msg, r, errors.GetCallers(2, 10))
}
}()
for {
// Wait on next func
fn, ok := <-fns
if !ok {
return true
}
// Run with ctx
fn(ctx)
}
}
// drain_queue will drain and run all functions in worker queue, passing in a closed context.
func drain_queue(fns <-chan WorkerFunc) bool {
defer func() {
// Recover and drop any panic
if r := recover(); r != nil {
const msg = "drain_queue: recovered panic: %v\n\n%s\n"
fmt.Fprintf(os.Stderr, msg, r, errors.GetCallers(2, 10))
}
}()
for {
select {
// Run with closed ctx
case fn := <-fns:
fn(closedctx)
// Queue is empty
default:
return true
}
}
}


@@ -9,10 +9,9 @@
// changes and preventing multiple instances running. Also providing service state information.
type Service struct {
state uint32 // 0=stopped, 1=running, 2=stopping
wait sync.Mutex // wait is the mutex used as a single-entity wait-group, i.e. just a "wait" :p
cncl context.CancelFunc // cncl is the cancel function set for the current context
ctx context.Context // ctx is the current context for running function (or nil if not running)
mu sync.Mutex // mu protects state changes
mutex sync.Mutex // mutex protects overall state changes
wait sync.Mutex // wait is used as a single-entity wait-group, only ever locked within 'mutex'
ctx cancelctx // ctx is the current context for running function (or nil if not running)
}
// Run will run the supplied function until completion, using given context to propagate cancel.
@@ -29,13 +28,12 @@ func (svc *Service) Run(fn func(context.Context)) bool {
svc.wait.Unlock()
// ensure stopped
svc.Stop()
_ = svc.Stop()
}()
// Run user func
if fn != nil {
// Run
fn(ctx)
}
return true
}
@@ -54,13 +52,11 @@ func (svc *Service) GoRun(fn func(context.Context)) bool {
svc.wait.Unlock()
// ensure stopped
svc.Stop()
_ = svc.Stop()
}()
// Run user func
if fn != nil {
// Run
fn(ctx)
}
}()
return true
@@ -70,14 +66,14 @@ func (svc *Service) GoRun(fn func(context.Context)) bool {
// returns false if not running, and true only after Service is fully stopped.
func (svc *Service) Stop() bool {
// Attempt to stop the svc
cncl, ok := svc.doStop()
ctx, ok := svc.doStop()
if !ok {
return false
}
defer func() {
// Get svc lock
svc.mu.Lock()
svc.mutex.Lock()
// Wait until stopped
svc.wait.Lock()
@@ -85,53 +81,65 @@
// Reset the svc
svc.ctx = nil
svc.cncl = nil
svc.state = 0
svc.mu.Unlock()
svc.mutex.Unlock()
}()
cncl() // cancel ctx
// Cancel ctx
close(ctx)
return true
}
// While allows you to execute the given function guaranteed within the current
// service state. Please note that this will hold the underlying service
// state change mutex open while executing the function.
func (svc *Service) While(fn func()) {
// Protect state change
svc.mutex.Lock()
defer svc.mutex.Unlock()
// Run
fn()
}
// doStart will safely set Service state to started, returning a ptr to this context instance.
func (svc *Service) doStart() (context.Context, bool) {
func (svc *Service) doStart() (cancelctx, bool) {
// Protect startup
svc.mu.Lock()
svc.mutex.Lock()
if svc.state != 0 /* not stopped */ {
svc.mu.Unlock()
svc.mutex.Unlock()
return nil, false
}
// state started
svc.state = 1
// Take our own ptr
var ctx context.Context
if svc.ctx == nil {
// Context required allocating
svc.ctx, svc.cncl = ContextWithCancel()
// this will only have been allocated
// if svc.Done() was already called.
svc.ctx = make(cancelctx)
}
// Start the waiter
svc.wait.Lock()
// Set our ptr + unlock
ctx = svc.ctx
svc.mu.Unlock()
// Take our own ptr
// and unlock state
ctx := svc.ctx
svc.mutex.Unlock()
return ctx, true
}
// doStop will safely set Service state to stopping, returning a ptr to this cancelctx instance.
func (svc *Service) doStop() (context.CancelFunc, bool) {
func (svc *Service) doStop() (cancelctx, bool) {
// Protect stop
svc.mu.Lock()
svc.mutex.Lock()
if svc.state != 1 /* not started */ {
svc.mu.Unlock()
svc.mutex.Unlock()
return nil, false
}
@@ -140,17 +148,17 @@ func (svc *Service) doStop() (context.CancelFunc, bool) {
// Take our own ptr
// and unlock state
cncl := svc.cncl
svc.mu.Unlock()
ctx := svc.ctx
svc.mutex.Unlock()
return cncl, true
return ctx, true
}
// Running returns if Service is running (i.e. state NOT stopped / stopping).
func (svc *Service) Running() bool {
svc.mu.Lock()
svc.mutex.Lock()
state := svc.state
svc.mu.Unlock()
svc.mutex.Unlock()
return (state == 1)
}
@@ -159,28 +167,27 @@ func (svc *Service) Running() bool {
func (svc *Service) Done() <-chan struct{} {
var done <-chan struct{}
svc.mu.Lock()
svc.mutex.Lock()
switch svc.state {
// stopped
// (here we create a new context so that the
// returned 'done' channel here will still
// be valid for when Service is next started)
case 0:
if svc.ctx == nil {
// need to allocate new context
svc.ctx, svc.cncl = ContextWithCancel()
// here we create a new context so that the
// returned 'done' channel here will still
// be valid for when Service is next started.
svc.ctx = make(cancelctx)
}
done = svc.ctx.Done()
done = svc.ctx
// started
case 1:
done = svc.ctx.Done()
done = svc.ctx
// stopping
case 2:
done = svc.ctx.Done()
done = svc.ctx
}
svc.mu.Unlock()
svc.mutex.Unlock()
return done
}


@@ -33,10 +33,11 @@ type Scheduler struct {
jch chan interface{} // jch accepts either Jobs or job IDs to notify new/removed jobs
svc runners.Service // svc manages the main scheduler routine
jid atomic.Uint64 // jid is used to iteratively generate unique IDs for jobs
rgo func(func()) // goroutine runner, allows using goroutine pool to launch jobs
}
// Start will attempt to start the Scheduler. Immediately returns false if the Service is already running, and true after a completed run.
func (sch *Scheduler) Start() bool {
func (sch *Scheduler) Start(gorun func(func())) bool {
var block sync.Mutex
// Use mutex to synchronize between started
@@ -49,14 +50,19 @@
// Create Scheduler job channel
sch.jch = make(chan interface{})
// Unlock start routine
block.Unlock()
// Set goroutine runner function
if sch.rgo = gorun; sch.rgo == nil {
sch.rgo = func(f func()) { go f() }
}
// Set GC finalizer to ensure scheduler stopped
runtime.SetFinalizer(sch, func(sch *Scheduler) {
_ = sch.Stop()
})
// Unlock start routine
block.Unlock()
// Enter main loop
sch.run(ctx)
})
@@ -87,7 +93,7 @@ func (sch *Scheduler) Schedule(job *Job) (cancel func()) {
panic("nil job")
// Check we are running
case sch.jch == nil:
case !sch.Running():
panic("scheduler not running")
}
@@ -142,21 +148,6 @@ func (sch *Scheduler) run(ctx context.Context) {
}
)
for {
select {
// Handle received job/id
case v := <-sch.jch:
sch.handle(v)
continue
// No more
default:
}
// Done
break
}
// Create a stopped timer
timer = time.NewTimer(1)
<-timer.C
@@ -256,8 +247,10 @@ func (sch *Scheduler) schedule(now time.Time) {
return
}
// Pass job to runner
go job.Run(now)
// Pass to runner
sch.rgo(func() {
job.Run(now)
})
// Update the next call time
next := job.timing.Next(now)
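The new rgo hook lets scheduled jobs be launched through a caller-supplied goroutine runner (e.g. a worker pool) instead of a bare go statement; nil keeps the old behaviour. A minimal sketch using the default runner:

```
package main

import (
	"fmt"
	"time"

	"codeberg.org/gruf/go-sched"
)

func main() {
	var scheduler sched.Scheduler

	// nil selects the default runner, i.e. go f().
	_ = scheduler.Start(nil)

	// Run a job every second; the returned cancel removes it.
	cancel := scheduler.Schedule(sched.NewJob(func(now time.Time) {
		fmt.Println("tick:", now)
	}).Every(time.Second))
	defer cancel()

	time.Sleep(3 * time.Second)
	_ = scheduler.Stop()
}
```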


@@ -1,22 +0,0 @@
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,50 +0,0 @@
# xxhash
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
The API is very small, taking its cue from the other hashing packages in the
standard library:
$ go doc github.com/cespare/xxhash
package xxhash // import "github.com/cespare/xxhash"
Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
at http://cyan4973.github.io/xxHash/.
func New() hash.Hash64
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64 against another popular Go XXH64 implementation,
[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
| input size | OneOfOne | cespare (purego) | cespare |
| --- | --- | --- | --- |
| 5 B | 416 MB/s | 720 MB/s | 872 MB/s |
| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s |
| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s |
| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s |
These numbers were generated with:
```
$ go test -benchtime 10s -bench '/OneOfOne,'
$ go test -tags purego -benchtime 10s -bench '/xxhash,'
$ go test -benchtime 10s -bench '/xxhash,'
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)


@@ -1,14 +0,0 @@
// +build !go1.9
package xxhash
// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }


@@ -1,14 +0,0 @@
// +build go1.9
package xxhash
import "math/bits"
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }


@@ -1,168 +0,0 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash
import (
"encoding/binary"
"hash"
)
const (
prime1 uint64 = 11400714785074694791
prime2 uint64 = 14029467366897019727
prime3 uint64 = 1609587929392839161
prime4 uint64 = 9650029242287828579
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
type xxh struct {
v1 uint64
v2 uint64
v3 uint64
v4 uint64
total int
mem [32]byte
n int // how much of mem is used
}
// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
func New() hash.Hash64 {
var x xxh
x.Reset()
return &x
}
func (x *xxh) Reset() {
x.n = 0
x.total = 0
x.v1 = prime1v + prime2
x.v2 = prime2
x.v3 = 0
x.v4 = -prime1v
}
func (x *xxh) Size() int { return 8 }
func (x *xxh) BlockSize() int { return 32 }
// Write adds more data to x. It always returns len(b), nil.
func (x *xxh) Write(b []byte) (n int, err error) {
n = len(b)
x.total += len(b)
if x.n+len(b) < 32 {
// This new data doesn't even fill the current block.
copy(x.mem[x.n:], b)
x.n += len(b)
return
}
if x.n > 0 {
// Finish off the partial block.
copy(x.mem[x.n:], b)
x.v1 = round(x.v1, u64(x.mem[0:8]))
x.v2 = round(x.v2, u64(x.mem[8:16]))
x.v3 = round(x.v3, u64(x.mem[16:24]))
x.v4 = round(x.v4, u64(x.mem[24:32]))
b = b[32-x.n:]
x.n = 0
}
if len(b) >= 32 {
// One or more full blocks left.
b = writeBlocks(x, b)
}
// Store any remaining partial block.
copy(x.mem[:], b)
x.n = len(b)
return
}
func (x *xxh) Sum(b []byte) []byte {
s := x.Sum64()
return append(
b,
byte(s>>56),
byte(s>>48),
byte(s>>40),
byte(s>>32),
byte(s>>24),
byte(s>>16),
byte(s>>8),
byte(s),
)
}
func (x *xxh) Sum64() uint64 {
var h uint64
if x.total >= 32 {
v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = x.v3 + prime5
}
h += uint64(x.total)
i, end := 0, x.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(x.mem[i:i+8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(x.mem[i:i+4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for i < end {
h ^= uint64(x.mem[i]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 {
acc += input * prime2
acc = rol31(acc)
acc *= prime1
return acc
}
func mergeRound(acc, val uint64) uint64 {
val = round(0, val)
acc ^= val
acc = acc*prime1 + prime4
return acc
}


@@ -1,12 +0,0 @@
// +build !appengine
// +build gc
// +build !purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64
func writeBlocks(x *xxh, b []byte) []byte


@@ -1,233 +0,0 @@
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Register allocation:
// AX h
// CX pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// R15 prime4v
// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (CX), R12 \
ADDQ $8, CX \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ R15, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), R15
// Load slice.
MOVQ b_base+0(FP), CX
MOVQ b_len+8(FP), DX
LEAQ (CX)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
// Check whether we have at least one block.
CMPQ DX, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
// Loop until CX > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
JLE blockLoop
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
ADDQ $24, BX
CMPQ CX, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (CX), R8
ADDQ $8, CX
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ R15, AX
CMPQ CX, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ CX, BX
JG singles
MOVL (CX), R8
ADDQ $4, CX
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ CX, BX
JGE finalize
singlesLoop:
MOVBQZX (CX), R12
ADDQ $1, CX
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ CX, BX
JL singlesLoop
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ AX, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the x pointer.
// func writeBlocks(x *xxh, b []byte) []byte
TEXT ·writeBlocks(SB), NOSPLIT, $0-56
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), CX
MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
MOVQ b_len+16(FP), DX
LEAQ (CX)(DX*1), BX
SUBQ $32, BX
// Load vN from x.
MOVQ x+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ CX, BX
JLE blockLoop
// Copy vN back to x.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// Construct return slice.
// NOTE: It's important that we don't construct a slice that has a base
// pointer off the end of the original slice, as in Go 1.7+ this will
// cause runtime crashes. (See discussion in, for example,
// https://github.com/golang/go/issues/16772.)
// Therefore, we calculate the length/cap first, and if they're zero, we
// keep the old base. This is what the compiler does as well if you
// write code like
// b = b[len(b):]
// New length is 32 - (CX - BX) -> BX+32 - CX.
ADDQ $32, BX
SUBQ CX, BX
JZ afterSetBase
MOVQ CX, ret_base+32(FP)
afterSetBase:
MOVQ BX, ret_len+40(FP)
MOVQ BX, ret_cap+48(FP) // set cap == len
RET


@@ -1,75 +0,0 @@
// +build !amd64 appengine !gc purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
// x := New()
// x.Write(b)
// return x.Sum64()
// but this is faster, particularly for small inputs.
n := len(b)
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = prime5
}
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
h = rol11(h) * prime1
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
func writeBlocks(x *xxh, b []byte) []byte {
v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
return b
}


@@ -1,10 +0,0 @@
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
package xxhash
// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}


@@ -1,30 +0,0 @@
// +build !appengine
// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.
package xxhash
import (
"reflect"
"unsafe"
)
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
//
// TODO(caleb): Consider removing this if an optimization is ever added to make
// it unnecessary: https://golang.org/issue/2205.
//
// TODO(caleb): We still have a function call; we could instead write Go/asm
// copies of Sum64 for strings to squeeze out a bit more speed.
func Sum64String(s string) uint64 {
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about this unsafe conversion.
var b []byte
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
bh.Len = len(s)
bh.Cap = len(s)
return Sum64(b)
}

vendor/modules.txt

@@ -13,7 +13,7 @@ codeberg.org/gruf/go-bytesize
# codeberg.org/gruf/go-byteutil v1.0.2
## explicit; go 1.16
codeberg.org/gruf/go-byteutil
# codeberg.org/gruf/go-cache/v3 v3.2.0
# codeberg.org/gruf/go-cache/v3 v3.2.2
## explicit; go 1.19
codeberg.org/gruf/go-cache/v3
codeberg.org/gruf/go-cache/v3/result
@@ -43,7 +43,7 @@ codeberg.org/gruf/go-kv/format
# codeberg.org/gruf/go-logger/v2 v2.2.1
## explicit; go 1.19
codeberg.org/gruf/go-logger/v2/level
# codeberg.org/gruf/go-mangler v1.1.1
# codeberg.org/gruf/go-mangler v1.2.2
## explicit; go 1.19
codeberg.org/gruf/go-mangler
# codeberg.org/gruf/go-maps v1.0.3
@@ -55,10 +55,10 @@ codeberg.org/gruf/go-mutexes
# codeberg.org/gruf/go-pools v1.1.0
## explicit; go 1.16
codeberg.org/gruf/go-pools
# codeberg.org/gruf/go-runners v1.3.1
# codeberg.org/gruf/go-runners v1.4.0
## explicit; go 1.14
codeberg.org/gruf/go-runners
# codeberg.org/gruf/go-sched v1.1.1
# codeberg.org/gruf/go-sched v1.2.0
## explicit; go 1.19
codeberg.org/gruf/go-sched
# codeberg.org/gruf/go-store/v2 v2.0.10
@@ -79,9 +79,6 @@ github.com/aymerick/douceur/parser
## explicit; go 1.14
github.com/buckket/go-blurhash
github.com/buckket/go-blurhash/base83
# github.com/cespare/xxhash v1.1.0
## explicit
github.com/cespare/xxhash
# github.com/coreos/go-oidc/v3 v3.4.0
## explicit; go 1.14
github.com/coreos/go-oidc/v3/oidc