[chore]: Bump codeberg.org/gruf/go-mutexes from 1.3.0 to 1.3.1 (#2387)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Author: dependabot[bot] (2023-11-27 13:20:18 +00:00), committed by GitHub
parent 1fa206c230
commit d7e35f6bc9
4 changed files with 98 additions and 74 deletions

go.mod (2 changes)

@@ -14,7 +14,7 @@ require (
 	codeberg.org/gruf/go-iotools v0.0.0-20230811115124-5d4223615a7f
 	codeberg.org/gruf/go-kv v1.6.4
 	codeberg.org/gruf/go-logger/v2 v2.2.1
-	codeberg.org/gruf/go-mutexes v1.3.0
+	codeberg.org/gruf/go-mutexes v1.3.1
 	codeberg.org/gruf/go-runners v1.6.1
 	codeberg.org/gruf/go-sched v1.2.3
 	codeberg.org/gruf/go-store/v2 v2.2.4

go.sum (4 changes)

@@ -67,8 +67,8 @@ codeberg.org/gruf/go-mangler v1.2.3 h1:sj0dey2lF5GRQL7fXmCY0wPNaI5JrROiThb0VDbzF
 codeberg.org/gruf/go-mangler v1.2.3/go.mod h1:X/7URkFhLBAVKkTxmqF11Oxw3A6pSSxgPeHssQaiq28=
 codeberg.org/gruf/go-maps v1.0.3 h1:VDwhnnaVNUIy5O93CvkcE2IZXnMB1+IJjzfop9V12es=
 codeberg.org/gruf/go-maps v1.0.3/go.mod h1:D5LNDxlC9rsDuVQVM6JObaVGAdHB6g2dTdOdkh1aXWA=
-codeberg.org/gruf/go-mutexes v1.3.0 h1:EJXLL1UCit/ZJtTZ/Q9MMFO5c8iCwS4bIesXu1CKGpQ=
-codeberg.org/gruf/go-mutexes v1.3.0/go.mod h1:1j/6/MBeBQUedAtAtysLLnBKogfOZAxdym0E3wlaBD8=
+codeberg.org/gruf/go-mutexes v1.3.1 h1:8ibAjWwx08GJSq5R+lM9nwtJw2aAhMPKSXbfJ9EpDsA=
+codeberg.org/gruf/go-mutexes v1.3.1/go.mod h1:1j/6/MBeBQUedAtAtysLLnBKogfOZAxdym0E3wlaBD8=
 codeberg.org/gruf/go-runners v1.6.1 h1:0KNiEfGnmNUs9intqxEAWqIKUyxVOmYTtn3kPVOHsjQ=
 codeberg.org/gruf/go-runners v1.6.1/go.mod h1:QRcSExqXX8DM0rm8Xs6qX7baOzyvw0JIe4mu3TsQT+Y=
 codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk=

vendor/codeberg.org/gruf/go-mutexes (vendored source)

@@ -2,6 +2,7 @@
 import (
 	"sync"
+	"sync/atomic"
 	"unsafe"
 )
@@ -24,11 +25,11 @@
 // and performs self-eviction of keys.
 //
 // Under the hood this is achieved using a single mutex for the
-// map, state tracking for individual keys, and some simple waitgroup
-// type structures to park / block goroutines waiting for keys.
+// map, state tracking for individual keys, and some sync.Cond{}
+// like structures for sleeping / awaking awaiting goroutines.
 type MutexMap struct {
 	mapmu  sync.Mutex
-	mumap  map[string]*rwmutexish
+	mumap  map[string]*rwmutex
 	mupool rwmutexPool
 	count  uint32
 }
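Aside (not part of the diff): for context, MutexMap is used roughly as in the sketch below. This assumes the exported Lock/RLock wrappers return an unlock callback, mirroring the internal lock(key, lt) func() signature shown later in this file, and that the zero value is usable given the lazy checkInit above; both are assumptions, not guarantees.

package main

import (
	"fmt"

	"codeberg.org/gruf/go-mutexes"
)

func main() {
	// Assumption: zero value is usable, since the map
	// appears to lazily initialize itself via checkInit().
	var mm mutexes.MutexMap

	// Write-lock a single key; other goroutines locking
	// "user:1" park until the returned func is called.
	unlock := mm.Lock("user:1")
	defer unlock()

	// Locks on a different key are independent.
	runlock := mm.RLock("user:2")
	fmt.Println("holding user:1 (write) and user:2 (read)")
	runlock()
}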
@@ -36,7 +37,7 @@ type MutexMap struct {
 // checkInit ensures MutexMap is initialized (UNSAFE).
 func (mm *MutexMap) checkInit() {
 	if mm.mumap == nil {
-		mm.mumap = make(map[string]*rwmutexish)
+		mm.mumap = make(map[string]*rwmutex)
 	}
 }
@@ -82,7 +83,7 @@ func (mm *MutexMap) lock(key string, lt uint8) func() {
 	}
 }
 
-func (mm *MutexMap) unlock(key string, mu *rwmutexish) {
+func (mm *MutexMap) unlock(key string, mu *rwmutex) {
 	// Get map lock.
 	mm.mapmu.Lock()
@@ -109,12 +110,12 @@ func (mm *MutexMap) unlock(key string, mu *rwmutexish) {
 // rwmutexPool is a very simply memory rwmutexPool.
 type rwmutexPool struct {
-	current []*rwmutexish
-	victim  []*rwmutexish
+	current []*rwmutex
+	victim  []*rwmutex
 }
 
 // Acquire will returns a rwmutexState from rwmutexPool (or alloc new).
-func (p *rwmutexPool) Acquire() *rwmutexish {
+func (p *rwmutexPool) Acquire() *rwmutex {
 	// First try the current queue
 	if l := len(p.current) - 1; l >= 0 {
 		mu := p.current[l]
@@ -130,12 +131,12 @@ func (p *rwmutexPool) Acquire() *rwmutexish {
 	}
 
 	// Lastly, alloc new.
-	mu := new(rwmutexish)
+	mu := new(rwmutex)
 	return mu
 }
 
 // Release places a sync.rwmutexState back in the rwmutexPool.
-func (p *rwmutexPool) Release(mu *rwmutexish) {
+func (p *rwmutexPool) Release(mu *rwmutex) {
 	p.current = append(p.current, mu)
 }
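Aside (not part of the diff): the rwmutexPool touched above appears to follow a two-generation, victim-cache style design: Release pushes onto current, Acquire pops current and then victim before allocating new, and GC() demotes current to victim so idle entries are dropped after roughly two GC passes. A minimal standalone sketch of that pattern, with illustrative names only:

// victimPool is an illustrative two-generation pool in the
// style of rwmutexPool: an entry released and never reused
// survives at most two GC() calls before being dropped.
type victimPool[T any] struct {
	current []*T
	victim  []*T
}

func (p *victimPool[T]) Acquire() *T {
	// Prefer the freshest generation.
	if l := len(p.current) - 1; l >= 0 {
		v := p.current[l]
		p.current = p.current[:l]
		return v
	}
	// Fall back to the victim generation.
	if l := len(p.victim) - 1; l >= 0 {
		v := p.victim[l]
		p.victim = p.victim[:l]
		return v
	}
	// Nothing pooled; allocate new.
	return new(T)
}

func (p *victimPool[T]) Release(v *T) {
	p.current = append(p.current, v)
}

// GC drops the previous victim generation and
// demotes the current generation to victim.
func (p *victimPool[T]) GC() {
	current := p.current
	p.current = nil
	p.victim = current
}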
@@ -146,20 +147,28 @@ func (p *rwmutexPool) GC() {
 	p.victim = current
 }
 
-// rwmutexish is a RW mutex (ish), i.e. the representation
-// of one only to be accessed within
-type rwmutexish struct {
-	tr trigger
-	ln int32 // no. locks
-	wn int32 // no. waiters
-	lt uint8 // lock type
+// rwmutex represents a RW mutex when used correctly within
+// a MapMutex. It should ONLY be access when protected by
+// the outer map lock, except for the 'notifyList' which is
+// a runtime internal structure borrowed from the sync.Cond{}.
+//
+// this functions very similarly to a sync.Cond{}, but with
+// lock state tracking, and returning on 'Broadcast()' whether
+// any goroutines were actually awoken. it also has a less
+// confusing API than sync.Cond{} with the outer locking
+// mechanism we use, otherwise all Cond{}.L would reference
+// the same outer map mutex.
+type rwmutex struct {
+	n notifyList // 'trigger' mechanism
+	l int32      // no. locks
+	t uint8      // lock type
 }
 
 // Lock will lock the mutex for given lock type, in the
 // sense that it will update the internal state tracker
 // accordingly. Return value is true on successful lock.
-func (mu *rwmutexish) Lock(lt uint8) bool {
-	switch mu.lt {
+func (mu *rwmutex) Lock(lt uint8) bool {
+	switch mu.t {
 	case lockTypeRead:
 		// already read locked,
 		// only permit more reads.
@@ -173,77 +182,83 @@ func (mu *rwmutexish) Lock(lt uint8) bool {
 		return false
 
 	default:
-		// Fully unlocked.
-		mu.lt = lt
+		// Fully unlocked,
+		// set incoming type.
+		mu.t = lt
 	}
 
 	// Update
 	// count.
-	mu.ln++
+	mu.l++
 
 	return true
 }
 
-// Unlock will unlock the mutex, in the sense that
-// it will update the internal state tracker accordingly.
-// On any unlock it will awaken sleeping waiting threads.
-// Returned boolean is if unlocked=true AND waiters=0.
-func (mu *rwmutexish) Unlock() bool {
-	var ok bool
-
-	switch mu.ln--; {
-	case mu.ln > 0 && mu.lt == lockTypeWrite:
+// Unlock will unlock the mutex, in the sense that it
+// will update the internal state tracker accordingly.
+// On totally unlocked state, it will awaken all
+// sleeping goroutines waiting on this mutex.
+func (mu *rwmutex) Unlock() bool {
+	switch mu.l--; {
+	case mu.l > 0 && mu.t == lockTypeWrite:
 		panic("BUG: multiple writer locks")
-	case mu.ln < 0:
+	case mu.l < 0:
 		panic("BUG: negative lock count")
-	case mu.ln == 0:
+	case mu.l == 0:
 		// Fully unlocked.
-		mu.lt = 0
+		mu.t = 0
 
-		// Only return true
-		// with no waiters.
-		ok = (mu.wn == 0)
+		// Awake all blocked goroutines and check
+		// for change in the last notified ticket.
+		before := atomic.LoadUint32(&mu.n.notify)
+		runtime_notifyListNotifyAll(&mu.n)
+		after := atomic.LoadUint32(&mu.n.notify)
+
+		// If ticket changed, this indicates
+		// AT LEAST one goroutine was awoken.
+		//
+		// (before != after) => (waiters > 0)
+		// (before == after) => (waiters = 0)
+		return (before == after)
+
+	default:
+		// i.e. mutex still
+		// locked by others.
+		return false
 	}
-
-	// Awake all waiting
-	// goroutines for mu.
-	mu.tr.Trigger()
-
-	return ok
 }
 
-// WaitRelock expects a mutex to be passed in already in
-// the lock state. It incr the rwmutexish waiter count before
-// unlocking the outer mutex and blocking on internal trigger.
-// On awake it will relock outer mutex and decr wait count.
-func (mu *rwmutexish) WaitRelock(outer *sync.Mutex) {
-	mu.wn++
+// WaitRelock expects a mutex to be passed in, already in the
+// locked state. It incr the notifyList waiter count before
+// unlocking the outer mutex and blocking on notifyList wait.
+// On awake it will decr wait count and relock outer mutex.
+func (mu *rwmutex) WaitRelock(outer *sync.Mutex) {
+	// add ourselves to list while still
+	// under protection of outer map lock.
+	t := runtime_notifyListAdd(&mu.n)
+
+	// Finished with
+	// outer map lock.
 	outer.Unlock()
-	mu.tr.Wait()
+
+	// Block until awoken by another
+	// goroutine within mu.Unlock().
+	runtime_notifyListWait(&mu.n, t)
+
+	// Relock!
 	outer.Lock()
-	mu.wn--
 }
 
-// trigger uses the internals of sync.Cond to provide
-// a waitgroup type structure (including goroutine parks)
-// without such a heavy reliance on a delta value.
-type trigger struct{ notifyList }
-
-func (t *trigger) Trigger() {
-	runtime_notifyListNotifyAll(&t.notifyList)
-}
-
-func (t *trigger) Wait() {
-	v := runtime_notifyListAdd(&t.notifyList)
-	runtime_notifyListWait(&t.notifyList, v)
-}
-
-// Approximation of notifyList in runtime/sema.go.
+// unused fields left
+// un-named for safety.
 type notifyList struct {
-	wait   uint32
-	notify uint32
-	lock   uintptr // key field of the mutex
-	head   unsafe.Pointer
-	tail   unsafe.Pointer
+	_      uint32         // wait   uint32
+	notify uint32         // notify uint32
+	_      uintptr        // lock   mutex
+	_      unsafe.Pointer // head   *sudog
+	_      unsafe.Pointer // tail   *sudog
 }
 
 // See runtime/sema.go for documentation.
@@ -260,3 +275,12 @@ func runtime_notifyListWait(l *notifyList, t uint32)
 //
 //go:linkname runtime_notifyListNotifyAll sync.runtime_notifyListNotifyAll
 func runtime_notifyListNotifyAll(l *notifyList)
+
+// Ensure that sync and runtime agree on size of notifyList.
+//
+//go:linkname runtime_notifyListCheck sync.runtime_notifyListCheck
+func runtime_notifyListCheck(size uintptr)
+func init() {
+	var n notifyList
+	runtime_notifyListCheck(unsafe.Sizeof(n))
+}
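Aside (not part of the diff): the new code leans on a pattern worth seeing in isolation: mirror the runtime's notifyList layout, pull the park/wake primitives behind sync.Cond via go:linkname, assert the struct size in init(), and compare the notify ticket before and after a broadcast to learn whether anything was actually asleep. The sketch below uses the same linknamed functions the vendored file declares; the Event wrapper and package name are illustrative assumptions, and a package pulling bodyless linknamed declarations typically also needs an (empty) .s file so the compiler accepts them.

package event

import (
	"sync/atomic"
	"unsafe" // required for go:linkname and Sizeof
)

// notifyList mirrors the layout of the runtime's notifyList
// (runtime/sema.go); init() below asserts the sizes agree.
type notifyList struct {
	wait   uint32
	notify uint32
	lock   uintptr
	head   unsafe.Pointer
	tail   unsafe.Pointer
}

// These are the same primitives the standard library's
// sync.Cond is built on.
//
//go:linkname runtime_notifyListAdd sync.runtime_notifyListAdd
func runtime_notifyListAdd(l *notifyList) uint32

//go:linkname runtime_notifyListWait sync.runtime_notifyListWait
func runtime_notifyListWait(l *notifyList, t uint32)

//go:linkname runtime_notifyListNotifyAll sync.runtime_notifyListNotifyAll
func runtime_notifyListNotifyAll(l *notifyList)

//go:linkname runtime_notifyListCheck sync.runtime_notifyListCheck
func runtime_notifyListCheck(size uintptr)

func init() {
	// Fail fast if the runtime's notifyList ever
	// changes size underneath this mirror.
	var n notifyList
	runtime_notifyListCheck(unsafe.Sizeof(n))
}

// Event parks goroutines until Broadcast is called.
type Event struct{ n notifyList }

// Wait registers the caller as a waiter, then
// blocks until a later Broadcast wakes it.
func (e *Event) Wait() {
	t := runtime_notifyListAdd(&e.n)
	runtime_notifyListWait(&e.n, t)
}

// Broadcast wakes every current waiter and reports whether
// at least one goroutine was actually woken, using the same
// before/after notify-ticket comparison as rwmutex.Unlock.
func (e *Event) Broadcast() bool {
	before := atomic.LoadUint32(&e.n.notify)
	runtime_notifyListNotifyAll(&e.n)
	after := atomic.LoadUint32(&e.n.notify)
	return before != after
}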

vendor/modules.txt (2 changes, vendored)

@@ -47,7 +47,7 @@ codeberg.org/gruf/go-mangler
 # codeberg.org/gruf/go-maps v1.0.3
 ## explicit; go 1.19
 codeberg.org/gruf/go-maps
-# codeberg.org/gruf/go-mutexes v1.3.0
+# codeberg.org/gruf/go-mutexes v1.3.1
 ## explicit; go 1.14
 codeberg.org/gruf/go-mutexes
 # codeberg.org/gruf/go-runners v1.6.1