mirror of https://github.com/superseriousbusiness/gotosocial (synced 2024-11-10 06:54:16 +00:00)
[chore] update gruf libraries (#996)
* update go-store to v2.0.6: closer callbacks are now only ever called at most once
* bump go-store => v2.0.7, go-mutexes => v1.1.4

Signed-off-by: kim <grufwub@gmail.com>
parent 0d51d5c13b
commit 7c0bbd3f6a
6 changed files with 48 additions and 62 deletions
go.mod (4 changes)

@@ -10,9 +10,9 @@ require (
 	codeberg.org/gruf/go-errors/v2 v2.0.2
 	codeberg.org/gruf/go-kv v1.5.2
 	codeberg.org/gruf/go-logger/v2 v2.2.1
-	codeberg.org/gruf/go-mutexes v1.1.3
+	codeberg.org/gruf/go-mutexes v1.1.4
 	codeberg.org/gruf/go-runners v1.3.1
-	codeberg.org/gruf/go-store/v2 v2.0.5
+	codeberg.org/gruf/go-store/v2 v2.0.7
 	github.com/buckket/go-blurhash v1.1.0
 	github.com/coreos/go-oidc/v3 v3.4.0
 	github.com/disintegration/imaging v1.6.2
go.sum (8 changes)

@@ -87,16 +87,16 @@ codeberg.org/gruf/go-kv v1.5.2 h1:B0RkAXLUXYn3Za1NzTXOcUvAc+JUC2ZadTMkCUDa0mc=
 codeberg.org/gruf/go-kv v1.5.2/go.mod h1:al6ASW/2CbGqz2YcM8B00tvWnVi1bU1CH3HYs5tZxo4=
 codeberg.org/gruf/go-logger/v2 v2.2.1 h1:RP2u059EQKTBFV3cN8X6xDxNk2RkzqdgXGKflKqB7Oc=
 codeberg.org/gruf/go-logger/v2 v2.2.1/go.mod h1:m/vBfG5jNUmYXI8Hg9aVSk7Pn8YgEBITQB/B/CzdRss=
-codeberg.org/gruf/go-mutexes v1.1.3 h1:6es80V7De7mh5XCfSUzLL5/QlJQsV6+UZkK7Ljq1tew=
-codeberg.org/gruf/go-mutexes v1.1.3/go.mod h1:1j/6/MBeBQUedAtAtysLLnBKogfOZAxdym0E3wlaBD8=
+codeberg.org/gruf/go-mutexes v1.1.4 h1:HWaIZavPL92SBJxNOlIXAmAT5CB2hAs72/lBN31jnzM=
+codeberg.org/gruf/go-mutexes v1.1.4/go.mod h1:1j/6/MBeBQUedAtAtysLLnBKogfOZAxdym0E3wlaBD8=
 codeberg.org/gruf/go-pools v1.1.0 h1:LbYP24eQLl/YI1fSU2pafiwhGol1Z1zPjRrMsXpF88s=
 codeberg.org/gruf/go-pools v1.1.0/go.mod h1:ZMYpt/DjQWYC3zFD3T97QWSFKs62zAUGJ/tzvgB9D68=
 codeberg.org/gruf/go-runners v1.3.1 h1:d/OQMMMiA6yPaDSbSr0/Jc+lucWmm7AiAZjWffpNKVQ=
 codeberg.org/gruf/go-runners v1.3.1/go.mod h1:rl0EdZNozkRMb21DAtOL5L4oTfmslYQdZgq2RMMc/H4=
 codeberg.org/gruf/go-sched v1.1.1 h1:YtLSQhpypzuD3HTup5oF7LLWB79gTL4nqW06kH4Vwks=
 codeberg.org/gruf/go-sched v1.1.1/go.mod h1:SRcdP/5qim+EBT3n3r4aUra1C30yPqV4OJOXuqvgdQM=
-codeberg.org/gruf/go-store/v2 v2.0.5 h1:AbOka6LkyT9jobPYfK3h5f5dPqx5AjFPhaaqdOWkGyA=
-codeberg.org/gruf/go-store/v2 v2.0.5/go.mod h1:vKId86ET4ZzG1tE1dMNkfV66rZkcsyqt64UhKt6EYfc=
+codeberg.org/gruf/go-store/v2 v2.0.7 h1:P+0d8jnXdgzxfHLqKjHMV+MAxVJmq056PvzaHRyR8jE=
+codeberg.org/gruf/go-store/v2 v2.0.7/go.mod h1:D4r5PV0BXDhxQyATw/03JkwvziZDkVMgzTpElZyWTXI=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
vendor/codeberg.org/gruf/go-mutexes/map.go (57 changes, generated, vendored)

@@ -89,8 +89,8 @@ func acquireState(state uint8, lt uint8) (uint8, bool) {
 // awaiting a permissible state (.e.g no key write locks allowed when the
 // map is read locked).
 type MutexMap struct {
-	qpool pool
-	queue []*sync.Mutex
+	queue *sync.WaitGroup
+	qucnt int32
 
 	mumap map[string]*rwmutex
 	mpool pool
@@ -118,17 +118,8 @@ func NewMap(max, wake int32) MutexMap {
 	}
 
 	return MutexMap{
-		qpool: pool{
-			alloc: func() interface{} {
-				return &sync.Mutex{}
-			},
-		},
+		queue: &sync.WaitGroup{},
 		mumap: make(map[string]*rwmutex, max),
-		mpool: pool{
-			alloc: func() interface{} {
-				return &rwmutex{}
-			},
-		},
 		maxmu: max,
 		wake:  wake,
 	}
@@ -170,36 +161,26 @@ func (mm *MutexMap) SET(max, wake int32) (int32, int32) {
 
 // spinLock will wait (using a mutex to sleep thread) until conditional returns true.
 func (mm *MutexMap) spinLock(cond func() bool) {
-	var mu *sync.Mutex
-
 	for {
 		// Acquire map lock
 		mm.mapmu.Lock()
 
 		if cond() {
-			// Release mu if needed
-			if mu != nil {
-				mm.qpool.Release(mu)
-			}
 			return
 		}
 
-		// Alloc mu if needed
-		if mu == nil {
-			v := mm.qpool.Acquire()
-			mu = v.(*sync.Mutex)
-		}
+		// Current queue ptr
+		queue := mm.queue
 
 		// Queue ourselves
-		mm.queue = append(mm.queue, mu)
-		mu.Lock()
+		queue.Add(1)
+		mm.qucnt++
 
 		// Unlock map
 		mm.mapmu.Unlock()
 
 		// Wait on notify
-		mu.Lock()
-		mu.Unlock()
+		mm.queue.Wait()
 	}
 }
 
@@ -236,9 +217,8 @@ func (mm *MutexMap) lock(key string, lt uint8) func() {
 	if !ok {
 		// No mutex found for key
 
-		// Alloc from pool
-		v := mm.mpool.Acquire()
-		mu = v.(*rwmutex)
+		// Alloc mu from pool
+		mu = mm.mpool.Acquire()
 		mm.mumap[key] = mu
 
 		// Set our key
@@ -301,13 +281,12 @@ func (mm *MutexMap) cleanup() {
 
 	go func() {
 		if wakemod == 0 {
-			// Notify queued routines
-			for _, mu := range mm.queue {
-				mu.Unlock()
-			}
+			// Release queued goroutines
+			mm.queue.Add(-int(mm.qucnt))
 
-			// Reset queue
-			mm.queue = mm.queue[:0]
+			// Allocate new queue and reset
+			mm.queue = &sync.WaitGroup{}
+			mm.qucnt = 0
 		}
 
 		if mm.count == 0 {
@@ -323,7 +302,6 @@ func (mm *MutexMap) cleanup() {
 			mm.evict = mm.evict[:0]
 			mm.state = stateUnlockd
 			mm.mpool.GC()
-			mm.qpool.GC()
 		}
 
 		// Unlock map
@@ -412,9 +390,8 @@ func (st *LockState) lock(key string, lt uint8) func() {
 	if !ok {
 		// No mutex found for key
 
-		// Alloc from pool
-		v := st.mmap.mpool.Acquire()
-		mu = v.(*rwmutex)
+		// Alloc mu from pool
+		mu = st.mmap.mpool.Acquire()
 		st.mmap.mumap[key] = mu
 
 		// Set our key
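The key change in map.go: the queue of per-waiter sync.Mutexes (each locked twice to park a goroutine) is replaced by a single *sync.WaitGroup plus a qucnt counter. Waiters register on the current WaitGroup while holding the map lock and block in Wait(); cleanup() then wakes the entire queue with one Add(-n) and installs a fresh WaitGroup for the next round. A minimal standalone sketch of that broadcast pattern, under assumed names (broadcaster, wait, release are illustrative, not go-mutexes API):

package main

import (
	"fmt"
	"sync"
	"time"
)

// broadcaster is a simplified stand-in for the MutexMap queue fields.
type broadcaster struct {
	mu    sync.Mutex      // stands in for MutexMap.mapmu
	queue *sync.WaitGroup // current wait queue (MutexMap.queue)
	count int32           // waiters on queue (MutexMap.qucnt)
}

// wait queues the caller and blocks until the next release().
func (b *broadcaster) wait() {
	b.mu.Lock()
	queue := b.queue // capture current queue ptr
	queue.Add(1)     // register while holding the lock
	b.count++
	b.mu.Unlock()
	queue.Wait() // sleep until the counter drops to zero
}

// release wakes every queued waiter and installs a fresh queue,
// mirroring the new cleanup() logic.
func (b *broadcaster) release() {
	b.mu.Lock()
	b.queue.Add(-int(b.count)) // zero the counter => all Wait()s return
	b.queue = &sync.WaitGroup{}
	b.count = 0
	b.mu.Unlock()
}

func main() {
	b := &broadcaster{queue: &sync.WaitGroup{}}

	var done sync.WaitGroup
	for i := 0; i < 4; i++ {
		done.Add(1)
		go func(i int) {
			defer done.Done()
			b.wait()
			fmt.Println("waiter", i, "woken")
		}(i)
	}

	time.Sleep(100 * time.Millisecond) // crude: let the waiters queue up
	b.release()
	done.Wait()
}

Because each wake round gets a brand-new WaitGroup, the pattern sidesteps sync.WaitGroup's reuse rule (new Add calls must not race with returning Wait calls), and a single Add(-n) replaces the old loop that unlocked every queued mutex.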
vendor/codeberg.org/gruf/go-mutexes/pool.go (23 changes, generated, vendored)

@@ -2,34 +2,33 @@ package mutexes
 
 // pool is a very simply memory pool.
 type pool struct {
-	current []interface{}
-	victim []interface{}
-	alloc func() interface{}
+	current []*rwmutex
+	victim []*rwmutex
 }
 
-// Acquire will returns a sync.RWMutex from pool (or alloc new).
-func (p *pool) Acquire() interface{} {
+// Acquire will returns a rwmutex from pool (or alloc new).
+func (p *pool) Acquire() *rwmutex {
 	// First try the current queue
 	if l := len(p.current) - 1; l >= 0 {
-		v := p.current[l]
+		mu := p.current[l]
 		p.current = p.current[:l]
-		return v
+		return mu
 	}
 
 	// Next try the victim queue.
 	if l := len(p.victim) - 1; l >= 0 {
-		v := p.victim[l]
+		mu := p.victim[l]
 		p.victim = p.victim[:l]
-		return v
+		return mu
 	}
 
 	// Lastly, alloc new.
-	return p.alloc()
+	return &rwmutex{}
 }
 
 // Release places a sync.RWMutex back in the pool.
-func (p *pool) Release(v interface{}) {
-	p.current = append(p.current, v)
+func (p *pool) Release(mu *rwmutex) {
+	p.current = append(p.current, mu)
 }
 
 // GC will clear out unused entries from the pool.
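Design note: with the queue's sync.Mutex pool gone, rwmutex is the only type left in the pool, so the pool is monomorphized — the alloc closure and interface{} boxing are dropped, Acquire returns a *rwmutex directly, and the lock() call sites in map.go above lose their type assertions.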
vendor/codeberg.org/gruf/go-store/v2/util/io.go (14 changes, generated, vendored)

@@ -48,6 +48,7 @@ func NopWriteCloser(w io.Writer) io.WriteCloser {
 }
 
 // ReadCloserWithCallback adds a customizable callback to be called upon Close() of a supplied io.ReadCloser.
+// Note that the callback will never be called more than once, after execution this will remove the func reference.
 func ReadCloserWithCallback(rc io.ReadCloser, cb func()) io.ReadCloser {
 	return &callbackReadCloser{
 		ReadCloser: rc,
@@ -56,6 +57,7 @@ func ReadCloserWithCallback(rc io.ReadCloser, cb func()) io.ReadCloser {
 }
 
 // WriteCloserWithCallback adds a customizable callback to be called upon Close() of a supplied io.WriteCloser.
+// Note that the callback will never be called more than once, after execution this will remove the func reference.
 func WriteCloserWithCallback(wc io.WriteCloser, cb func()) io.WriteCloser {
 	return &callbackWriteCloser{
 		WriteCloser: wc,
@@ -80,7 +82,11 @@ type callbackReadCloser struct {
 }
 
 func (c *callbackReadCloser) Close() error {
-	defer c.callback()
+	if c.callback != nil {
+		cb := c.callback
+		c.callback = nil
+		defer cb()
+	}
 	return c.ReadCloser.Close()
 }
 
@@ -91,6 +97,10 @@ type callbackWriteCloser struct {
 }
 
 func (c *callbackWriteCloser) Close() error {
-	defer c.callback()
+	if c.callback != nil {
+		cb := c.callback
+		c.callback = nil
+		defer cb()
+	}
 	return c.WriteCloser.Close()
 }
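This is the go-store fix named in the commit message: Close() now clears the callback reference before invoking it, so a double Close() on a wrapped reader or writer fires the callback at most once, and the cleared field lets the closure (and whatever it captures) be garbage-collected. A standalone sketch mirroring the vendored callbackReadCloser — the main() harness is illustrative only:

package main

import (
	"fmt"
	"io"
	"strings"
)

// callbackReadCloser mirrors the vendored type: an io.ReadCloser
// plus a callback to run once on Close().
type callbackReadCloser struct {
	io.ReadCloser
	callback func()
}

func (c *callbackReadCloser) Close() error {
	if c.callback != nil {
		cb := c.callback
		c.callback = nil // drop the reference: cb can never run twice
		defer cb()
	}
	return c.ReadCloser.Close()
}

func main() {
	rc := &callbackReadCloser{
		ReadCloser: io.NopCloser(strings.NewReader("data")),
		callback:   func() { fmt.Println("callback fired") },
	}
	_ = rc.Close() // prints "callback fired"
	_ = rc.Close() // no output: the reference was already cleared
}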
vendor/modules.txt (4 changes, vendored)

@@ -38,7 +38,7 @@ codeberg.org/gruf/go-kv/format
 # codeberg.org/gruf/go-logger/v2 v2.2.1
 ## explicit; go 1.19
 codeberg.org/gruf/go-logger/v2/level
-# codeberg.org/gruf/go-mutexes v1.1.3
+# codeberg.org/gruf/go-mutexes v1.1.4
 ## explicit; go 1.14
 codeberg.org/gruf/go-mutexes
 # codeberg.org/gruf/go-pools v1.1.0
@@ -50,7 +50,7 @@ codeberg.org/gruf/go-runners
 # codeberg.org/gruf/go-sched v1.1.1
 ## explicit; go 1.19
 codeberg.org/gruf/go-sched
-# codeberg.org/gruf/go-store/v2 v2.0.5
+# codeberg.org/gruf/go-store/v2 v2.0.7
 ## explicit; go 1.19
 codeberg.org/gruf/go-store/v2/kv
 codeberg.org/gruf/go-store/v2/storage
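For reference, a dependency-and-vendor bump of this shape is normally regenerated with the standard Go module tooling rather than edited by hand (assuming a checkout of the repository root):

	go get codeberg.org/gruf/go-mutexes@v1.1.4 codeberg.org/gruf/go-store/v2@v2.0.7
	go mod tidy
	go mod vendor

which updates go.mod, go.sum, the vendor/ tree, and vendor/modules.txt together.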