[bugfix] update go-cache version to support multi-keying (#1756)

* update go-cache version to support multi-keying

Signed-off-by: kim <grufwub@gmail.com>

* improved cache invalidation

Signed-off-by: kim <grufwub@gmail.com>

---------

Signed-off-by: kim <grufwub@gmail.com>
kim 2023-05-09 15:17:43 +01:00 committed by GitHub
parent 65cd1acbdf
commit 8275d70e38
8 changed files with 90 additions and 67 deletions
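For context on what the version bump brings in: the go-cache v3.3.0 result cache gains "multi-keyed" lookups. A Lookup flagged Multi: true behaves like a non-unique index: several cached results may sit under one key string, the per-lookup index becomes a map[string][]int64 of primary keys (see the structKey change below), and Invalidate drops every entry stored under that key, which is what lets the visibility cache mark its ItemID and RequesterID lookups as Multi. The following is a rough, self-contained sketch of that idea only; it is not the library code, and every name in it is made up for illustration.

package main

import "fmt"

// multiIndex models the reworked structKey.pkeys field: each lookup
// key now maps to a slice of primary cache keys rather than one.
type multiIndex struct {
	pkeys map[string][]int64
}

// add records another primary key under the given lookup key.
func (m *multiIndex) add(key string, pkey int64) {
	m.pkeys[key] = append(m.pkeys[key], pkey)
}

// invalidate removes and returns every primary key stored under the
// lookup key, mirroring the new loop in the cache's Invalidate.
func (m *multiIndex) invalidate(key string) []int64 {
	pkeys := m.pkeys[key]
	delete(m.pkeys, key)
	return pkeys
}

func main() {
	idx := multiIndex{pkeys: make(map[string][]int64)}

	// Two cached visibility results share one "ItemID" key.
	idx.add("ItemID.01EXAMPLESTATUS", 1)
	idx.add("ItemID.01EXAMPLESTATUS", 2)

	// Invalidating by "ItemID" now evicts both primary keys.
	fmt.Println(idx.invalidate("ItemID.01EXAMPLESTATUS")) // [1 2]
}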

go.mod

@@ -5,7 +5,7 @@ go 1.20
 require (
 	codeberg.org/gruf/go-bytesize v1.0.2
 	codeberg.org/gruf/go-byteutil v1.1.2
-	codeberg.org/gruf/go-cache/v3 v3.2.6
+	codeberg.org/gruf/go-cache/v3 v3.3.0
 	codeberg.org/gruf/go-debug v1.3.0
 	codeberg.org/gruf/go-errors/v2 v2.2.0
 	codeberg.org/gruf/go-fastcopy v1.1.2

go.sum

@@ -49,8 +49,8 @@ codeberg.org/gruf/go-bytesize v1.0.2/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacp
 codeberg.org/gruf/go-byteutil v1.0.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
 codeberg.org/gruf/go-byteutil v1.1.2 h1:TQLZtTxTNca9xEfDIndmo7nBYxeS94nrv/9DS3Nk5Tw=
 codeberg.org/gruf/go-byteutil v1.1.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
-codeberg.org/gruf/go-cache/v3 v3.2.6 h1:PtAGOvCTGwhqOqIEFBP4M0F6xbaAWYe3t/7QYGNzulI=
-codeberg.org/gruf/go-cache/v3 v3.2.6/go.mod h1:pTeVPEb9DshXUkd8Dg76UcsLpU6EC/tXQ2qb+JrmxEc=
+codeberg.org/gruf/go-cache/v3 v3.3.0 h1:Bor75j4MYJIDqH22/aQvmwA7hMqBOzDOWdSQz25Lq+8=
+codeberg.org/gruf/go-cache/v3 v3.3.0/go.mod h1:pTeVPEb9DshXUkd8Dg76UcsLpU6EC/tXQ2qb+JrmxEc=
 codeberg.org/gruf/go-debug v1.3.0 h1:PIRxQiWUFKtGOGZFdZ3Y0pqyfI0Xr87j224IYe2snZs=
 codeberg.org/gruf/go-debug v1.3.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
 codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=


@@ -65,8 +65,9 @@ func (c *Caches) Stop() {
 }
 
 // setuphooks sets necessary cache invalidation hooks between caches,
-// as an invalidation indicates a database UPDATE / DELETE. INSERT is
-// not handled by invalidation hooks and must be invalidated manually.
+// as an invalidation indicates a database INSERT / UPDATE / DELETE.
+// NOTE THEY ARE ONLY CALLED WHEN THE ITEM IS IN THE CACHE, SO FOR
+// HOOKS TO BE CALLED ON DELETE YOU MUST FIRST POPULATE IT IN THE CACHE.
 func (c *Caches) setuphooks() {
 	c.GTS.Account().SetInvalidateCallback(func(account *gtsmodel.Account) {
 		// Invalidate account ID cached visibility.
@@ -103,13 +104,18 @@ func (c *Caches) setuphooks() {
 		c.Visibility.Invalidate("ItemID", followReq.TargetAccountID)
 		c.Visibility.Invalidate("RequesterID", followReq.TargetAccountID)
 
-		// Invalidate any cached follow corresponding to this request.
-		c.GTS.Follow().Invalidate("AccountID.TargetAccountID", followReq.AccountID, followReq.TargetAccountID)
+		// Invalidate any cached follow with same ID.
+		c.GTS.Follow().Invalidate("ID", followReq.ID)
 	})
 
 	c.GTS.Status().SetInvalidateCallback(func(status *gtsmodel.Status) {
 		// Invalidate status ID cached visibility.
 		c.Visibility.Invalidate("ItemID", status.ID)
+
+		for _, id := range status.AttachmentIDs {
+			// Invalidate cache for attached media IDs,
+			c.GTS.Media().Invalidate("ID", id)
+		}
 	})
 
 	c.GTS.User().SetInvalidateCallback(func(user *gtsmodel.User) {

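One practical consequence of the capitalised NOTE above: these invalidation hooks only fire for items that are actually resident in the cache, so a database DELETE cascades through them only if the row was previously loaded or stored into the cache; otherwise the dependent entries must be invalidated manually. A tiny stand-alone sketch of that behaviour follows (a toy cache for illustration, not the go-cache API):

package main

import "fmt"

// toyCache mimics a result cache with an invalidate callback:
// the callback only runs for entries that are currently cached.
type toyCache[V any] struct {
	entries      map[string]V
	onInvalidate func(V)
}

// Invalidate drops the entry for key, firing the callback
// only if something was actually cached under that key.
func (c *toyCache[V]) Invalidate(key string) {
	if v, ok := c.entries[key]; ok {
		delete(c.entries, key)
		c.onInvalidate(v)
	}
}

func main() {
	c := &toyCache[string]{
		entries:      map[string]string{"follow:01AAA": "cached follow"},
		onInvalidate: func(v string) { fmt.Println("hook ran for:", v) },
	}

	c.Invalidate("follow:01AAA") // hook ran for: cached follow
	c.Invalidate("follow:01BBB") // never cached, so the hook stays silent
}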

@@ -30,8 +30,8 @@ type VisibilityCache struct {
 // NOTE: the cache MUST NOT be in use anywhere, this is not thread-safe.
 func (c *VisibilityCache) Init() {
 	c.Cache = result.New([]result.Lookup{
-		{Name: "ItemID"},
-		{Name: "RequesterID"},
+		{Name: "ItemID", Multi: true},
+		{Name: "RequesterID", Multi: true},
 		{Name: "Type.RequesterID.ItemID"},
 	}, func(v1 *CachedVisibility) *CachedVisibility {
 		v2 := new(CachedVisibility)


@@ -208,14 +208,17 @@ func (r *relationshipDB) AcceptFollowRequest(ctx context.Context, sourceAccountI
 		Notify: followReq.Notify,
 	}
 
+	if err := r.state.Caches.GTS.Follow().Store(follow, func() error {
 		// If the follow already exists, just
 		// replace the URI with the new one.
-	if _, err := r.conn.
+		_, err := r.conn.
 			NewInsert().
 			Model(follow).
 			On("CONFLICT (?,?) DO UPDATE set ? = ?", bun.Ident("account_id"), bun.Ident("target_account_id"), bun.Ident("uri"), follow.URI).
-		Exec(ctx); err != nil {
-		return nil, r.conn.ProcessError(err)
+			Exec(ctx)
+		return r.conn.ProcessError(err)
+	}); err != nil {
+		return nil, err
 	}
 
 	// Delete original follow request.
@@ -227,8 +230,7 @@ func (r *relationshipDB) AcceptFollowRequest(ctx context.Context, sourceAccountI
 		return nil, r.conn.ProcessError(err)
 	}
 
-	// Invalidate follow request from cache lookups; this will
-	// invalidate the follow as well via the invalidate hook.
+	// Invalidate follow request from cache lookups
 	r.state.Caches.GTS.FollowRequest().Invalidate("ID", followReq.ID)
 
 	// Delete original follow request notification

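On the AcceptFollowRequest change above: wrapping the insert in Follow().Store() caches the accepted follow as part of the write, and the ON CONFLICT (account_id, target_account_id) DO UPDATE SET uri clause keeps the insert idempotent for a given account pair. A simplified stand-alone model of that upsert semantic (not the actual bun query; names below are illustrative):

package main

import "fmt"

// followKey mirrors the (account_id, target_account_id) conflict
// target used by the ON CONFLICT clause in the query above.
type followKey struct {
	accountID       string
	targetAccountID string
}

// upsertFollow models "INSERT ... ON CONFLICT (...) DO UPDATE SET uri":
// inserting again for the same account pair replaces the URI rather
// than failing on, or duplicating, the existing row.
func upsertFollow(table map[followKey]string, k followKey, uri string) {
	table[k] = uri
}

func main() {
	table := map[followKey]string{}
	k := followKey{accountID: "01AAA", targetAccountID: "01BBB"}

	upsertFollow(table, k, "https://example.org/follows/old")
	upsertFollow(table, k, "https://example.org/follows/new")

	fmt.Println(len(table), table[k]) // 1 https://example.org/follows/new
}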

@@ -19,6 +19,10 @@ type Lookup struct {
 	// under zero value keys, otherwise ignore them.
 	AllowZero bool
 
+	// Multi allows specifying a key capable of storing
+	// multiple results. Note this only supports invalidate.
+	Multi bool
+
 	// TODO: support toggling case sensitive lookups.
 	// CaseSensitive bool
 }
@@ -155,10 +159,14 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 	var (
 		zero Value
 		res  result[Value]
+		ok   bool
 	)
 
 	// Get lookup key info by name.
 	keyInfo := c.lookups.get(lookup)
+	if !keyInfo.unique {
+		panic("non-unique lookup does not support load: " + lookup)
+	}
 
 	// Generate cache key string.
 	ckey := keyInfo.genKey(keyParts)
@@ -167,11 +175,11 @@
 	c.cache.Lock()
 
 	// Look for primary cache key
-	pkey, ok := keyInfo.pkeys[ckey]
+	pkeys := keyInfo.pkeys[ckey]
 
-	if ok {
+	if ok = (len(pkeys) > 0); ok {
 		// Fetch the result for primary key
-		entry, _ := c.cache.Cache.Get(pkey)
+		entry, _ := c.cache.Cache.Get(pkeys[0])
 		res = entry.Value
 	}
@@ -252,9 +260,13 @@ func (c *Cache[Value]) Store(value Value, store func() error) error {
 // Has checks the cache for a positive result under the given lookup and key parts.
 func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
 	var res result[Value]
+	var ok bool
 
 	// Get lookup key info by name.
 	keyInfo := c.lookups.get(lookup)
+	if !keyInfo.unique {
+		panic("non-unique lookup does not support has: " + lookup)
+	}
 
 	// Generate cache key string.
 	ckey := keyInfo.genKey(keyParts)
@@ -263,11 +275,11 @@ func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
 	c.cache.Lock()
 
 	// Look for primary key for cache key
-	pkey, ok := keyInfo.pkeys[ckey]
+	pkeys := keyInfo.pkeys[ckey]
 
-	if ok {
+	if ok = (len(pkeys) > 0); ok {
 		// Fetch the result for primary key
-		entry, _ := c.cache.Cache.Get(pkey)
+		entry, _ := c.cache.Cache.Get(pkeys[0])
 		res = entry.Value
 	}
@@ -288,33 +300,35 @@ func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
 	// Look for primary key for cache key
 	c.cache.Lock()
-	pkey, ok := keyInfo.pkeys[ckey]
+	pkeys := keyInfo.pkeys[ckey]
 	c.cache.Unlock()
 
-	if !ok {
-		return
-	}
-
-	// Invalid by primary key
-	c.cache.Invalidate(pkey)
+	for _, pkey := range pkeys {
+		// Invalidate each primary key
+		c.cache.Invalidate(pkey)
+	}
 }
 
 // Clear empties the cache, calling the invalidate callback.
-func (c *Cache[Value]) Clear() {
-	c.cache.Clear()
-}
+func (c *Cache[Value]) Clear() { c.cache.Clear() }
 
 // store will cache this result under all of its required cache keys.
 func (c *Cache[Value]) store(res result[Value]) {
+	// Get primary key
+	pnext := c.next
+	c.next++
+	if pnext > c.next {
+		panic("cache primary key overflow")
+	}
+
 	for _, key := range res.Keys {
-		pkeys := key.info.pkeys
+		// Look for cache primary keys.
+		pkeys := key.info.pkeys[key.key]
 
-		// Look for cache primary key
-		pkey, ok := pkeys[key.key]
-		if ok {
+		if key.info.unique && len(pkeys) > 0 {
+			for _, conflict := range pkeys {
 			// Get the overlapping result with this key.
-			entry, _ := c.cache.Cache.Get(pkey)
+			entry, _ := c.cache.Cache.Get(conflict)
 
 			// From conflicting entry, drop this key, this
 			// will prevent eviction cleanup key confusion.
@@ -323,28 +337,23 @@ func (c *Cache[Value]) store(res result[Value]) {
 			if len(entry.Value.Keys) == 0 {
 				// We just over-wrote the only lookup key for
 				// this value, so we drop its primary key too.
-				c.cache.Cache.Delete(pkey)
+				c.cache.Cache.Delete(conflict)
 			}
 		}
-	}
 
-	// Get primary key
-	pkey := c.next
-	c.next++
-	if pkey > c.next {
-		panic("cache primary key overflow")
-	}
-
-	// Store all primary key lookups
-	for _, key := range res.Keys {
-		pkeys := key.info.pkeys
-		pkeys[key.key] = pkey
+			// Drop these keys.
+			pkeys = pkeys[:0]
+		}
+
+		// Store primary key lookup.
+		pkeys = append(pkeys, pnext)
+		key.info.pkeys[key.key] = pkeys
 	}
 
 	// Store main entry under primary key, using evict hook if needed
-	c.cache.Cache.SetWithHook(pkey, &ttl.Entry[int64, result[Value]]{
+	c.cache.Cache.SetWithHook(pnext, &ttl.Entry[int64, result[Value]]{
 		Expiry: time.Now().Add(c.cache.TTL),
-		Key:    pkey,
+		Key:    pnext,
 		Value:  res,
 	}, func(_ int64, item *ttl.Entry[int64, result[Value]]) {
 		c.cache.Evict(item)


@@ -122,14 +122,17 @@ type structKey struct {
 	// zero != "" --> don't allow zero value keys
 	zero string
 
+	// unique determines whether this structKey supports
+	// multiple or just the singular unique result.
+	unique bool
+
 	// fields is a slice of runtime struct field
 	// indices, of the fields encompassed by this key.
 	fields []structField
 
 	// pkeys is a lookup of stored struct key values
 	// to the primary cache lookup key (int64).
-	pkeys map[string]int64
+	pkeys map[string][]int64
 }
 
 type structField struct {
@@ -220,8 +223,11 @@ func newStructKey(lk Lookup, t reflect.Type) structKey {
 		sk.zero = sk.genKey(zeros)
 	}
 
+	// Set unique lookup flag.
+	sk.unique = !lk.Multi
+
 	// Allocate primary lookup map
-	sk.pkeys = make(map[string]int64)
+	sk.pkeys = make(map[string][]int64)
 
 	return sk
 }

vendor/modules.txt

@@ -13,7 +13,7 @@ codeberg.org/gruf/go-bytesize
 # codeberg.org/gruf/go-byteutil v1.1.2
 ## explicit; go 1.16
 codeberg.org/gruf/go-byteutil
-# codeberg.org/gruf/go-cache/v3 v3.2.6
+# codeberg.org/gruf/go-cache/v3 v3.3.0
 ## explicit; go 1.19
 codeberg.org/gruf/go-cache/v3
 codeberg.org/gruf/go-cache/v3/result