update go-structr v0.2.0 => v0.3.0 to fix possible hash collision issues (#2586)

kim authored 2024-01-29 15:13:53 +00:00, committed by GitHub
parent aa8bbe6ad2
commit 81198fa2d0
GPG key ID: B5690EEEBB952194
17 changed files with 836 additions and 755 deletions

go.mod (4 lines changed)

@ -5,6 +5,7 @@ go 1.21
toolchain go1.21.3
require (
codeberg.org/gruf/go-bytes v1.0.2
codeberg.org/gruf/go-bytesize v1.0.2
codeberg.org/gruf/go-byteutil v1.2.0
codeberg.org/gruf/go-cache/v3 v3.5.7
@ -18,7 +19,7 @@ require (
codeberg.org/gruf/go-runners v1.6.2
codeberg.org/gruf/go-sched v1.2.3
codeberg.org/gruf/go-store/v2 v2.2.4
codeberg.org/gruf/go-structr v0.2.0
codeberg.org/gruf/go-structr v0.3.0
codeberg.org/superseriousbusiness/exif-terminator v0.7.0
github.com/DmitriyVTitov/size v1.5.0
github.com/KimMachineGun/automemlimit v0.5.0
@ -80,7 +81,6 @@ require (
require (
codeberg.org/gruf/go-atomics v1.1.0 // indirect
codeberg.org/gruf/go-bitutil v1.1.0 // indirect
codeberg.org/gruf/go-bytes v1.0.2 // indirect
codeberg.org/gruf/go-fastpath/v2 v2.0.0 // indirect
codeberg.org/gruf/go-maps v1.0.3 // indirect
github.com/aymerick/douceur v0.2.0 // indirect

go.sum (4 lines changed)

@ -68,8 +68,8 @@ codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk
codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A=
codeberg.org/gruf/go-store/v2 v2.2.4 h1:8HO1Jh2gg7boQKA3hsDAIXd9zwieu5uXwDXEcTOD9js=
codeberg.org/gruf/go-store/v2 v2.2.4/go.mod h1:zI4VWe5CpXAktYMtaBMrgA5QmO0sQH53LBRvfn1huys=
codeberg.org/gruf/go-structr v0.2.0 h1:9U9uWae4j//HxpztDjw4z07WJi+8F8gMrRPLDBZ/rw4=
codeberg.org/gruf/go-structr v0.2.0/go.mod h1:iTMx2Jw2yekHdg4VVY9Clz5u8Suf9veGdk3sWwNmM4M=
codeberg.org/gruf/go-structr v0.3.0 h1:qaQz40LVm6dWDDp0pGsHbsbO0+XbqsXZ9N5YgqMmG78=
codeberg.org/gruf/go-structr v0.3.0/go.mod h1:v9TsGsCBNNSVm/qeOuiblAeIS72YyxEIUoRpW8j4xm8=
codeberg.org/superseriousbusiness/exif-terminator v0.7.0 h1:Y6VApSXhKqExG0H2hZ2JelRK4xmWdjDQjn13CpEfzko=
codeberg.org/superseriousbusiness/exif-terminator v0.7.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=


@ -2,6 +2,8 @@
A performant struct caching library with automated indexing by arbitrary combinations of fields, including support for negative results (errors!). An example use case is in database lookups.
Under the hood, go-structr maintains a hashmap per index, each keyed by a 32-bit, 48-bit or 64-bit (default) hash checksum of the inputted raw index keys. The hash checksum size can be controlled by the following Go build-tags: `structr_32bit_hash` `structr_48bit_hash`
Some example code of how you can use `go-structr` in your application:
```golang
type Cached struct {
```
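The upstream README example is cut off at this point in the diff view. As a hedged sketch only — the struct fields, index names, and config values below are illustrative assumptions rather than the upstream example — typical usage of the v0.3.0 API introduced by this commit (raw variadic key parts instead of precomputed uint64 keys) might look like:

```golang
package main

import (
	"fmt"

	"codeberg.org/gruf/go-structr"
)

// Cached is an illustrative struct type; the field names are assumptions.
type Cached struct {
	ID       string
	Username string
}

func main() {
	var cache structr.Cache[*Cached]

	cache.Init(structr.Config[*Cached]{
		// Each IndexConfig names the struct fields forming one index key.
		Indices: []structr.IndexConfig{
			{Fields: "ID"},
			{Fields: "Username"},
		},

		// Upper bound on cached results before the LRU starts evicting.
		MaxSize: 1000,

		// IgnoreErr decides which load errors are not worth caching.
		IgnoreErr: func(err error) bool { return false },

		// CopyValue ensures callers never share memory with the cache.
		CopyValue: func(c *Cached) *Cached {
			c2 := new(Cached)
			*c2 = *c
			return c2
		},
	})

	cache.Put(&Cached{ID: "1", Username: "kim"})

	// In v0.3.0 raw key parts are passed directly; hashing (and the new
	// collision checks) happen inside the cache rather than at call sites.
	if value, ok := cache.GetOne("Username", "kim"); ok {
		fmt.Println(value.ID)
	}

	cache.Invalidate("ID", "1")
}
```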


@ -3,7 +3,6 @@ package structr
import (
"context"
"errors"
"reflect"
"sync"
)
@ -63,12 +62,7 @@ type Cache[StructType any] struct {
// keeps track of all indexed results,
// in order of last recently used (LRU).
lruList list[*result[StructType]]
// memory pools of common types.
llsPool []*list[*result[StructType]]
resPool []*result[StructType]
keyPool []*indexkey[StructType]
lruList list
// max cache size, imposes size
// limit on the lruList in order
@ -84,7 +78,6 @@ type Cache[StructType any] struct {
// - Cache{}.lruList
// - Index{}.data
// - Cache{} hook fns
// - Cache{} pools
mutex sync.Mutex
}
@ -112,7 +105,7 @@ func (c *Cache[T]) Init(config Config[T]) {
c.mutex.Lock()
c.indices = make([]Index[T], len(config.Indices))
for i, cfg := range config.Indices {
c.indices[i].init(cfg, config.MaxSize)
init_index(&c.indices[i], cfg, config.MaxSize)
}
c.ignore = config.IgnoreErr
c.copy = config.CopyValue
@ -133,26 +126,15 @@ func (c *Cache[T]) Index(name string) *Index[T] {
// GetOne fetches one value from the cache stored under index, using key generated from key parts.
// Note that given number of key parts MUST match expected number and types of the given index name.
func (c *Cache[T]) GetOne(index string, keyParts ...any) (T, bool) {
// Get index with name.
idx := c.Index(index)
// Generate index key from provided parts.
key, ok := idx.hasher.FromParts(keyParts...)
if !ok {
var zero T
return zero, false
}
// Fetch one value for key.
return c.GetOneBy(idx, key)
func (c *Cache[T]) GetOne(index string, key ...any) (T, bool) {
return c.GetOneBy(c.Index(index), key...)
}
// GetOneBy fetches value from cache stored under index, using precalculated index key.
func (c *Cache[T]) GetOneBy(index *Index[T], key uint64) (T, bool) {
func (c *Cache[T]) GetOneBy(index *Index[T], key ...any) (T, bool) {
if index == nil {
panic("no index given")
} else if !index.unique {
} else if !is_unique(index.flags) {
panic("cannot get one by non-unique index")
}
values := c.GetBy(index, key)
@ -165,44 +147,18 @@ func (c *Cache[T]) GetOneBy(index *Index[T], key uint64) (T, bool) {
// Get fetches values from the cache stored under index, using keys generated from given key parts.
// Note that each number of key parts MUST match expected number and types of the given index name.
func (c *Cache[T]) Get(index string, keysParts ...[]any) []T {
// Get index with name.
idx := c.Index(index)
// Preallocate expected keys slice length.
keys := make([]uint64, 0, len(keysParts))
// Acquire hasher.
h := getHasher()
for _, parts := range keysParts {
h.Reset()
// Generate key from provided parts into buffer.
key, ok := idx.hasher.fromParts(h, parts...)
if !ok {
continue
}
// Append hash sum to keys.
keys = append(keys, key)
}
// Done with h.
putHasher(h)
// Continue fetching values.
return c.GetBy(idx, keys...)
func (c *Cache[T]) Get(index string, keys ...[]any) []T {
return c.GetBy(c.Index(index), keys...)
}
// GetBy fetches values from the cache stored under index, using precalculated index keys.
func (c *Cache[T]) GetBy(index *Index[T], keys ...uint64) []T {
func (c *Cache[T]) GetBy(index *Index[T], keys ...[]any) []T {
if index == nil {
panic("no index given")
}
// Preallocate a slice of est. len.
values := make([]T, 0, len(keys))
// Acquire hasher.
h := get_hasher()
// Acquire lock.
c.mutex.Lock()
@ -213,40 +169,61 @@ func (c *Cache[T]) GetBy(index *Index[T], keys ...uint64) []T {
panic("not initialized")
}
// Check index for all keys.
// Preallocate expected ret slice.
values := make([]T, 0, len(keys))
for _, key := range keys {
// Get indexed results.
list := index.data[key]
// Generate sum from provided key.
sum, ok := index_hash(index, h, key)
if !ok {
continue
}
if list != nil {
// Concatenate all results with values.
list.rangefn(func(e *elem[*result[T]]) {
if e.Value.err != nil {
return
}
// Get indexed results list at key.
list := index_get(index, sum, key)
if list == nil {
continue
}
// Append a copy of value.
value := c.copy(e.Value.value)
// Concatenate all *values* from non-err cached results.
list_rangefn(list, func(e *list_elem) {
entry := (*index_entry)(e.data)
res := entry.result
switch value := res.data.(type) {
case T:
// Append value COPY.
value = c.copy(value)
values = append(values, value)
// Push to front of LRU list, USING
// THE RESULT'S LRU ENTRY, NOT THE
// INDEX KEY ENTRY. VERY IMPORTANT!!
c.lruList.moveFront(&e.Value.entry)
})
}
case error:
// Don't bump
// for errors.
return
}
// Push to front of LRU list, USING
// THE RESULT'S LRU ENTRY, NOT THE
// INDEX KEY ENTRY. VERY IMPORTANT!!
list_move_front(&c.lruList, &res.elem)
})
}
// Done with lock.
c.mutex.Unlock()
// Done with h.
hash_pool.Put(h)
return values
}
// Put will insert the given values into cache,
// calling any invalidate hook on each value.
func (c *Cache[T]) Put(values ...T) {
var z Hash
// Acquire lock.
c.mutex.Lock()
@ -261,7 +238,7 @@ func (c *Cache[T]) Put(values ...T) {
// Store all the passed values.
for _, value := range values {
c.store(nil, 0, value, nil)
c.store_value(nil, z, nil, value)
}
// Done with lock.
@ -279,23 +256,16 @@ func (c *Cache[T]) Put(values ...T) {
// LoadOne fetches one result from the cache stored under index, using key generated from key parts.
// In the case that no result is found, the provided load callback will be used to hydrate the cache.
// Note that given number of key parts MUST match expected number and types of the given index name.
func (c *Cache[T]) LoadOne(index string, load func() (T, error), keyParts ...any) (T, error) {
// Get index with name.
idx := c.Index(index)
// Generate cache from from provided parts.
key, _ := idx.hasher.FromParts(keyParts...)
// Continue loading this result.
return c.LoadOneBy(idx, load, key)
func (c *Cache[T]) LoadOne(index string, load func() (T, error), key ...any) (T, error) {
return c.LoadOneBy(c.Index(index), load, key...)
}
// LoadOneBy fetches one result from the cache stored under index, using precalculated index key.
// In the case that no result is found, provided load callback will be used to hydrate the cache.
func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64) (T, error) {
func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key ...any) (T, error) {
if index == nil {
panic("no index given")
} else if !index.unique {
} else if !is_unique(index.flags) {
panic("cannot get one by non-unique index")
}
@ -311,6 +281,15 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
err error
)
// Acquire hasher.
h := get_hasher()
// Generate sum from provided key.
sum, _ := index_hash(index, h, key)
// Done with h.
hash_pool.Put(h)
// Acquire lock.
c.mutex.Lock()
@ -324,26 +303,26 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
panic("not initialized")
}
// Get indexed results.
list := index.data[key]
// Get indexed list at hash key.
list := index_get(index, sum, key)
if ok = (list != nil && list.head != nil); ok {
e := list.head
if ok = (list != nil); ok {
entry := (*index_entry)(list.head.data)
res := entry.result
// Extract val / err.
val = e.Value.value
err = e.Value.err
if err == nil {
// We only ever ret
// a COPY of value.
val = c.copy(val)
switch data := res.data.(type) {
case T:
// Return value COPY.
val = c.copy(data)
case error:
// Return error.
err = data
}
// Push to front of LRU list, USING
// THE RESULT'S LRU ENTRY, NOT THE
// INDEX KEY ENTRY. VERY IMPORTANT!!
c.lruList.moveFront(&e.Value.entry)
list_move_front(&c.lruList, &res.elem)
}
// Done with lock.
@ -370,7 +349,11 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
// Note this handles copying of
// the provided value, so it is
// safe for us to return as-is.
c.store(index, key, val, err)
if err != nil {
c.store_error(index, sum, key, err)
} else {
c.store_value(index, sum, key, val)
}
// Done with lock.
c.mutex.Unlock()
@ -384,7 +367,7 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key uint64
// callback to hydrate the cache with any other values. Example usage here is that you may see which values are cached using 'get', and load
// the remaining uncached values using 'load', to minimize database queries. Cached error results are not included or returned by this func.
// Note that given number of key parts MUST match expected number and types of the given index name, in those provided to the get callback.
func (c *Cache[T]) Load(index string, get func(load func(keyParts ...any) bool), load func() ([]T, error)) (values []T, err error) {
func (c *Cache[T]) Load(index string, get func(load func(key ...any) bool), load func() ([]T, error)) (values []T, err error) {
return c.LoadBy(c.Index(index), get, load)
}
@ -394,11 +377,14 @@ func (c *Cache[T]) Load(index string, get func(load func(keyParts ...any) bool),
// to hydrate the cache with any other values. Example usage here is that you may see which values are cached using 'get', and load the
// remaining uncached values using 'load', to minimize database queries. Cached error results are not included or returned by this func.
// Note that given number of key parts MUST match expected number and types of the given index name, in those provided to the get callback.
func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(keyParts ...any) bool), load func() ([]T, error)) (values []T, err error) {
func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(key ...any) bool), load func() ([]T, error)) (values []T, err error) {
if index == nil {
panic("no index given")
}
// Acquire hasher.
h := get_hasher()
// Acquire lock.
c.mutex.Lock()
@ -417,58 +403,60 @@ func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(keyParts ...any) b
}
}()
// Acquire hasher.
h := getHasher()
// Pass loader to user func.
get(func(key ...any) bool {
// Pass cache check to user func.
get(func(keyParts ...any) bool {
h.Reset()
// Generate index key from provided key parts.
key, ok := index.hasher.fromParts(h, keyParts...)
// Generate sum from provided key.
sum, ok := index_hash(index, h, key)
if !ok {
return false
}
// Get all indexed results.
list := index.data[key]
if list != nil && list.len > 0 {
// Value length before
// any below appends.
before := len(values)
// Concatenate all results with values.
list.rangefn(func(e *elem[*result[T]]) {
if e.Value.err != nil {
return
}
// Append a copy of value.
value := c.copy(e.Value.value)
values = append(values, value)
// Push to front of LRU list, USING
// THE RESULT'S LRU ENTRY, NOT THE
// INDEX KEY ENTRY. VERY IMPORTANT!!
c.lruList.moveFront(&e.Value.entry)
})
// Only if values changed did
// we actually find anything.
return len(values) != before
// Get indexed results at hash key.
list := index_get(index, sum, key)
if list == nil {
return false
}
return false
})
// Value length before
// any below appends.
before := len(values)
// Done with h.
putHasher(h)
// Concatenate all *values* from non-err cached results.
list_rangefn(list, func(e *list_elem) {
entry := (*index_entry)(e.data)
res := entry.result
switch value := res.data.(type) {
case T:
// Append value COPY.
value = c.copy(value)
values = append(values, value)
case error:
// Don't bump
// for errors.
return
}
// Push to front of LRU list, USING
// THE RESULT'S LRU ENTRY, NOT THE
// INDEX KEY ENTRY. VERY IMPORTANT!!
list_move_front(&c.lruList, &res.elem)
})
// Only if values changed did
// we actually find anything.
return len(values) != before
})
// Done with lock.
c.mutex.Unlock()
unlocked = true
// Done with h.
hash_pool.Put(h)
// Load uncached values.
uncached, err := load()
if err != nil {
@ -514,26 +502,29 @@ func (c *Cache[T]) Store(value T, store func() error) error {
}
// Invalidate generates index key from parts and invalidates all stored under it.
func (c *Cache[T]) Invalidate(index string, keyParts ...any) {
// Get index with name.
idx := c.Index(index)
// Generate cache from from provided parts.
key, ok := idx.hasher.FromParts(keyParts...)
if !ok {
return
}
// Continue invalidation.
c.InvalidateBy(idx, key)
func (c *Cache[T]) Invalidate(index string, key ...any) {
c.InvalidateBy(c.Index(index), key...)
}
// InvalidateBy invalidates all results stored under index key.
func (c *Cache[T]) InvalidateBy(index *Index[T], key uint64) {
func (c *Cache[T]) InvalidateBy(index *Index[T], key ...any) {
if index == nil {
panic("no index given")
}
// Acquire hasher.
h := get_hasher()
// Generate sum from provided key.
sum, ok := index_hash(index, h, key)
// Done with h.
hash_pool.Put(h)
if !ok {
return
}
var values []T
// Acquire lock.
@ -544,9 +535,13 @@ func (c *Cache[T]) InvalidateBy(index *Index[T], key uint64) {
// Delete all results under key from index, collecting
// value results and dropping them from all their indices.
index_delete(c, index, key, func(del *result[T]) {
if del.err == nil {
values = append(values, del.value)
index_delete(c, index, sum, key, func(del *result) {
switch value := del.data.(type) {
case T:
// Append value COPY.
value = c.copy(value)
values = append(values, value)
case error:
}
c.delete(del)
})
@ -592,7 +587,8 @@ func (c *Cache[T]) Trim(perc float64) {
}
// Drop oldest from cache.
c.delete(oldest.Value)
res := (*result)(oldest.data)
c.delete(res)
}
// Done with lock.
@ -602,16 +598,6 @@ func (c *Cache[T]) Trim(perc float64) {
// Clear empties the cache by calling .Trim(0).
func (c *Cache[T]) Clear() { c.Trim(0) }
// Clean drops unused items from its memory pools.
// Useful to free memory if cache has downsized.
func (c *Cache[T]) Clean() {
c.mutex.Lock()
c.llsPool = nil
c.resPool = nil
c.keyPool = nil
c.mutex.Unlock()
}
// Len returns the current length of cache.
func (c *Cache[T]) Len() int {
c.mutex.Lock()
@ -628,91 +614,93 @@ func (c *Cache[T]) Cap() int {
return m
}
// store will store the given value / error result in the cache, storing it under the
// already provided index + key if provided, else generating keys from provided value.
func (c *Cache[T]) store(index *Index[T], key uint64, value T, err error) {
func (c *Cache[T]) store_value(index *Index[T], hash Hash, key []any, value T) {
// Acquire new result.
res := result_acquire(c)
if index != nil {
// Append result to the provided
// precalculated key and its index.
index_append(c, index, key, res)
} else if err != nil {
// This is an error result without
// an index provided, nothing we
// can do here so release result.
result_release(c, res)
return
// Append result to the provided index
// with precalculated key / its hash.
index_append(c, index, hash, key, res)
}
// Set and check the result error.
if res.err = err; res.err == nil {
// Create COPY of value.
value = c.copy(value)
res.data = value
// This is value result, we need to
// store it under all other indices
// other than the provided.
//
// Create COPY of value.
res.value = c.copy(value)
// Acquire hasher.
h := get_hasher()
// Get reflected value of incoming
// value, used during cache key gen.
rvalue := reflect.ValueOf(value)
for i := range c.indices {
// Get current index ptr.
idx := &(c.indices[i])
// Acquire hasher.
h := getHasher()
for i := range c.indices {
// Get current index ptr.
idx := &(c.indices[i])
if idx == index {
// Already stored under
// this index, ignore.
continue
}
// Generate hash from reflect value,
// (this ignores zero value keys).
h.Reset() // reset buf first
key, ok := idx.hasher.fromRValue(h, rvalue)
if !ok {
continue
}
// Append result to index at key.
index_append(c, idx, key, res)
if idx == index {
// Already stored under
// this index, ignore.
continue
}
// Done with h.
putHasher(h)
// Get key and hash sum for this index.
key, sum, ok := index_key(idx, h, value)
if !ok {
continue
}
// Append result to index at key.
index_append(c, idx, sum, key, res)
}
// Done with h.
hash_pool.Put(h)
if c.lruList.len > c.maxSize {
// Cache has hit max size!
// Drop the oldest element.
res := c.lruList.tail.Value
ptr := c.lruList.tail.data
res := (*result)(ptr)
c.delete(res)
}
}
func (c *Cache[T]) store_error(index *Index[T], hash Hash, key []any, err error) {
if index == nil {
// nothing we
// can do here.
return
}
// Acquire new result.
res := result_acquire(c)
res.data = err
// Append result to the provided index
// with precalculated key / its hash.
index_append(c, index, hash, key, res)
if c.lruList.len > c.maxSize {
// Cache has hit max size!
// Drop the oldest element.
ptr := c.lruList.tail.data
res := (*result)(ptr)
c.delete(res)
}
}
// delete will delete the given result from the cache, deleting
// it from all indices it is stored under, and main LRU list.
func (c *Cache[T]) delete(res *result[T]) {
for len(res.keys) != 0 {
func (c *Cache[T]) delete(res *result) {
for len(res.indexed) != 0 {
// Pop indexkey at end of list.
ikey := res.keys[len(res.keys)-1]
res.keys = res.keys[:len(res.keys)-1]
// Pop last indexed entry from list.
entry := res.indexed[len(res.indexed)-1]
res.indexed = res.indexed[:len(res.indexed)-1]
// Drop this result from list at key.
index_deleteOne(c, ikey.index, ikey)
// Drop entry from index.
index_delete_entry(c, entry)
// Release ikey to pool.
indexkey_release(c, ikey)
// Release to memory pool.
index_entry_release(entry)
}
// Release res to pool.


@ -1,41 +0,0 @@
package structr
// String returns a useful debugging repr of result.
// func (r *result[T]) String() string {
// keysbuf := getBuf()
// keysbuf.B = append(keysbuf.B, '[')
// for i := range r.keys {
// keysbuf.B = strconv.AppendQuote(keysbuf.B, r.keys[i].key)
// keysbuf.B = append(keysbuf.B, ',')
// }
// if len(keysbuf.B) > 0 {
// keysbuf.B = keysbuf.B[:len(keysbuf.B)-1]
// }
// keysbuf.B = append(keysbuf.B, ']')
// str := fmt.Sprintf("{value=%v err=%v keys=%s}", r.value, r.err, keysbuf.B)
// putBuf(keysbuf)
// return str
// }
// String returns a useful debugging repr of index.
// func (i *Index[T]) String() string {
// databuf := getBuf()
// for key, values := range i.data {
// databuf.WriteString("key")
// databuf.B = strconv.AppendQuote(databuf.B, key)
// databuf.B = append(databuf.B, '=')
// fmt.Fprintf(databuf, "%v", values)
// databuf.B = append(databuf.B, ' ')
// }
// if len(i.data) > 0 {
// databuf.B = databuf.B[:len(databuf.B)-1]
// }
// str := fmt.Sprintf("{name=%s data={%s}}", i.name, databuf.B)
// putBuf(databuf)
// return str
// }
// String returns a useful debugging repr of indexkey.
// func (i *indexkey[T]) String() string {
// return i.index.name + "[" + strconv.Quote(i.key) + "]"
// }


@ -2,11 +2,50 @@ package structr
import (
"reflect"
"sync"
"unsafe"
"github.com/zeebo/xxh3"
)
var hash_pool sync.Pool
func get_hasher() *xxh3.Hasher {
v := hash_pool.Get()
if v == nil {
v = new(xxh3.Hasher)
}
return v.(*xxh3.Hasher)
}
func hash_sum(fields []structfield, h *xxh3.Hasher, key []any) (Hash, bool) {
if len(key) != len(fields) {
panicf("incorrect number key parts: want=%d received=%d",
len(key),
len(fields),
)
}
var zero bool
h.Reset()
for i, part := range key {
zero = fields[i].hasher(h, part) || zero
}
// See: https://github.com/Cyan4973/xxHash/issues/453#issuecomment-696838445
//
// In order to extract 32-bit from a good 64-bit hash result,
// there are many possible choices, which are all valid.
// I would typically grab the lower 32-bit and call it a day.
//
// Grabbing any other 32-bit (the upper part for example) is fine too.
//
// xoring higher and lower bits makes more sense whenever the produced hash offers dubious quality.
// FNV, for example, has poor mixing in its lower bits, so it's better to mix with the higher bits.
//
// XXH3 already performs significant output mixing before returning the data,
// so it's not beneficial to add another xorfold stage.
return uint64ToHash(h.Sum64()), zero
}
func hasher(t reflect.Type) func(*xxh3.Hasher, any) bool {
switch t.Kind() {
case reflect.Int,
@ -137,13 +176,13 @@ func hasher(t reflect.Type) func(*xxh3.Hasher, any) bool {
}
func hash8bit(h *xxh3.Hasher, a any) bool {
u := *(*uint8)(iface_value(a))
u := *(*uint8)(data_ptr(a))
_, _ = h.Write([]byte{u})
return u == 0
}
func hash8bitptr(h *xxh3.Hasher, a any) bool {
u := (*uint8)(iface_value(a))
u := (*uint8)(data_ptr(a))
if u == nil {
_, _ = h.Write([]byte{
0,
@ -159,13 +198,13 @@ func hash8bitptr(h *xxh3.Hasher, a any) bool {
}
func hash8bitslice(h *xxh3.Hasher, a any) bool {
b := *(*[]byte)(iface_value(a))
b := *(*[]byte)(data_ptr(a))
_, _ = h.Write(b)
return b == nil
}
func hash16bit(h *xxh3.Hasher, a any) bool {
u := *(*uint16)(iface_value(a))
u := *(*uint16)(data_ptr(a))
_, _ = h.Write([]byte{
byte(u),
byte(u >> 8),
@ -174,7 +213,7 @@ func hash16bit(h *xxh3.Hasher, a any) bool {
}
func hash16bitptr(h *xxh3.Hasher, a any) bool {
u := (*uint16)(iface_value(a))
u := (*uint16)(data_ptr(a))
if u == nil {
_, _ = h.Write([]byte{
0,
@ -191,7 +230,7 @@ func hash16bitptr(h *xxh3.Hasher, a any) bool {
}
func hash16bitslice(h *xxh3.Hasher, a any) bool {
u := *(*[]uint16)(iface_value(a))
u := *(*[]uint16)(data_ptr(a))
for i := range u {
_, _ = h.Write([]byte{
byte(u[i]),
@ -202,7 +241,7 @@ func hash16bitslice(h *xxh3.Hasher, a any) bool {
}
func hash32bit(h *xxh3.Hasher, a any) bool {
u := *(*uint32)(iface_value(a))
u := *(*uint32)(data_ptr(a))
_, _ = h.Write([]byte{
byte(u),
byte(u >> 8),
@ -213,7 +252,7 @@ func hash32bit(h *xxh3.Hasher, a any) bool {
}
func hash32bitptr(h *xxh3.Hasher, a any) bool {
u := (*uint32)(iface_value(a))
u := (*uint32)(data_ptr(a))
if u == nil {
_, _ = h.Write([]byte{
0,
@ -232,7 +271,7 @@ func hash32bitptr(h *xxh3.Hasher, a any) bool {
}
func hash32bitslice(h *xxh3.Hasher, a any) bool {
u := *(*[]uint32)(iface_value(a))
u := *(*[]uint32)(data_ptr(a))
for i := range u {
_, _ = h.Write([]byte{
byte(u[i]),
@ -245,7 +284,7 @@ func hash32bitslice(h *xxh3.Hasher, a any) bool {
}
func hash64bit(h *xxh3.Hasher, a any) bool {
u := *(*uint64)(iface_value(a))
u := *(*uint64)(data_ptr(a))
_, _ = h.Write([]byte{
byte(u),
byte(u >> 8),
@ -260,7 +299,7 @@ func hash64bit(h *xxh3.Hasher, a any) bool {
}
func hash64bitptr(h *xxh3.Hasher, a any) bool {
u := (*uint64)(iface_value(a))
u := (*uint64)(data_ptr(a))
if u == nil {
_, _ = h.Write([]byte{
0,
@ -283,7 +322,7 @@ func hash64bitptr(h *xxh3.Hasher, a any) bool {
}
func hash64bitslice(h *xxh3.Hasher, a any) bool {
u := *(*[]uint64)(iface_value(a))
u := *(*[]uint64)(data_ptr(a))
for i := range u {
_, _ = h.Write([]byte{
byte(u[i]),
@ -300,13 +339,13 @@ func hash64bitslice(h *xxh3.Hasher, a any) bool {
}
func hashstring(h *xxh3.Hasher, a any) bool {
s := *(*string)(iface_value(a))
s := *(*string)(data_ptr(a))
_, _ = h.WriteString(s)
return s == ""
}
func hashstringptr(h *xxh3.Hasher, a any) bool {
s := (*string)(iface_value(a))
s := (*string)(data_ptr(a))
if s == nil {
_, _ = h.Write([]byte{
0,
@ -322,7 +361,7 @@ func hashstringptr(h *xxh3.Hasher, a any) bool {
}
func hashstringslice(h *xxh3.Hasher, a any) bool {
s := *(*[]string)(iface_value(a))
s := *(*[]string)(data_ptr(a))
for i := range s {
_, _ = h.WriteString(s[i])
}
@ -363,8 +402,3 @@ func hashjsonmarshaler(h *xxh3.Hasher, a any) bool {
_, _ = h.Write(b)
return b == nil
}
func iface_value(a any) unsafe.Pointer {
type eface struct{ _, v unsafe.Pointer }
return (*eface)(unsafe.Pointer(&a)).v
}

vendor/codeberg.org/gruf/go-structr/hash_32.go (generated, vendored, new file: 14 lines)

@ -0,0 +1,14 @@
//go:build structr_32bit_hash
// +build structr_32bit_hash
package structr
// Hash is the current compiler
// flag defined cache key hash
// checksum type. Here; uint32.
type Hash uint32
// uint64ToHash converts uint64 to currently Hash type.
func uint64ToHash(u uint64) Hash {
return Hash(u >> 32)
}

vendor/codeberg.org/gruf/go-structr/hash_48.go (generated, vendored, new file: 21 lines)

@ -0,0 +1,21 @@
//go:build structr_48bit_hash
// +build structr_48bit_hash
package structr
// Hash is the current compiler
// flag defined cache key hash
// checksum type. Here; uint48.
type Hash [6]byte
// uint64ToHash converts uint64 to currently Hash type.
func uint64ToHash(u uint64) Hash {
return Hash{
0: byte(u),
1: byte(u >> 8),
2: byte(u >> 16),
3: byte(u >> 24),
4: byte(u >> 32),
5: byte(u >> 40),
}
}

vendor/codeberg.org/gruf/go-structr/hash_64.go (generated, vendored, new file: 14 lines)

@ -0,0 +1,14 @@
//go:build !structr_32bit_hash && !structr_48bit_hash
// +build !structr_32bit_hash,!structr_48bit_hash
package structr
// Hash is the current compiler
// flag defined cache key hash
// checksum type. Here; uint64.
type Hash uint64
// uint64ToHash converts uint64 to currently Hash type.
func uint64ToHash(u uint64) Hash {
return Hash(u)
}


@ -1,176 +0,0 @@
package structr
import (
"reflect"
"strings"
"github.com/zeebo/xxh3"
)
// Hasher provides hash checksumming for a configured
// index, based on an arbitrary combination of generic
// parameter struct type's fields. This provides hashing
// both by input of the fields separately, or passing
// an instance of the generic parameter struct type.
//
// Supported field types by the hasher include:
// - ~int
// - ~int8
// - ~int16
// - ~int32
// - ~int64
// - ~float32
// - ~float64
// - ~string
// - slices / ptrs of the above
type Hasher[StructType any] struct {
// fields contains our representation
// of struct fields contained in the
// creation of sums by this hasher.
fields []structfield
// zero specifies whether zero
// value fields are permitted.
zero bool
}
// NewHasher returns a new initialized Hasher for the receiving generic
// parameter type, comprising of the given field strings, and whether to
// allow zero values to be included within generated hash checksum values.
func NewHasher[T any](fields []string, allowZero bool) Hasher[T] {
var h Hasher[T]
// Preallocate expected struct field slice.
h.fields = make([]structfield, len(fields))
// Get the reflected struct ptr type.
t := reflect.TypeOf((*T)(nil)).Elem()
for i, fieldName := range fields {
// Split name to account for nesting.
names := strings.Split(fieldName, ".")
// Look for a usable struct field from type.
sfield, ok := findField(t, names, allowZero)
if !ok {
panicf("failed finding field: %s", fieldName)
}
// Set parsed struct field.
h.fields[i] = sfield
}
// Set config flags.
h.zero = allowZero
return h
}
// FromParts generates hash checksum (used as index key) from individual key parts.
func (h *Hasher[T]) FromParts(parts ...any) (sum uint64, ok bool) {
hh := getHasher()
sum, ok = h.fromParts(hh, parts...)
putHasher(hh)
return
}
func (h *Hasher[T]) fromParts(hh *xxh3.Hasher, parts ...any) (sum uint64, ok bool) {
if len(parts) != len(h.fields) {
// User must provide correct number of parts for key.
panicf("incorrect number key parts: want=%d received=%d",
len(parts),
len(h.fields),
)
}
if h.zero {
// Zero values are permitted,
// mangle all values and ignore
// zero value return booleans.
for i, part := range parts {
// Write mangled part to hasher.
_ = h.fields[i].hasher(hh, part)
}
} else {
// Zero values are NOT permitted.
for i, part := range parts {
// Write mangled field to hasher.
z := h.fields[i].hasher(hh, part)
if z {
// The value was zero for
// this type, return early.
return 0, false
}
}
}
return hh.Sum64(), true
}
// FromValue generates hash checksum (used as index key) from a value, via reflection.
func (h *Hasher[T]) FromValue(value T) (sum uint64, ok bool) {
rvalue := reflect.ValueOf(value)
hh := getHasher()
sum, ok = h.fromRValue(hh, rvalue)
putHasher(hh)
return
}
func (h *Hasher[T]) fromRValue(hh *xxh3.Hasher, rvalue reflect.Value) (uint64, bool) {
// Follow any ptrs leading to value.
for rvalue.Kind() == reflect.Pointer {
rvalue = rvalue.Elem()
}
if h.zero {
// Zero values are permitted,
// mangle all values and ignore
// zero value return booleans.
for i := range h.fields {
// Get the reflect value's field at idx.
fv := rvalue.FieldByIndex(h.fields[i].index)
fi := fv.Interface()
// Write mangled field to hasher.
_ = h.fields[i].hasher(hh, fi)
}
} else {
// Zero values are NOT permitted.
for i := range h.fields {
// Get the reflect value's field at idx.
fv := rvalue.FieldByIndex(h.fields[i].index)
fi := fv.Interface()
// Write mangled field to hasher.
z := h.fields[i].hasher(hh, fi)
if z {
// The value was zero for
// this type, return early.
return 0, false
}
}
}
return hh.Sum64(), true
}
type structfield struct {
// index is the reflected index
// of this field (this takes into
// account struct nesting).
index []int
// hasher is the relevant function
// for hashing value of structfield
// into the supplied hashbuf, where
// return value indicates if zero.
hasher func(*xxh3.Hasher, any) bool
}


@ -1,7 +1,12 @@
package structr
import (
"reflect"
"strings"
"sync"
"unsafe"
"github.com/zeebo/xxh3"
)
// IndexConfig defines config variables
@ -13,6 +18,18 @@ type IndexConfig struct {
// keys for this index. Nested fields should
// be specified using periods. An example:
// "Username,Favorites.Color"
//
// Field types supported include:
// - ~int
// - ~int8
// - ~int16
// - ~int32
// - ~int64
// - ~float32
// - ~float64
// - ~string
// - slices of above
// - ptrs of above
Fields string
// Multiple indicates whether to accept multiple
@ -32,12 +49,12 @@ type IndexConfig struct {
}
// Index is an exposed Cache internal model, used to
// generate keys and store struct results by the init
// defined key generation configuration. This model is
// exposed to provide faster lookups in the case that
// you would like to manually provide the used index
// via the Cache.___By() series of functions, or access
// the underlying index key generator.
// extract struct keys, generate hash checksums for them
// and store struct results by the init defined config.
// This model is exposed to provide faster lookups in the
// case that you would like to manually provide the used
// index via the Cache.___By() series of functions, or
// access the underlying index key generator.
type Index[StructType any] struct {
// name is the actual name of this
@ -45,68 +62,168 @@ type Index[StructType any] struct {
// string value of contained fields.
name string
// struct field key hasher.
hasher Hasher[StructType]
// backing data store of the index, containing
// the cached results contained within wrapping
// index_entry{} which also contains the exact
// key each result is stored under. the hash map
// only keys by the xxh3 hash checksum for speed.
data map[Hash]*list //[*index_entry[StructType]]
// backing in-memory data store of
// generated index keys to result lists.
data map[uint64]*list[*result[StructType]]
// struct fields encompassed by
// keys (+ hashes) of this index.
fields []structfield
// whether to allow
// multiple results
// per index key.
unique bool
// index flags:
// - 1 << 0 = unique
// - 1 << 1 = allow zero
flags uint8
}
// init initializes this index with the given configuration.
func (i *Index[T]) init(config IndexConfig, max int) {
fields := strings.Split(config.Fields, ",")
// Key returns the configured fields as key, and hash sum of key.
func (i *Index[T]) Key(value T) ([]any, Hash, bool) {
h := get_hasher()
key, sum, ok := index_key(i, h, value)
hash_pool.Put(h)
return key, sum, ok
}
func is_unique(f uint8) bool {
const mask = uint8(1) << 0
return f&mask != 0
}
func set_is_unique(f *uint8) {
const mask = uint8(1) << 0
(*f) |= mask
}
func allow_zero(f uint8) bool {
const mask = uint8(1) << 1
return f&mask != 0
}
func set_allow_zero(f *uint8) {
const mask = uint8(1) << 1
(*f) |= mask
}
func init_index[T any](i *Index[T], config IndexConfig, max int) {
// Set name from the raw
// struct fields string.
i.name = config.Fields
i.hasher = NewHasher[T](fields, config.AllowZero)
i.unique = !config.Multiple
i.data = make(map[uint64]*list[*result[T]], max+1)
// Set struct flags.
if config.AllowZero {
set_allow_zero(&i.flags)
}
if !config.Multiple {
set_is_unique(&i.flags)
}
// Split to get the containing struct fields.
fields := strings.Split(config.Fields, ",")
// Preallocate expected struct field slice.
i.fields = make([]structfield, len(fields))
// Get the reflected struct ptr type.
t := reflect.TypeOf((*T)(nil)).Elem()
for x, fieldName := range fields {
// Split name to account for nesting.
names := strings.Split(fieldName, ".")
// Look for usable struct field.
i.fields[x] = find_field(t, names)
}
// Initialize index_entry list store.
i.data = make(map[Hash]*list, max+1)
}
// Hasher returns the hash checksummer associated with this index.
func (i *Index[T]) Hasher() *Hasher[T] {
return &i.hasher
func index_key[T any](i *Index[T], h *xxh3.Hasher, value T) ([]any, Hash, bool) {
key := extract_fields(value, i.fields)
sum, zero := hash_sum(i.fields, h, key)
if zero && !allow_zero(i.flags) {
var zero Hash
return nil, zero, false
}
return key, sum, true
}
func index_append[T any](c *Cache[T], i *Index[T], key uint64, res *result[T]) {
// Acquire + setup indexkey.
ikey := indexkey_acquire(c)
ikey.entry.Value = res
ikey.key = key
ikey.index = i
func index_hash[T any](i *Index[T], h *xxh3.Hasher, key []any) (Hash, bool) {
sum, zero := hash_sum(i.fields, h, key)
if zero && !allow_zero(i.flags) {
var zero Hash
return zero, false
}
return sum, true
}
// Append to result's indexkeys.
res.keys = append(res.keys, ikey)
func index_get[T any](i *Index[T], hash Hash, key []any) *list {
l := i.data[hash]
if l == nil {
return nil
}
entry := (*index_entry)(l.head.data)
if !is_equal(entry.key, key) {
return nil
}
return l
}
func index_append[T any](c *Cache[T], i *Index[T], hash Hash, key []any, res *result) {
// Get list at key.
l := i.data[key]
l := i.data[hash]
if l == nil {
// Allocate new list.
l = list_acquire(c)
i.data[key] = l
l = list_acquire()
i.data[hash] = l
} else if i.unique {
} else if entry := (*index_entry)(l.head.data); //nocollapse
!is_equal(entry.key, key) {
// Remove currently
// indexed result.
old := l.head
l.remove(old)
// Collision! Drop all.
delete(i.data, hash)
// Iterate entries in list.
for x := 0; x < l.len; x++ {
// Pop current head.
list_remove(l, l.head)
// Extract result.
res := entry.result
// Drop index entry from res.
result_drop_index(res, i)
if len(res.indexed) == 0 {
// Old res now unused,
// release to mem pool.
result_release(c, res)
}
}
return
} else if is_unique(i.flags) {
// Remove current
// indexed entry.
list_remove(l, l.head)
// Get ptr to old
// result before we
// entry before we
// release to pool.
res := old.Value
res := entry.result
// Drop this index's key from
// old res now not indexed here.
result_dropIndex(c, res, i)
if len(res.keys) == 0 {
result_drop_index(res, i)
if len(res.indexed) == 0 {
// Old res now unused,
// release to mem pool.
@ -114,100 +231,162 @@ func index_append[T any](c *Cache[T], i *Index[T], key uint64, res *result[T]) {
}
}
// Add result indexkey to
// front of results list.
l.pushFront(&ikey.entry)
// Acquire + setup index entry.
entry := index_entry_acquire()
entry.index = unsafe.Pointer(i)
entry.result = res
entry.key = key
entry.hash = hash
// Append to result's indexed entries.
res.indexed = append(res.indexed, entry)
// Add index entry to index list.
list_push_front(l, &entry.elem)
}
func index_deleteOne[T any](c *Cache[T], i *Index[T], ikey *indexkey[T]) {
// Get list at key.
l := i.data[ikey.key]
if l == nil {
return
}
// Remove from list.
l.remove(&ikey.entry)
if l.len == 0 {
// Remove list from map.
delete(i.data, ikey.key)
// Release list to pool.
list_release(c, l)
}
}
func index_delete[T any](c *Cache[T], i *Index[T], key uint64, fn func(*result[T])) {
func index_delete[T any](c *Cache[T], i *Index[T], hash Hash, key []any, fn func(*result)) {
if fn == nil {
panic("nil fn")
}
// Get list at key.
l := i.data[key]
// Get list at hash.
l := i.data[hash]
if l == nil {
return
}
// Delete data at key.
delete(i.data, key)
entry := (*index_entry)(l.head.data)
// Iterate results in list.
// Check contains expected key for hash.
if !is_equal(entry.key, key) {
return
}
// Delete data at hash.
delete(i.data, hash)
// Iterate entries in list.
for x := 0; x < l.len; x++ {
// Pop current head.
res := l.head.Value
l.remove(l.head)
entry := (*index_entry)(l.head.data)
list_remove(l, l.head)
// Delete index's key
// from result tracking.
result_dropIndex(c, res, i)
// Extract result.
res := entry.result
// Call hook.
fn(res)
// Drop index entry from res.
result_drop_index(res, i)
}
// Release list to pool.
list_release(c, l)
// Release to pool.
list_release(l)
}
type indexkey[T any] struct {
// linked list entry the related
// result is stored under in the
// Index.data[key] linked list.
entry elem[*result[T]]
func index_delete_entry[T any](c *Cache[T], entry *index_entry) {
// Get from entry.
i := (*Index[T])(entry.index)
// key is the generated index key
// the related result is indexed
// under, in the below index.
key uint64
// index is the index that the
// related result is indexed in.
index *Index[T]
}
func indexkey_acquire[T any](c *Cache[T]) *indexkey[T] {
var ikey *indexkey[T]
if len(c.keyPool) == 0 {
// Allocate new key.
ikey = new(indexkey[T])
} else {
// Pop result from pool slice.
ikey = c.keyPool[len(c.keyPool)-1]
c.keyPool = c.keyPool[:len(c.keyPool)-1]
// Get list at hash sum.
l := i.data[entry.hash]
if l == nil {
return
}
return ikey
// Remove entry from list.
list_remove(l, &entry.elem)
if l.len == 0 {
// Remove list from map.
delete(i.data, entry.hash)
// Release to pool.
list_release(l)
}
// Extract result.
res := entry.result
// Drop index entry from res.
result_drop_index(res, i)
}
func indexkey_release[T any](c *Cache[T], ikey *indexkey[T]) {
// Reset indexkey.
ikey.entry.Value = nil
ikey.key = 0
ikey.index = nil
var entry_pool sync.Pool
// Release indexkey to memory pool.
c.keyPool = append(c.keyPool, ikey)
type index_entry struct {
// elem contains the list element
// appended to each per-hash list
// within the Index{} type. the
// contained value is a self-ref.
elem list_elem
// index is the Index{} this
// index_entry{} is stored in.
index unsafe.Pointer
// result is the actual
// underlying result stored
// within the index. this
// also contains a ref to
// this *index_entry in order
// to track indices each result
// is currently stored under.
result *result
// key contains the actual
// key this item was stored
// under, used for collision
// check.
key []any
// hash contains computed
// hash checksum of .key.
hash Hash
}
func index_entry_acquire() *index_entry {
// Acquire from pool.
v := entry_pool.Get()
if v == nil {
v = new(index_entry)
}
// Cast index_entry value.
entry := v.(*index_entry)
// Set index list elem entry on itself.
entry.elem.data = unsafe.Pointer(entry)
return entry
}
func index_entry_release(entry *index_entry) {
var zero Hash
// Reset index entry.
entry.elem.data = nil
entry.index = nil
entry.result = nil
entry.key = nil
entry.hash = zero
// Release to pool.
entry_pool.Put(entry)
}
// is_equal returns whether 2 key slices are equal.
func is_equal(k1, k2 []any) bool {
if len(k1) != len(k2) {
return false
}
for i := range k1 {
if k1[i] != k2[i] {
return false
}
}
return true
}
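The collision handling above is the heart of this upgrade: buckets are still keyed by the (possibly truncated) hash checksum, but each index_entry now also carries the raw key parts, and index_get / index_append / index_delete compare them with is_equal before trusting a bucket hit. A stand-alone toy sketch of that pattern (types, names, and values below are illustrative, not go-structr's internals):

```golang
package main

import "fmt"

// bucket pairs a cached value with the raw key parts it was stored under,
// mirroring how index_entry keeps .key alongside .hash above.
type bucket struct {
	key   []any
	value string
}

// isEqual mirrors is_equal: two key-part slices match only element-wise.
func isEqual(k1, k2 []any) bool {
	if len(k1) != len(k2) {
		return false
	}
	for i := range k1 {
		if k1[i] != k2[i] {
			return false
		}
	}
	return true
}

// get mirrors index_get: a checksum hit alone is not enough, the stored
// raw key must also equal the requested one, otherwise it is a collision
// and the lookup is treated as a miss.
func get(data map[uint64]bucket, sum uint64, key []any) (string, bool) {
	b, ok := data[sum]
	if !ok || !isEqual(b.key, key) {
		return "", false
	}
	return b.value, true
}

func main() {
	data := map[uint64]bucket{
		// Pretend the key parts {"alice"} hashed to checksum 42.
		42: {key: []any{"alice"}, value: "alice's result"},
	}

	// A different key that (hypothetically) also hashes to 42 is rejected.
	_, ok := get(data, 42, []any{"bob"})
	fmt.Println(ok) // false: a lookup keyed on the checksum alone would wrongly hit
}
```

In v0.2.0 the index map was keyed by the bare uint64 checksum alone, so two different keys hashing to the same checksum could silently return each other's cached results; storing and comparing the raw key parts is what closes that hole.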


@ -1,49 +1,55 @@
package structr
// elem represents an element
import (
"sync"
"unsafe"
)
var list_pool sync.Pool
// elem represents an elem
// in a doubly-linked list.
type elem[T any] struct {
next *elem[T]
prev *elem[T]
Value T
type list_elem struct {
next *list_elem
prev *list_elem
// data is a ptr to the
// value this linked list
// element is embedded-in.
data unsafe.Pointer
}
// list implements a doubly-linked list, where:
// - head = index 0 (i.e. the front)
// - tail = index n-1 (i.e. the back)
type list[T any] struct {
head *elem[T]
tail *elem[T]
type list struct {
head *list_elem
tail *list_elem
len int
}
func list_acquire[T any](c *Cache[T]) *list[*result[T]] {
var l *list[*result[T]]
if len(c.llsPool) == 0 {
// Allocate new list.
l = new(list[*result[T]])
} else {
// Pop list from pool slice.
l = c.llsPool[len(c.llsPool)-1]
c.llsPool = c.llsPool[:len(c.llsPool)-1]
func list_acquire() *list {
// Acquire from pool.
v := list_pool.Get()
if v == nil {
v = new(list)
}
return l
// Cast list value.
return v.(*list)
}
func list_release[T any](c *Cache[T], l *list[*result[T]]) {
func list_release(l *list) {
// Reset list.
l.head = nil
l.tail = nil
l.len = 0
// Release list to memory pool.
c.llsPool = append(c.llsPool, l)
// Release to pool.
list_pool.Put(l)
}
// pushFront pushes new 'elem' to front of list.
func (l *list[T]) pushFront(elem *elem[T]) {
func list_push_front(l *list, elem *list_elem) {
if l.len == 0 {
// Set new tail + head
l.head = elem
@ -71,14 +77,12 @@ func (l *list[T]) pushFront(elem *elem[T]) {
l.len++
}
// moveFront calls remove() on elem, followed by pushFront().
func (l *list[T]) moveFront(elem *elem[T]) {
l.remove(elem)
l.pushFront(elem)
func list_move_front(l *list, elem *list_elem) {
list_remove(l, elem)
list_push_front(l, elem)
}
// remove removes the 'elem' from the list.
func (l *list[T]) remove(elem *elem[T]) {
func list_remove(l *list, elem *list_elem) {
if l.len <= 1 {
// Drop elem's links
elem.next = nil
@ -117,8 +121,7 @@ func (l *list[T]) remove(elem *elem[T]) {
l.len--
}
// rangefn ranges all the elements in the list, passing each to fn.
func (l *list[T]) rangefn(fn func(*elem[T])) {
func list_rangefn(l *list, fn func(*list_elem)) {
if fn == nil {
panic("nil fn")
}


@ -1,75 +1,77 @@
package structr
type result[T any] struct {
// linked list entry this result is
import (
"sync"
"unsafe"
)
var result_pool sync.Pool
type result struct {
// linked list elem this result is
// stored under in Cache.lruList.
entry elem[*result[T]]
elem list_elem
// keys tracks the indices
// result is stored under.
keys []*indexkey[T]
// indexed stores the indices
// this result is stored under.
indexed []*index_entry
// cached value.
value T
// cached error.
err error
// cached data (we maintain
// the type data here using
// an interface as any one
// instance can be T / error).
data interface{}
}
func result_acquire[T any](c *Cache[T]) *result[T] {
var res *result[T]
if len(c.resPool) == 0 {
// Allocate new result.
res = new(result[T])
} else {
// Pop result from pool slice.
res = c.resPool[len(c.resPool)-1]
c.resPool = c.resPool[:len(c.resPool)-1]
func result_acquire[T any](c *Cache[T]) *result {
// Acquire from pool.
v := result_pool.Get()
if v == nil {
v = new(result)
}
// Push to front of LRU list.
c.lruList.pushFront(&res.entry)
res.entry.Value = res
// Cast result value.
res := v.(*result)
// Push result elem to front of LRU list.
list_push_front(&c.lruList, &res.elem)
res.elem.data = unsafe.Pointer(res)
return res
}
func result_release[T any](c *Cache[T], res *result[T]) {
// Remove from the LRU list.
c.lruList.remove(&res.entry)
res.entry.Value = nil
var zero T
func result_release[T any](c *Cache[T], res *result) {
// Remove result elem from LRU list.
list_remove(&c.lruList, &res.elem)
res.elem.data = nil
// Reset all result fields.
res.keys = res.keys[:0]
res.value = zero
res.err = nil
res.indexed = res.indexed[:0]
res.data = nil
// Release result to memory pool.
c.resPool = append(c.resPool, res)
// Release to pool.
result_pool.Put(res)
}
func result_dropIndex[T any](c *Cache[T], res *result[T], index *Index[T]) {
for i := 0; i < len(res.keys); i++ {
func result_drop_index[T any](res *result, index *Index[T]) {
for i := 0; i < len(res.indexed); i++ {
if res.keys[i].index != index {
if res.indexed[i].index != unsafe.Pointer(index) {
// Prof. Obiwan:
// this is not the index
// we are looking for.
continue
}
// Get index key ptr.
ikey := res.keys[i]
// Get index entry ptr.
entry := res.indexed[i]
// Move all index keys down + reslice.
copy(res.keys[i:], res.keys[i+1:])
res.keys = res.keys[:len(res.keys)-1]
// Move all index entries down + reslice.
copy(res.indexed[i:], res.indexed[i+1:])
res.indexed = res.indexed[:len(res.indexed)-1]
// Release ikey to memory pool.
indexkey_release(c, ikey)
// Release to memory pool.
index_entry_release(entry)
return
}

vendor/codeberg.org/gruf/go-structr/runtime.go (generated, vendored, new file: 134 lines)

@ -0,0 +1,134 @@
package structr
import (
"fmt"
"reflect"
"unicode"
"unicode/utf8"
"unsafe"
"github.com/modern-go/reflect2"
"github.com/zeebo/xxh3"
)
type structfield struct {
// _type is the runtime type pointer
// underlying the struct field type.
// used for repacking our own efaces.
_type reflect2.Type
// offset is the offset in memory
// of this struct field from the
// outer-most value ptr location.
offset uintptr
// hasher is the relevant function
// for hashing value of structfield
// into the supplied hashbuf, where
// return value indicates if zero.
hasher func(*xxh3.Hasher, any) bool
}
// find_field will search for a struct field with given set of names,
// where names is a len > 0 slice of names accounting for struct nesting.
func find_field(t reflect.Type, names []string) (sfield structfield) {
var (
// is_exported returns whether name is exported
// from a package; can be func or struct field.
is_exported = func(name string) bool {
r, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(r)
}
// pop_name pops the next name from
// the provided slice of field names.
pop_name = func() string {
name := names[0]
names = names[1:]
if !is_exported(name) {
panicf("field is not exported: %s", name)
}
return name
}
// field is the iteratively searched
// struct field value in below loop.
field reflect.StructField
)
switch {
// The only 2 types we support are
// structs, and ptrs to a struct.
case t.Kind() == reflect.Struct:
case t.Kind() == reflect.Pointer &&
t.Elem().Kind() == reflect.Struct:
t = t.Elem()
default:
panic("index only support struct{} and *struct{}")
}
for len(names) > 0 {
var ok bool
// Pop next name.
name := pop_name()
// Check for valid struct type.
if t.Kind() != reflect.Struct {
panicf("field %s is not struct: %s", t, name)
}
// Look for next field by name.
field, ok = t.FieldByName(name)
if !ok {
panicf("unknown field: %s", name)
}
// Increment total field offset.
sfield.offset += field.Offset
// Set the next type.
t = field.Type
}
// Get field type as reflect2.
sfield._type = reflect2.Type2(t)
// Find hasher for type.
sfield.hasher = hasher(t)
return
}
// extract_fields extracts given structfields from the provided value type,
// this is done using predetermined struct field memory offset locations.
func extract_fields[T any](value T, fields []structfield) []any {
// Get ptr to raw value data.
ptr := unsafe.Pointer(&value)
// If this is a pointer type deref the value ptr.
if reflect.TypeOf(value).Kind() == reflect.Pointer {
ptr = *(*unsafe.Pointer)(ptr)
}
// Prepare slice of field ifaces.
ifaces := make([]any, len(fields))
for i := 0; i < len(fields); i++ {
// Manually access field at memory offset and pack eface.
ptr := unsafe.Pointer(uintptr(ptr) + fields[i].offset)
ifaces[i] = fields[i]._type.UnsafeIndirect(ptr)
}
return ifaces
}
// data_ptr returns the runtime data ptr associated with value.
func data_ptr(a any) unsafe.Pointer {
return (*struct{ t, v unsafe.Pointer })(unsafe.Pointer(&a)).v
}
// panicf provides a panic with string formatting.
func panicf(format string, args ...any) {
panic(fmt.Sprintf(format, args...))
}

vendor/codeberg.org/gruf/go-structr/test.sh (generated, vendored, new file: 5 lines)

@ -0,0 +1,5 @@
#!/bin/sh
set -e
go test -v -tags=structr_32bit_hash .
go test -v -tags=structr_48bit_hash .
go test -v -tags=structr_64bit_hash .


@ -1,98 +0,0 @@
package structr
import (
"fmt"
"reflect"
"sync"
"unicode"
"unicode/utf8"
"github.com/zeebo/xxh3"
)
// findField will search for a struct field with given set of names, where names is a len > 0 slice of names account for nesting.
func findField(t reflect.Type, names []string, allowZero bool) (sfield structfield, ok bool) {
var (
// isExported returns whether name is exported
// from a package; can be func or struct field.
isExported = func(name string) bool {
r, _ := utf8.DecodeRuneInString(name)
return unicode.IsUpper(r)
}
// popName pops the next name from
// the provided slice of field names.
popName = func() string {
// Pop next name.
name := names[0]
names = names[1:]
// Ensure valid name.
if !isExported(name) {
panicf("field is not exported: %s", name)
}
return name
}
// field is the iteratively searched-for
// struct field value in below loop.
field reflect.StructField
)
for len(names) > 0 {
// Pop next name.
name := popName()
// Follow any ptrs leading to field.
for t.Kind() == reflect.Pointer {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
// The end type after following ptrs must be struct.
panicf("field %s is not struct (ptr): %s", t, name)
}
// Look for next field by name.
field, ok = t.FieldByName(name)
if !ok {
return
}
// Append next set of indices required to reach field.
sfield.index = append(sfield.index, field.Index...)
// Set the next type.
t = field.Type
}
// Get final type hash func.
sfield.hasher = hasher(t)
return
}
// panicf provides a panic with string formatting.
func panicf(format string, args ...any) {
panic(fmt.Sprintf(format, args...))
}
// hashPool provides a memory pool of xxh3
// hasher objects used indexing field vals.
var hashPool sync.Pool
// gethashbuf fetches hasher from memory pool.
func getHasher() *xxh3.Hasher {
v := hashPool.Get()
if v == nil {
v = new(xxh3.Hasher)
}
return v.(*xxh3.Hasher)
}
// putHasher replaces hasher in memory pool.
func putHasher(h *xxh3.Hasher) {
h.Reset()
hashPool.Put(h)
}

vendor/modules.txt (vendored: 2 lines changed)

@ -56,7 +56,7 @@ codeberg.org/gruf/go-sched
## explicit; go 1.19
codeberg.org/gruf/go-store/v2/storage
codeberg.org/gruf/go-store/v2/util
# codeberg.org/gruf/go-structr v0.2.0
# codeberg.org/gruf/go-structr v0.3.0
## explicit; go 1.21
codeberg.org/gruf/go-structr
# codeberg.org/superseriousbusiness/exif-terminator v0.7.0