Mirror of https://github.com/woodpecker-ci/woodpecker.git (synced 2024-11-27 04:11:03 +00:00)

Added vendored dependency for github.com/koding/cache

This commit is contained in:
    parent eb04d418d9
    commit 1b55587d24

15 changed files with 661 additions and 0 deletions
vendor/github.com/koding/cache/README.md (generated, vendored, new file, +32 lines)
@@ -0,0 +1,32 @@

# Cache [![GoDoc](https://godoc.org/github.com/koding/cache?status.svg)](https://godoc.org/github.com/koding/cache) [![Build Status](https://travis-ci.org/koding/cache.svg?branch=master)](https://travis-ci.org/koding/cache)

Cache is a backend provider for common use cases

## Install and Usage

Install the package with:

```bash
go get github.com/koding/cache
```

Import it with:

```go
import "github.com/koding/cache"
```

Example

```go
// create a cache with 2 second TTL
cache := NewMemoryWithTTL(2 * time.Second)

// start garbage collection for expired keys
cache.StartGC(time.Millisecond * 10)

// set item
err := cache.Set("test_key", "test_data")

// get item
data, err := cache.Get("test_key")
```
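The README example above omits the import and error handling; a self-contained sketch of the same flow, assuming the import path shown above, might look like this:

```go
package main

import (
    "fmt"
    "time"

    "github.com/koding/cache"
)

func main() {
    // cache with a 2 second TTL, as in the README example
    c := cache.NewMemoryWithTTL(2 * time.Second)

    // periodically sweep expired keys in the background
    c.StartGC(10 * time.Millisecond)

    if err := c.Set("test_key", "test_data"); err != nil {
        panic(err)
    }

    data, err := c.Get("test_key")
    if err != nil {
        panic(err)
    }
    fmt.Println(data) // "test_data"
}
```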
vendor/github.com/koding/cache/cache.go (generated, vendored, new file, +15 lines)
@@ -0,0 +1,15 @@

package cache

// Cache is the contract for all of the cache backends that are supported by
// this package
type Cache interface {
    // Get returns single item from the backend if the requested item is not
    // found, returns NotFound err
    Get(key string) (interface{}, error)

    // Set sets a single item to the backend
    Set(key string, value interface{}) error

    // Delete deletes single item from backend
    Delete(key string) error
}
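Every backend added in this commit satisfies this `Cache` interface, so calling code can stay backend-agnostic. A minimal sketch (the `warmUp` helper is illustrative, not part of the package):

```go
package main

import (
    "fmt"

    "github.com/koding/cache"
)

// warmUp is a hypothetical helper that works with any Cache backend.
func warmUp(c cache.Cache, seed map[string]interface{}) error {
    for k, v := range seed {
        if err := c.Set(k, v); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    seed := map[string]interface{}{"a": 1, "b": 2}

    // the same helper accepts either backend
    for _, c := range []cache.Cache{cache.NewMemory(), cache.NewLRU(10)} {
        if err := warmUp(c, seed); err != nil {
            panic(err)
        }
        v, _ := c.Get("a")
        fmt.Println(v) // 1
    }
}
```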
vendor/github.com/koding/cache/doc.go (generated, vendored, new file, +9 lines)
@@ -0,0 +1,9 @@

// Package cache provides basic caching mechanisms for Go(lang) projects.
//
// Currently supported caching algorithms:
// MemoryNoTS: provides a non-thread safe in-memory caching system
// Memory    : provides a thread safe in-memory caching system, built on top of MemoryNoTS cache
// LRUNoTS   : provides a non-thread safe, fixed size in-memory caching system, built on top of MemoryNoTS cache
// LRU       : provides a thread safe, fixed size in-memory caching system, built on top of LRUNoTS cache
// MemoryTTL : provides a thread safe, expiring in-memory caching system, built on top of MemoryNoTS cache
package cache
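For orientation, each backend listed above has its own constructor in the files added by this commit; a brief sketch of how they are obtained:

```go
package main

import (
    "time"

    "github.com/koding/cache"
)

func main() {
    _ = cache.NewMemoryNoTS()                   // MemoryNoTS: plain map, no locking
    _ = cache.NewMemory()                       // Memory: MemoryNoTS guarded by a mutex
    _ = cache.NewLRUNoTS(100)                   // LRUNoTS: fixed size, evicts least recently used
    _ = cache.NewLRU(100)                       // LRU: LRUNoTS guarded by a mutex
    _ = cache.NewMemoryWithTTL(5 * time.Minute) // MemoryTTL: entries expire after the TTL
}
```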
vendor/github.com/koding/cache/errors.go (generated, vendored, new file, +8 lines)
@@ -0,0 +1,8 @@

package cache

import "errors"

var (
    // ErrNotFound holds exported `not found error` for not found items
    ErrNotFound = errors.New("not found")
)
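Callers are expected to compare against `ErrNotFound` to distinguish a cache miss from a real failure; a short sketch:

```go
package main

import (
    "fmt"

    "github.com/koding/cache"
)

func main() {
    c := cache.NewMemory()

    _, err := c.Get("missing")
    if err == cache.ErrNotFound {
        // a miss is an expected condition, not a failure
        fmt.Println("cache miss")
    } else if err != nil {
        panic(err)
    }
}
```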
vendor/github.com/koding/cache/helper_test.go (generated, vendored, new file, +83 lines)
@@ -0,0 +1,83 @@

package cache

import "testing"

func testCacheGetSet(t *testing.T, cache Cache) {
    err := cache.Set("test_key", "test_data")
    if err != nil {
        t.Fatal("should not give err while setting item")
    }

    err = cache.Set("test_key2", "test_data2")
    if err != nil {
        t.Fatal("should not give err while setting item")
    }

    data, err := cache.Get("test_key")
    if err != nil {
        t.Fatal("test_key should be in the cache")
    }

    if data != "test_data" {
        t.Fatal("data is not \"test_data\"")
    }

    data, err = cache.Get("test_key2")
    if err != nil {
        t.Fatal("test_key2 should be in the cache")
    }

    if data != "test_data2" {
        t.Fatal("data is not \"test_data2\"")
    }
}

func testCacheNilValue(t *testing.T, cache Cache) {
    err := cache.Set("test_key", nil)
    if err != nil {
        t.Fatal("should not give err while setting item")
    }

    data, err := cache.Get("test_key")
    if err != nil {
        t.Fatal("test_key should be in the cache")
    }

    if data != nil {
        t.Fatal("data is not nil")
    }

    err = cache.Delete("test_key")
    if err != nil {
        t.Fatal("should not give err while setting item")
    }

    data, err = cache.Get("test_key")
    if err == nil {
        t.Fatal("test_key should not be in the cache")
    }
}

func testCacheDelete(t *testing.T, cache Cache) {
    cache.Set("test_key", "test_data")
    cache.Set("test_key2", "test_data2")

    err := cache.Delete("test_key3")
    if err != nil {
        t.Fatal("non-exiting item should not give error")
    }

    err = cache.Delete("test_key")
    if err != nil {
        t.Fatal("exiting item should not give error")
    }

    data, err := cache.Get("test_key")
    if err != ErrNotFound {
        t.Fatal("test_key should not be in the cache")
    }

    if data != nil {
        t.Fatal("data should be nil")
    }
}
vendor/github.com/koding/cache/lru.go (generated, vendored, new file, +51 lines)
@@ -0,0 +1,51 @@

package cache

import "sync"

// LRU Discards the least recently used items first. This algorithm
// requires keeping track of what was used when.
type LRU struct {
    // Mutex is used for handling the concurrent
    // read/write requests for cache
    sync.Mutex

    // cache holds the all cache values
    cache Cache
}

// NewLRU creates a thread-safe LRU cache
func NewLRU(size int) Cache {
    return &LRU{
        cache: NewLRUNoTS(size),
    }
}

// Get returns the value of a given key if it exists, every get item will be
// moved to the head of the linked list for keeping track of least recent used
// item
func (l *LRU) Get(key string) (interface{}, error) {
    l.Lock()
    defer l.Unlock()

    return l.cache.Get(key)
}

// Set sets or overrides the given key with the given value, every set item will
// be moved or prepended to the head of the linked list for keeping track of
// least recent used item. When the cache is full, last item of the linked list
// will be evicted from the cache
func (l *LRU) Set(key string, val interface{}) error {
    l.Lock()
    defer l.Unlock()

    return l.cache.Set(key, val)
}

// Delete deletes the given key-value pair from cache, this function doesnt
// return an error if item is not in the cache
func (l *LRU) Delete(key string) error {
    l.Lock()
    defer l.Unlock()

    return l.cache.Delete(key)
}
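Because `LRU` only wraps `LRUNoTS` with a mutex, one instance can be shared across goroutines; a hedged sketch of concurrent use:

```go
package main

import (
    "fmt"
    "sync"

    "github.com/koding/cache"
)

func main() {
    c := cache.NewLRU(128) // thread-safe, capacity 128

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            key := fmt.Sprintf("key-%d", n)
            if err := c.Set(key, n); err != nil {
                panic(err)
            }
        }(i)
    }
    wg.Wait()

    v, err := c.Get("key-0")
    fmt.Println(v, err) // 0 <nil>
}
```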
vendor/github.com/koding/cache/lru_nots.go (generated, vendored, new file, +122 lines)
@@ -0,0 +1,122 @@

package cache

import (
    "container/list"
)

// LRUNoTS Discards the least recently used items first. This algorithm
// requires keeping track of what was used when.
type LRUNoTS struct {
    // list holds all items in a linked list, for finding the `tail` of the list
    list *list.List

    // cache holds the all cache values
    cache Cache

    // size holds the limit of the LRU cache
    size int
}

// kv is an helper struct for keeping track of the key for the list item. Only
// place where we need the key of a value is while removing the last item from
// linked list, for other cases, all operations alread have the key
type kv struct {
    k string
    v interface{}
}

// NewLRUNoTS creates a new LRU cache struct for further cache operations. Size
// is used for limiting the upper bound of the cache
func NewLRUNoTS(size int) Cache {
    if size < 1 {
        panic("invalid cache size")
    }

    return &LRUNoTS{
        list:  list.New(),
        cache: NewMemoryNoTS(),
        size:  size,
    }
}

// Get returns the value of a given key if it exists, every get item will be
// moved to the head of the linked list for keeping track of least recent used
// item
func (l *LRUNoTS) Get(key string) (interface{}, error) {
    res, err := l.cache.Get(key)
    if err != nil {
        return nil, err
    }

    elem := res.(*list.Element)
    // move found item to the head
    l.list.MoveToFront(elem)

    return elem.Value.(*kv).v, nil
}

// Set sets or overrides the given key with the given value, every set item will
// be moved or prepended to the head of the linked list for keeping track of
// least recent used item. When the cache is full, last item of the linked list
// will be evicted from the cache
func (l *LRUNoTS) Set(key string, val interface{}) error {
    // try to get item
    res, err := l.cache.Get(key)
    if err != nil && err != ErrNotFound {
        return err
    }

    var elem *list.Element

    // if elem is not in the cache, push it to front of the list
    if err == ErrNotFound {
        elem = l.list.PushFront(&kv{k: key, v: val})
    } else {
        // if elem is in the cache, update the data and move it the front
        elem = res.(*list.Element)

        // update the data
        elem.Value.(*kv).v = val

        // item already exists, so move it to the front of the list
        l.list.MoveToFront(elem)
    }

    // in any case, set the item to the cache
    err = l.cache.Set(key, elem)
    if err != nil {
        return err
    }

    // if the cache is full, evict last entry
    if l.list.Len() > l.size {
        // remove last element from cache
        return l.removeElem(l.list.Back())
    }

    return nil
}

// Delete deletes the given key-value pair from cache, this function doesnt
// return an error if item is not in the cache
func (l *LRUNoTS) Delete(key string) error {
    res, err := l.cache.Get(key)
    if err != nil && err != ErrNotFound {
        return err
    }

    // item already deleted
    if err == ErrNotFound {
        // surpress not found errors
        return nil
    }

    elem := res.(*list.Element)

    return l.removeElem(elem)
}

func (l *LRUNoTS) removeElem(e *list.Element) error {
    l.list.Remove(e)
    return l.cache.Delete(e.Value.(*kv).k)
}
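The eviction branch in `Set` above means that once the list grows past `size`, the tail entry is dropped; a small sketch of that behaviour on a capacity-2 cache (single-goroutine use, since this variant is not thread safe):

```go
package main

import (
    "fmt"

    "github.com/koding/cache"
)

func main() {
    c := cache.NewLRUNoTS(2)

    c.Set("a", 1)
    c.Set("b", 2)
    c.Get("a")    // touch "a" so it becomes most recently used
    c.Set("c", 3) // capacity exceeded: "b" (the tail) is evicted

    if _, err := c.Get("b"); err == cache.ErrNotFound {
        fmt.Println("b was evicted")
    }
    v, _ := c.Get("a")
    fmt.Println(v) // 1
}
```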
vendor/github.com/koding/cache/lru_nots_test.go (generated, vendored, new file, +33 lines)
@@ -0,0 +1,33 @@

package cache

import "testing"

func TestLRUNoTSGetSet(t *testing.T) {
    cache := NewLRUNoTS(2)
    testCacheGetSet(t, cache)
}

func TestLRUNoTSEviction(t *testing.T) {
    cache := NewLRUNoTS(2)
    testCacheGetSet(t, cache)

    err := cache.Set("test_key3", "test_data3")
    if err != nil {
        t.Fatal("should not give err while setting item")
    }

    _, err = cache.Get("test_key")
    if err == nil {
        t.Fatal("test_key should not be in the cache")
    }
}

func TestLRUNoTSDelete(t *testing.T) {
    cache := NewLRUNoTS(2)
    testCacheDelete(t, cache)
}

func TestLRUNoTSNilValue(t *testing.T) {
    cache := NewLRUNoTS(2)
    testCacheNilValue(t, cache)
}
vendor/github.com/koding/cache/lru_test.go (generated, vendored, new file, +33 lines)
@@ -0,0 +1,33 @@

package cache

import "testing"

func TestLRUGetSet(t *testing.T) {
    cache := NewLRU(2)
    testCacheGetSet(t, cache)
}

func TestLRUEviction(t *testing.T) {
    cache := NewLRU(2)
    testCacheGetSet(t, cache)

    err := cache.Set("test_key3", "test_data3")
    if err != nil {
        t.Fatal("should not give err while setting item")
    }

    _, err = cache.Get("test_key")
    if err == nil {
        t.Fatal("test_key should not be in the cache")
    }
}

func TestLRUDelete(t *testing.T) {
    cache := NewLRU(2)
    testCacheDelete(t, cache)
}

func TestLRUNilValue(t *testing.T) {
    cache := NewLRU(2)
    testCacheNilValue(t, cache)
}
vendor/github.com/koding/cache/memory.go (generated, vendored, new file, +46 lines)
@@ -0,0 +1,46 @@

package cache

import "sync"

// Memory provides an inmemory caching mechanism
type Memory struct {
    // Mutex is used for handling the concurrent
    // read/write requests for cache
    sync.Mutex

    // cache holds the cache data
    cache Cache
}

// NewMemory creates an inmemory cache system
// Which everytime will return the true value about a cache hit
func NewMemory() Cache {
    return &Memory{
        cache: NewMemoryNoTS(),
    }
}

// Get returns the value of a given key if it exists
func (r *Memory) Get(key string) (interface{}, error) {
    r.Lock()
    defer r.Unlock()

    return r.cache.Get(key)
}

// Set sets a value to the cache or overrides existing one with the given value
func (r *Memory) Set(key string, value interface{}) error {
    r.Lock()
    defer r.Unlock()

    return r.cache.Set(key, value)
}

// Delete deletes the given key-value pair from cache, this function doesnt
// return an error if item is not in the cache
func (r *Memory) Delete(key string) error {
    r.Lock()
    defer r.Unlock()

    return r.cache.Delete(key)
}
vendor/github.com/koding/cache/memory_nots.go (generated, vendored, new file, +39 lines)
@@ -0,0 +1,39 @@

package cache

// MemoryNoTS provides a non-thread safe caching mechanism
type MemoryNoTS struct {
    // items holds the cache data
    items map[string]interface{}
}

// NewMemoryNoTS creates MemoryNoTS struct
func NewMemoryNoTS() *MemoryNoTS {
    return &MemoryNoTS{
        items: map[string]interface{}{},
    }
}

// Get returns a value of a given key if it exists
// and valid for the time being
func (r *MemoryNoTS) Get(key string) (interface{}, error) {
    value, ok := r.items[key]
    if !ok {
        return nil, ErrNotFound
    }

    return value, nil
}

// Set will persist a value to the cache or
// override existing one with the new one
func (r *MemoryNoTS) Set(key string, value interface{}) error {
    r.items[key] = value
    return nil
}

// Delete deletes a given key, it doesnt return error if the item is not in the
// system
func (r *MemoryNoTS) Delete(key string) error {
    delete(r.items, key)
    return nil
}
vendor/github.com/koding/cache/memory_nots_test.go (generated, vendored, new file, +18 lines)
@@ -0,0 +1,18 @@

package cache

import "testing"

func TestMemoryCacheNoTSGetSet(t *testing.T) {
    cache := NewMemoryNoTS()
    testCacheGetSet(t, cache)
}

func TestMemoryCacheNoTSDelete(t *testing.T) {
    cache := NewMemoryNoTS()
    testCacheDelete(t, cache)
}

func TestMemoryCacheNoTSNilValue(t *testing.T) {
    cache := NewMemoryNoTS()
    testCacheNilValue(t, cache)
}
vendor/github.com/koding/cache/memory_test.go (generated, vendored, new file, +18 lines)
@@ -0,0 +1,18 @@

package cache

import "testing"

func TestMemoryGetSet(t *testing.T) {
    cache := NewMemory()
    testCacheGetSet(t, cache)
}

func TestMemoryDelete(t *testing.T) {
    cache := NewMemory()
    testCacheDelete(t, cache)
}

func TestMemoryNilValue(t *testing.T) {
    cache := NewMemory()
    testCacheNilValue(t, cache)
}
vendor/github.com/koding/cache/memory_ttl.go (generated, vendored, new file, +111 lines)
@@ -0,0 +1,111 @@

package cache

import (
    "sync"
    "time"
)

var zeroTTL = time.Duration(0)

// MemoryTTL holds the required variables to compose an in memory cache system
// which also provides expiring key mechanism
type MemoryTTL struct {
    // Mutex is used for handling the concurrent
    // read/write requests for cache
    sync.Mutex

    // cache holds the cache data
    cache *MemoryNoTS

    // setAts holds the time that related item's set at
    setAts map[string]time.Time

    // ttl is a duration for a cache key to expire
    ttl time.Duration

    // gcInterval is a duration for garbage collection
    gcInterval time.Duration
}

// NewMemoryWithTTL creates an inmemory cache system
// Which everytime will return the true values about a cache hit
// and never will leak memory
// ttl is used for expiration of a key from cache
func NewMemoryWithTTL(ttl time.Duration) *MemoryTTL {
    return &MemoryTTL{
        cache:  NewMemoryNoTS(),
        setAts: map[string]time.Time{},
        ttl:    ttl,
    }
}

// StartGC starts the garbage collection process in a go routine
func (r *MemoryTTL) StartGC(gcInterval time.Duration) {
    r.gcInterval = gcInterval
    go func() {
        for _ = range time.Tick(gcInterval) {
            for key := range r.cache.items {
                if !r.isValid(key) {
                    r.Delete(key)
                }
            }
        }
    }()
}

// Get returns a value of a given key if it exists
// and valid for the time being
func (r *MemoryTTL) Get(key string) (interface{}, error) {
    r.Lock()
    defer r.Unlock()

    if !r.isValid(key) {
        r.delete(key)
        return nil, ErrNotFound
    }

    value, err := r.cache.Get(key)
    if err != nil {
        return nil, err
    }

    return value, nil
}

// Set will persist a value to the cache or
// override existing one with the new one
func (r *MemoryTTL) Set(key string, value interface{}) error {
    r.Lock()
    defer r.Unlock()

    r.cache.Set(key, value)
    r.setAts[key] = time.Now()
    return nil
}

// Delete deletes a given key if exists
func (r *MemoryTTL) Delete(key string) error {
    r.Lock()
    defer r.Unlock()

    r.delete(key)
    return nil
}

func (r *MemoryTTL) delete(key string) {
    r.cache.Delete(key)
    delete(r.setAts, key)
}

func (r *MemoryTTL) isValid(key string) bool {
    setAt, ok := r.setAts[key]
    if !ok {
        return false
    }

    if r.ttl == zeroTTL {
        return true
    }

    return setAt.Add(r.ttl).After(time.Now())
}
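Given the `isValid` check in `Get` above, a key becomes unreachable as soon as its TTL elapses, even before the GC goroutine sweeps it; a sketch:

```go
package main

import (
    "fmt"
    "time"

    "github.com/koding/cache"
)

func main() {
    c := cache.NewMemoryWithTTL(50 * time.Millisecond)
    c.StartGC(10 * time.Millisecond) // background sweep of expired keys

    c.Set("token", "abc123")

    if v, err := c.Get("token"); err == nil {
        fmt.Println("fresh:", v)
    }

    time.Sleep(100 * time.Millisecond)

    if _, err := c.Get("token"); err == cache.ErrNotFound {
        fmt.Println("expired")
    }
}
```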
vendor/github.com/koding/cache/memory_ttl_test.go (generated, vendored, new file, +43 lines)
@@ -0,0 +1,43 @@

package cache

import (
    "testing"
    "time"
)

func TestMemoryCacheGetSet(t *testing.T) {
    cache := NewMemoryWithTTL(2 * time.Second)
    cache.StartGC(time.Millisecond * 10)
    cache.Set("test_key", "test_data")
    data, err := cache.Get("test_key")
    if err != nil {
        t.Fatal("data not found")
    }
    if data != "test_data" {
        t.Fatal("data is not \"test_data\"")
    }
}

func TestMemoryCacheTTL(t *testing.T) {
    cache := NewMemoryWithTTL(100 * time.Millisecond)
    cache.StartGC(time.Millisecond * 10)
    cache.Set("test_key", "test_data")
    time.Sleep(200 * time.Millisecond)
    _, err := cache.Get("test_key")
    if err == nil {
        t.Fatal("data found")
    }
}

func TestMemoryCacheTTLNilValue(t *testing.T) {
    cache := NewMemoryWithTTL(100 * time.Millisecond)
    cache.StartGC(time.Millisecond * 10)
    cache.Set("test_key", nil)
    data, err := cache.Get("test_key")
    if err != nil {
        t.Fatal("data found")
    }
    if data != nil {
        t.Fatal("data is not null")
    }
}