Commit 54acfa8880 (mirror of https://codeberg.org/forgejo/forgejo.git)
Summary:
- Move the existing tests under a `testify` Suite as `baseRedisWithServerTestSuite`.
  - Those tests require a real redis server.
- Add `go.uber.org/mock/mockgen@latest` as a dependency:
  - as a tool (Makefile),
  - in the `go.mod` file.
- The mock redis client lives under a `mock` directory in the queue module.
  - That mock package also has an extra hand-written in-memory redis-like struct.
- Add tests using the mock redis client.
- Change the logic around queue provider creation.
  - Now `getNewQueue` returns a Queue provider directly, not an init function that creates one.

The whole Queue module is close to impossible to test properly because everything is private and everything goes through one hard-wired route. Because of that, we can't test, for example, what keys are used for a given queue.

To overcome this, as a first step I removed one step from that route by allowing custom calls to create a new queue provider. To achieve this, I moved the creation logic into `getNewQueue` (previously it was `getNewQueueFn`). That changes nothing on that side; everything works as before, except that the `newXXX` call happens directly in that function and not outside it. That made it possible to add extra provider-specific parameters to those functions (`newXXX`), for example a redis client. When called through `getNewQueue`, the client is `nil`.
- If the provided client is not `nil`, it is used instead of the connection string.
- If it is `nil` (the default behaviour), a new redis client is created as before; no changes there.
The rest of the provider code is unchanged.

All these changes were required to make it possible to generate mock clients for the providers and use them.

For the tests: the existing two test cases work well against a redis server, but they need some extra helpers, for example to start a new redis server if required, or to wait until a redis server is ready to use. These helpers are only needed for test cases using a real redis server. For better isolation, the existing tests were moved under a testify Suite and into a new test file called `base_redis_with_server_test.go`, because, well, they test the code with a server. These tests do exactly the same as before, calling the same sub-tests in the same way; the only change is the structure of the test (less repetition, server-related helper functions scoped to the suite).

Finally, we can create unit tests that need no redis server. The main focus of this group of tests is a higher-level overview of the operations. With the mock redis client we can set up expectations about the queue names used, the values received, and the return values, to simulate faulty states. These new unit test functions don't cover all functionality, at least that isn't the aim for now. It's more about showing that it is possible, and adding extra tests around parts we couldn't test before, for example keys.

What the new unit test group can check:
- What key is used for a given queue? For example with a `prefix`, or that all the `SXxx` calls use `queue_unique` if it's a unique queue.
- If it's not a unique queue, no `SXxx` functions are called, because those sets are only used to check whether a value is unique.
- `HasItem` always returns `false` if it's a non-unique queue.
- All functions are called exactly `N` times, and there are no unexpected calls to redis from the code.

Signed-off-by: Victoria Nadasdi <victoria@efertone.me>
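To make the description above concrete, here is a minimal sketch of the kind of mock-based unit test this change enables. It is not the exact test added by the commit: the mock package path and constructor (code.gitea.io/gitea/modules/queue/mock, NewMockUniversalClient), the queue/set names, the prefix value, and the test name are assumptions for illustration only.

package queue

import (
	"context"
	"testing"

	"code.gitea.io/gitea/modules/queue/mock" // assumed path of the mockgen-generated package

	"github.com/redis/go-redis/v9"
	"github.com/stretchr/testify/assert"
	"go.uber.org/mock/gomock"
)

func TestBaseRedisUniqueUsesPrefixedKeys(t *testing.T) {
	ctrl := gomock.NewController(t)
	// NewMockUniversalClient is the constructor mockgen would generate for
	// redis.UniversalClient; treat the name as an assumption.
	client := mock.NewMockUniversalClient(ctrl)

	// newBaseRedisGeneric pings the server before returning the provider.
	client.EXPECT().Ping(gomock.Any()).Return(redis.NewStatusResult("PONG", nil))

	cfg := &BaseConfig{
		QueueFullName: "queue",
		SetFullName:   "queue_unique",
		Length:        10,
		ConnStr:       "redis://127.0.0.1:6379/0?prefix=forgejo:",
	}
	// Pass the mock client instead of nil, as enabled by this change.
	q, err := newBaseRedisGeneric(cfg, true, client)
	assert.NoError(t, err)

	// A unique queue checks the list length, adds the value to the set, then
	// pushes it to the list; every call must use the prefixed key, exactly once.
	client.EXPECT().LLen(gomock.Any(), "forgejo:queue").Return(redis.NewIntResult(0, nil))
	client.EXPECT().SAdd(gomock.Any(), "forgejo:queue_unique", gomock.Any()).Return(redis.NewIntResult(1, nil))
	client.EXPECT().RPush(gomock.Any(), "forgejo:queue", gomock.Any()).Return(redis.NewIntResult(1, nil))

	assert.NoError(t, q.PushItem(context.Background(), []byte("item")))
}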
162 lines
4.1 KiB
Go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/nosql"

	"github.com/redis/go-redis/v9"
)

type baseRedis struct {
	client   redis.UniversalClient
	isUnique bool
	cfg      *BaseConfig
	prefix   string

	mu sync.Mutex // the old implementation is not thread-safe, the queue operation and set operation should be protected together
}

var _ baseQueue = (*baseRedis)(nil)

func newBaseRedisGeneric(cfg *BaseConfig, unique bool, client redis.UniversalClient) (baseQueue, error) {
	if client == nil {
		client = nosql.GetManager().GetRedisClient(cfg.ConnStr)
	}

	prefix := ""
	uri := nosql.ToRedisURI(cfg.ConnStr)

	for key, value := range uri.Query() {
		switch key {
		case "prefix":
			if len(value) > 0 {
				prefix = value[0]

				// As we are not checking any other values, if we found this one, we can
				// exit from the loop.
				// If a new key check is required, remove this break.
				break
			}
		}
	}

	var err error
	for i := 0; i < 10; i++ {
		err = client.Ping(graceful.GetManager().ShutdownContext()).Err()
		if err == nil {
			break
		}
		log.Warn("Redis is not ready, waiting for 1 second to retry: %v", err)
		time.Sleep(time.Second)
	}
	if err != nil {
		return nil, err
	}

	return &baseRedis{cfg: cfg, client: client, isUnique: unique, prefix: prefix}, nil
}

func newBaseRedisSimple(cfg *BaseConfig) (baseQueue, error) {
	return newBaseRedisGeneric(cfg, false, nil)
}

func newBaseRedisUnique(cfg *BaseConfig) (baseQueue, error) {
	return newBaseRedisGeneric(cfg, true, nil)
}

func (q *baseRedis) prefixedName(name string) string {
	return q.prefix + name
}

func (q *baseRedis) PushItem(ctx context.Context, data []byte) error {
	return backoffErr(ctx, backoffBegin, backoffUpper, time.After(pushBlockTime), func() (retry bool, err error) {
		q.mu.Lock()
		defer q.mu.Unlock()

		cnt, err := q.client.LLen(ctx, q.prefixedName(q.cfg.QueueFullName)).Result()
		if err != nil {
			return false, err
		}
		if int(cnt) >= q.cfg.Length {
			return true, nil
		}

		if q.isUnique {
			added, err := q.client.SAdd(ctx, q.prefixedName(q.cfg.SetFullName), data).Result()
			if err != nil {
				return false, err
			}
			if added == 0 {
				return false, ErrAlreadyInQueue
			}
		}
		return false, q.client.RPush(ctx, q.prefixedName(q.cfg.QueueFullName), data).Err()
	})
}

func (q *baseRedis) PopItem(ctx context.Context) ([]byte, error) {
	return backoffRetErr(ctx, backoffBegin, backoffUpper, infiniteTimerC, func() (retry bool, data []byte, err error) {
		q.mu.Lock()
		defer q.mu.Unlock()

		data, err = q.client.LPop(ctx, q.prefixedName(q.cfg.QueueFullName)).Bytes()
		if err == redis.Nil {
			return true, nil, nil
		}
		if err != nil {
			return true, nil, nil
		}
		if q.isUnique {
			// the data has been popped, even if there is any error we can't do anything
			_ = q.client.SRem(ctx, q.prefixedName(q.cfg.SetFullName), data).Err()
		}
		return false, data, err
	})
}

func (q *baseRedis) HasItem(ctx context.Context, data []byte) (bool, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if !q.isUnique {
		return false, nil
	}
	return q.client.SIsMember(ctx, q.prefixedName(q.cfg.SetFullName), data).Result()
}

func (q *baseRedis) Len(ctx context.Context) (int, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	cnt, err := q.client.LLen(ctx, q.prefixedName(q.cfg.QueueFullName)).Result()
	return int(cnt), err
}

func (q *baseRedis) Close() error {
	q.mu.Lock()
	defer q.mu.Unlock()
	return q.client.Close()
}

func (q *baseRedis) RemoveAll(ctx context.Context) error {
	q.mu.Lock()
	defer q.mu.Unlock()

	c1 := q.client.Del(ctx, q.prefixedName(q.cfg.QueueFullName))
	// the "set" must be cleared after the "list" because there is no transaction.
	// it's better to have duplicate items than losing items.
	c2 := q.client.Del(ctx, q.prefixedName(q.cfg.SetFullName))
	if c1.Err() != nil {
		return c1.Err()
	}
	if c2.Err() != nil {
		return c2.Err()
	}
	return nil // actually, checking errors doesn't make sense here because the state could be out-of-sync
}
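For the with-server side, the commit message above describes moving the existing tests under a testify Suite in base_redis_with_server_test.go. A rough skeleton of that structure could look like the following; the setup behaviour and method names here are assumptions, not the actual content of that file.

package queue

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type baseRedisWithServerTestSuite struct {
	suite.Suite
}

func TestBaseRedisWithServer(t *testing.T) {
	suite.Run(t, &baseRedisWithServerTestSuite{})
}

// SetupSuite would hold the server-related helpers mentioned above, e.g.
// starting a local redis server if required and waiting until it is ready.
func (s *baseRedisWithServerTestSuite) SetupSuite() {
	// placeholder: start/wait-for-redis helpers live here
}

// The suite methods then run the same sub-tests as before, just scoped to the
// suite so the helpers and repetition stay in one place.
func (s *baseRedisWithServerTestSuite) TestNormalQueue() {
	// placeholder: same sub-tests as the previous non-suite test
}

func (s *baseRedisWithServerTestSuite) TestUniqueQueue() {
	// placeholder: same sub-tests as the previous non-suite test
}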