Queue: Improve logging
commit 9ad9070555 (parent 2927bc6fe5)
8 changed files with 115 additions and 62 deletions
modules/queue/manager.go:

@@ -12,6 +12,8 @@ import (
 	"sort"
 	"sync"
 	"time"
+
+	"code.gitea.io/gitea/modules/log"
 )
 
 var manager *Manager
@@ -96,6 +98,7 @@ func (m *Manager) Add(queue Queue,
 	}
 	m.Queues[desc.QID] = desc
 	m.mutex.Unlock()
+	log.Trace("Queue Manager registered: %s (QID: %d)", desc.Name, desc.QID)
 	return desc.QID
 }
 
@@ -104,6 +107,8 @@ func (m *Manager) Remove(qid int64) {
 	m.mutex.Lock()
 	delete(m.Queues, qid)
 	m.mutex.Unlock()
+	log.Trace("Queue Manager removed: QID: %d", qid)
+
 }
 
 // GetDescription by qid
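
The two new Trace calls record queue registration and removal after the mutex is released, so logging I/O never happens inside the critical section. A minimal standalone sketch of that pattern (simplified types — the real registry stores a description value, not a string — and log.Printf standing in for Gitea's log.Trace):

```go
package main

import (
	"log"
	"sync"
)

// Manager mirrors the pattern above: mutate the registry under the
// lock, release it, then emit the trace line.
type Manager struct {
	mutex   sync.Mutex
	counter int64
	Queues  map[int64]string
}

func (m *Manager) Add(name string) int64 {
	m.mutex.Lock()
	m.counter++
	qid := m.counter
	m.Queues[qid] = name
	m.mutex.Unlock()
	log.Printf("Queue Manager registered: %s (QID: %d)", name, qid)
	return qid
}

func (m *Manager) Remove(qid int64) {
	m.mutex.Lock()
	delete(m.Queues, qid)
	m.mutex.Unlock()
	log.Printf("Queue Manager removed: QID: %d", qid)
}

func main() {
	m := &Manager{Queues: map[int64]string{}}
	qid := m.Add("mail-queue")
	m.Remove(qid)
}
```

Logging after Unlock keeps lock hold times short, at the cost that log lines from concurrent Add/Remove calls may interleave out of order.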

modules/queue/queue_disk.go:

@@ -91,16 +91,18 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 
 	go l.readToChan()
 
-	log.Trace("%s Waiting til closed", l.name)
+	log.Trace("LevelQueue: %s Waiting til closed", l.name)
 	<-l.closed
 
-	log.Trace("%s Waiting til done", l.name)
+	log.Trace("LevelQueue: %s Waiting til done", l.name)
 	l.pool.Wait()
-	// FIXME: graceful: Needs HammerContext
-	log.Trace("%s Waiting til cleaned", l.name)
 
-	l.pool.CleanUp(context.TODO())
-	log.Trace("%s cleaned", l.name)
+	log.Trace("LevelQueue: %s Waiting til cleaned", l.name)
+	ctx, cancel := context.WithCancel(context.Background())
+	atTerminate(ctx, cancel)
+	l.pool.CleanUp(ctx)
+	cancel()
+	log.Trace("LevelQueue: %s Cleaned", l.name)
 
 }
 
@@ -115,7 +117,7 @@ func (l *LevelQueue) readToChan() {
 			bs, err := l.queue.RPop()
 			if err != nil {
 				if err != levelqueue.ErrNotFound {
-					log.Error("%s RPop: %v", l.name, err)
+					log.Error("LevelQueue: %s Error on RPop: %v", l.name, err)
 				}
 				time.Sleep(time.Millisecond * 100)
 				continue
@@ -137,14 +139,14 @@ func (l *LevelQueue) readToChan() {
 				err = json.Unmarshal(bs, &data)
 			}
 			if err != nil {
-				log.Error("LevelQueue: %s failed to unmarshal: %v", l.name, err)
-				time.Sleep(time.Millisecond * 10)
+				log.Error("LevelQueue: %s Failed to unmarshal with error: %v", l.name, err)
+				time.Sleep(time.Millisecond * 100)
 				continue
 			}
 
-			log.Trace("LevelQueue %s: task found: %#v", l.name, data)
+			log.Trace("LevelQueue %s: Task found: %#v", l.name, data)
 			l.pool.Push(data)
-			time.Sleep(time.Millisecond * 100)
+			time.Sleep(time.Millisecond * 10)
 
 		}
 	}
@@ -170,7 +172,7 @@ func (l *LevelQueue) Push(data Data) error {
 
 // Shutdown this queue and stop processing
 func (l *LevelQueue) Shutdown() {
-	log.Trace("Shutdown: %s", l.name)
+	log.Trace("LevelQueue: %s Shutdown", l.name)
 	select {
 	case <-l.closed:
 	default:
@@ -180,10 +182,16 @@ func (l *LevelQueue) Shutdown() {
 
 // Terminate this queue and close the queue
 func (l *LevelQueue) Terminate() {
-	log.Trace("Terminating: %s", l.name)
+	log.Trace("LevelQueue: %s Terminating", l.name)
 	l.Shutdown()
-	if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" {
-		log.Error("Error whilst closing internal queue in %s: %v", l.name, err)
+	select {
+	case <-l.terminated:
+	default:
+		close(l.terminated)
+		if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" {
+			log.Error("Error whilst closing internal queue in %s: %v", l.name, err)
+		}
+
 	}
 }
 
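
Terminate now guards the underlying close with a select on the l.terminated channel, the same close-once idiom Shutdown already uses on l.closed: the first caller closes the channel, later callers receive from the closed channel and skip the body, so a repeated Terminate cannot panic with "close of closed channel". A self-contained sketch of the idiom (hypothetical closer type, fmt.Println in place of the real cleanup):

```go
package main

import "fmt"

// closer stands in for LevelQueue: the first Terminate closes the
// channel and releases resources; later calls see the closed channel
// and fall through.
type closer struct {
	terminated chan struct{}
}

func (c *closer) Terminate() {
	select {
	case <-c.terminated:
		// already terminated: nothing to do
	default:
		close(c.terminated)
		fmt.Println("internal queue closed")
	}
}

func main() {
	c := &closer{terminated: make(chan struct{})}
	c.Terminate()
	c.Terminate() // no-op instead of a double-close panic
}
```

Note the idiom is only safe as shown for sequential callers; fully concurrent callers would still need a lock or sync.Once around the close.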

modules/queue/queue_disk_channel.go:

@@ -133,22 +133,28 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 		_ = p.ChannelQueue.pool.AddWorkers(p.workers, 0)
 	}()
 
+	log.Trace("PersistableChannelQueue: %s Waiting til closed", p.delayedStarter.name)
 	<-p.closed
+	log.Trace("PersistableChannelQueue: %s Cancelling pools", p.delayedStarter.name)
 	p.ChannelQueue.pool.cancel()
 	p.internal.(*LevelQueue).pool.cancel()
+	log.Trace("PersistableChannelQueue: %s Waiting til done", p.delayedStarter.name)
 	p.ChannelQueue.pool.Wait()
 	p.internal.(*LevelQueue).pool.Wait()
 	// Redirect all remaining data in the chan to the internal channel
 	go func() {
+		log.Trace("PersistableChannelQueue: %s Redirecting remaining data", p.delayedStarter.name)
 		for data := range p.ChannelQueue.pool.dataChan {
 			_ = p.internal.Push(data)
 		}
+		log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", p.delayedStarter.name)
 	}()
+	log.Trace("PersistableChannelQueue: %s Done main loop", p.delayedStarter.name)
 }
 
 // Shutdown processing this queue
 func (p *PersistableChannelQueue) Shutdown() {
-	log.Trace("Shutdown: %s", p.delayedStarter.name)
+	log.Trace("PersistableChannelQueue: %s Shutdown", p.delayedStarter.name)
 	select {
 	case <-p.closed:
 	default:
@@ -163,7 +169,7 @@ func (p *PersistableChannelQueue) Shutdown() {
 
 // Terminate this queue and close the queue
 func (p *PersistableChannelQueue) Terminate() {
-	log.Trace("Terminating: %s", p.delayedStarter.name)
+	log.Trace("PersistableChannelQueue: %s Terminating", p.delayedStarter.name)
 	p.Shutdown()
 	p.lock.Lock()
 	defer p.lock.Unlock()
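
The redirect step that the new trace lines bracket: once both pools have stopped, whatever is still buffered in the in-memory channel is pushed into the persistent internal queue so it survives a restart. A reduced sketch under assumed shapes (string payloads and a push callback instead of p.internal.Push):

```go
package main

import "fmt"

// drainToFallback is the redirect goroutine in miniature: everything
// still buffered in the in-memory channel is handed to the persistent
// queue's Push.
func drainToFallback(dataChan chan string, push func(string) error) {
	for data := range dataChan {
		_ = push(data) // the diff likewise discards Push errors here
	}
}

func main() {
	dataChan := make(chan string, 4)
	dataChan <- "task-1"
	dataChan <- "task-2"
	close(dataChan) // in the real code the pool's CleanUp closes dataChan

	drainToFallback(dataChan, func(s string) error {
		fmt.Println("persisted:", s)
		return nil
	})
}
```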

modules/queue/queue_disk_channel_test.go:

@@ -24,8 +24,8 @@ func TestPersistableChannelQueue(t *testing.T) {
 		}
 	}
 
-	var queueShutdown func()
-	var queueTerminate func()
+	queueShutdown := []func(){}
+	queueTerminate := []func(){}
 
 	tmpDir, err := ioutil.TempDir("", "persistable-channel-queue-test-data")
 	assert.NoError(t, err)
@@ -40,9 +40,9 @@ func TestPersistableChannelQueue(t *testing.T) {
 	assert.NoError(t, err)
 
 	go queue.Run(func(_ context.Context, shutdown func()) {
-		queueShutdown = shutdown
+		queueShutdown = append(queueShutdown, shutdown)
 	}, func(_ context.Context, terminate func()) {
-		queueTerminate = terminate
+		queueTerminate = append(queueTerminate, terminate)
 	})
 
 	test1 := testData{"A", 1}
@@ -66,7 +66,9 @@ func TestPersistableChannelQueue(t *testing.T) {
 	err = queue.Push(test1)
 	assert.Error(t, err)
 
-	queueShutdown()
+	for _, callback := range queueShutdown {
+		callback()
+	}
 	time.Sleep(200 * time.Millisecond)
 	err = queue.Push(&test1)
 	assert.NoError(t, err)
@@ -77,7 +79,9 @@ func TestPersistableChannelQueue(t *testing.T) {
 		assert.Fail(t, "Handler processing should have stopped")
 	default:
 	}
-	queueTerminate()
+	for _, callback := range queueTerminate {
+		callback()
+	}
 
 	// Reopen queue
 	queue, err = NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{
@@ -89,9 +93,9 @@ func TestPersistableChannelQueue(t *testing.T) {
 	assert.NoError(t, err)
 
 	go queue.Run(func(_ context.Context, shutdown func()) {
-		queueShutdown = shutdown
+		queueShutdown = append(queueShutdown, shutdown)
 	}, func(_ context.Context, terminate func()) {
-		queueTerminate = terminate
+		queueTerminate = append(queueTerminate, terminate)
 	})
 
 	result3 := <-handleChan
@@ -101,7 +105,11 @@ func TestPersistableChannelQueue(t *testing.T) {
 	result4 := <-handleChan
 	assert.Equal(t, test2.TestString, result4.TestString)
 	assert.Equal(t, test2.TestInt, result4.TestInt)
-	queueShutdown()
-	queueTerminate()
+	for _, callback := range queueShutdown {
+		callback()
+	}
+	for _, callback := range queueTerminate {
+		callback()
+	}
 
 }
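
The test switches from a single `var queueShutdown func()` to a slice because Run is started twice — once before and once after the reopen — and each start registers a fresh callback; a plain variable would silently overwrite the first registration, leaving the original queue never shut down. A toy illustration (the register helper is hypothetical, standing in for Run's atShutdown argument):

```go
package main

import "fmt"

// Each (re)start of the queue registers its own shutdown callback.
// A slice keeps both; the old single variable kept only the last.
func main() {
	queueShutdown := []func(){}

	register := func(shutdown func()) { // stand-in for the Run callback
		queueShutdown = append(queueShutdown, shutdown)
	}

	register(func() { fmt.Println("shutdown: first queue") })
	register(func() { fmt.Println("shutdown: reopened queue") })

	for _, callback := range queueShutdown {
		callback()
	}
}
```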

modules/queue/queue_disk_test.go:

@@ -6,6 +6,7 @@ package queue
 
 import (
 	"context"
+	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -23,11 +24,15 @@ func TestLevelQueue(t *testing.T) {
 		}
 	}
 
-	var queueShutdown func()
-	var queueTerminate func()
+	queueShutdown := []func(){}
+	queueTerminate := []func(){}
+
+	tmpDir, err := ioutil.TempDir("", "level-queue-test-data")
+	assert.NoError(t, err)
+	defer os.RemoveAll(tmpDir)
 
 	queue, err := NewLevelQueue(handle, LevelQueueConfiguration{
-		DataDir:     "level-queue-test-data",
+		DataDir:     tmpDir,
 		BatchLength: 2,
 		Workers:     1,
 		QueueLength: 20,
@@ -38,9 +43,9 @@ func TestLevelQueue(t *testing.T) {
 	assert.NoError(t, err)
 
 	go queue.Run(func(_ context.Context, shutdown func()) {
-		queueShutdown = shutdown
+		queueShutdown = append(queueShutdown, shutdown)
 	}, func(_ context.Context, terminate func()) {
-		queueTerminate = terminate
+		queueTerminate = append(queueTerminate, terminate)
 	})
 
 	test1 := testData{"A", 1}
@@ -64,7 +69,9 @@ func TestLevelQueue(t *testing.T) {
 	err = queue.Push(test1)
 	assert.Error(t, err)
 
-	queueShutdown()
+	for _, callback := range queueShutdown {
+		callback()
+	}
 	time.Sleep(200 * time.Millisecond)
 	err = queue.Push(&test1)
 	assert.NoError(t, err)
@@ -75,24 +82,30 @@ func TestLevelQueue(t *testing.T) {
 		assert.Fail(t, "Handler processing should have stopped")
 	default:
 	}
-	queueTerminate()
+	for _, callback := range queueTerminate {
+		callback()
+	}
 
 	// Reopen queue
-	queue, err = NewLevelQueue(handle, LevelQueueConfiguration{
-		DataDir:      "level-queue-test-data",
-		BatchLength:  2,
-		Workers:      1,
-		QueueLength:  20,
-		BlockTimeout: 1 * time.Second,
-		BoostTimeout: 5 * time.Minute,
-		BoostWorkers: 5,
-	}, &testData{})
+	queue, err = NewWrappedQueue(handle,
+		WrappedQueueConfiguration{
+			Underlying: LevelQueueType,
+			Config: LevelQueueConfiguration{
+				DataDir:      tmpDir,
+				BatchLength:  2,
+				Workers:      1,
+				QueueLength:  20,
+				BlockTimeout: 1 * time.Second,
+				BoostTimeout: 5 * time.Minute,
+				BoostWorkers: 5,
+			},
+		}, &testData{})
 	assert.NoError(t, err)
 
 	go queue.Run(func(_ context.Context, shutdown func()) {
-		queueShutdown = shutdown
+		queueShutdown = append(queueShutdown, shutdown)
 	}, func(_ context.Context, terminate func()) {
-		queueTerminate = terminate
+		queueTerminate = append(queueTerminate, terminate)
 	})
 
 	result3 := <-handleChan
@@ -102,8 +115,10 @@ func TestLevelQueue(t *testing.T) {
 	result4 := <-handleChan
 	assert.Equal(t, test2.TestString, result4.TestString)
 	assert.Equal(t, test2.TestInt, result4.TestInt)
-	queueShutdown()
-	queueTerminate()
-	os.RemoveAll("level-queue-test-data")
+	for _, callback := range queueShutdown {
+		callback()
+	}
+	for _, callback := range queueTerminate {
+		callback()
+	}
 }
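
Switching from the hard-coded "level-queue-test-data" directory to ioutil.TempDir gives every run a unique location (so runs cannot collide on leftover state) and turns cleanup into a single deferred RemoveAll instead of an os.RemoveAll at the end that is skipped if the test exits early. A sketch of the pattern as used here (ioutil.TempDir matches the Go of this era; os.MkdirTemp is the modern equivalent):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// Each run gets a fresh directory; the deferred RemoveAll runs no
// matter how the function returns.
func main() {
	tmpDir, err := ioutil.TempDir("", "level-queue-test-data")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)

	fmt.Println("queue data dir:", tmpDir)
}
```

The reopen also now goes through NewWrappedQueue with Underlying: LevelQueueType, exercising the wrapper's delayed-start path against the same on-disk data.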

modules/queue/queue_redis.go:

@@ -116,10 +116,16 @@ func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 
 	go r.readToChan()
 
+	log.Trace("RedisQueue: %s Waiting til closed", r.name)
 	<-r.closed
+	log.Trace("RedisQueue: %s Waiting til done", r.name)
 	r.pool.Wait()
-	// FIXME: graceful: Needs HammerContext
-	r.pool.CleanUp(context.TODO())
+
+	log.Trace("RedisQueue: %s Waiting til cleaned", r.name)
+	ctx, cancel := context.WithCancel(context.Background())
+	atTerminate(ctx, cancel)
+	r.pool.CleanUp(ctx)
+	cancel()
 }
 
 func (r *RedisQueue) readToChan() {
@@ -132,7 +138,7 @@ func (r *RedisQueue) readToChan() {
 		default:
 			bs, err := r.client.LPop(r.queueName).Bytes()
 			if err != nil && err != redis.Nil {
-				log.Error("RedisQueue: %s LPop failed: %v", r.name, err)
+				log.Error("RedisQueue: %s Error on LPop: %v", r.name, err)
 				time.Sleep(time.Millisecond * 100)
 				continue
 			}
@@ -153,12 +159,12 @@ func (r *RedisQueue) readToChan() {
 				err = json.Unmarshal(bs, &data)
 			}
 			if err != nil {
-				log.Error("RedisQueue: %s Unmarshal: %v", r.name, err)
+				log.Error("RedisQueue: %s Error on Unmarshal: %v", r.name, err)
 				time.Sleep(time.Millisecond * 100)
 				continue
 			}
 
-			log.Trace("RedisQueue: %s task found: %#v", r.name, data)
+			log.Trace("RedisQueue: %s Task found: %#v", r.name, data)
 			r.pool.Push(data)
 			time.Sleep(time.Millisecond * 10)
 		}
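
As in LevelQueue.Run, the FIXME and `CleanUp(context.TODO())` are replaced by a cancellable context whose cancel function is handed to the atTerminate hook, so a forced termination can abort a drain that takes too long. A runnable sketch of that control flow (the terminate variable stands in for whatever atTerminate registers; the task list is made up, and "terminate" fires after the second task to show the early exit):

```go
package main

import (
	"context"
	"fmt"
)

// Drain with a cancellable context: a terminate signal arriving
// mid-drain abandons the remaining work instead of blocking shutdown.
func main() {
	ctx, cancel := context.WithCancel(context.Background())

	terminate := cancel // stand-in for what atTerminate(ctx, cancel) wires up

	pending := []string{"a", "b", "c", "d"}
	for i, task := range pending {
		select {
		case <-ctx.Done():
			fmt.Println("terminated before drain finished; remaining:", pending[i:])
			return
		default:
			fmt.Println("drained:", task)
		}
		if i == 1 {
			terminate() // simulate termination arriving mid-drain
		}
	}
}
```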

modules/queue/queue_wrapped.go:

@@ -92,7 +92,7 @@ type WrappedQueue struct {
 
 // NewWrappedQueue will attempt to create a queue of the provided type,
 // but if there is a problem creating this queue it will instead create
-// a WrappedQueue with delayed the startup of the queue instead and a
+// a WrappedQueue with delayed startup of the queue instead and a
 // channel which will be redirected to the queue
 func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
 	configInterface, err := toConfig(WrappedQueueConfiguration{}, cfg)
@@ -162,11 +162,12 @@ func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 	}
 
 	q.internal.Run(atShutdown, atTerminate)
+	log.Trace("WrappedQueue: %s Done", q.name)
 }
 
 // Shutdown this queue and stop processing
 func (q *WrappedQueue) Shutdown() {
-	log.Trace("Shutdown: %s", q.name)
+	log.Trace("WrappedQueue: %s Shutdown", q.name)
 	q.lock.Lock()
 	defer q.lock.Unlock()
 	if q.internal == nil {
@@ -179,7 +180,7 @@ func (q *WrappedQueue) Shutdown() {
 
 // Terminate this queue and close the queue
 func (q *WrappedQueue) Terminate() {
-	log.Trace("Terminating: %s", q.name)
+	log.Trace("WrappedQueue: %s Terminating", q.name)
 	q.lock.Lock()
 	defer q.lock.Unlock()
 	if q.internal == nil {
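
The change repeated across every file in this commit is a pure message convention: each line now leads with the implementation name ("WrappedQueue: %s Shutdown" rather than "Shutdown: %s"), so logs from mixed queue types can be filtered per implementation. If one wanted to centralise that convention instead of repeating the prefix in every format string, a tiny helper might look like the following (entirely hypothetical — the commit itself just edits the format strings in place):

```go
package main

import "log"

// prefixLogger prepends "Impl: name " to every message, mimicking the
// convention the commit establishes by hand.
type prefixLogger struct {
	impl string // e.g. "WrappedQueue"
	name string // the queue's configured name
}

func (l prefixLogger) Trace(format string, args ...interface{}) {
	log.Printf(l.impl+": "+l.name+" "+format, args...)
}

func main() {
	l := prefixLogger{impl: "WrappedQueue", name: "mail"}
	l.Trace("Shutdown")
	l.Trace("Done")
}
```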

modules/queue/workerpool.go:

@@ -72,7 +72,7 @@ func (p *WorkerPool) pushBoost(data Data) {
 			ctx, cancel := context.WithCancel(p.baseCtx)
 			desc := GetManager().GetDescription(p.qid)
 			if desc != nil {
-				log.Warn("Worker Channel for %v blocked for %v - adding %d temporary workers for %s, block timeout now %v", desc.Name, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout)
+				log.Warn("WorkerPool: %d (for %s) Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, desc.Name, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout)
 
 				start := time.Now()
 				pid := desc.RegisterWorkers(p.boostWorkers, start, false, start, cancel)
@@ -82,7 +82,7 @@ func (p *WorkerPool) pushBoost(data Data) {
 					cancel()
 				}()
 			} else {
-				log.Warn("Worker Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout)
+				log.Warn("WorkerPool: %d Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout)
 			}
 			go func() {
 				<-time.After(p.boostTimeout)
@@ -128,6 +128,10 @@ func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc {
 			desc.RemoveWorkers(pid)
 			cancel()
 		}()
+		log.Trace("WorkerPool: %d (for %s) adding %d workers with group id: %d", p.qid, desc.Name, number, pid)
+	} else {
+		log.Trace("WorkerPool: %d adding %d workers (no group id)", p.qid, number)
+
 	}
 	p.addWorkers(ctx, number)
 	return cancel
@@ -173,18 +177,18 @@ func (p *WorkerPool) Wait() {
 // CleanUp will drain the remaining contents of the channel
 // This should be called after AddWorkers context is closed
 func (p *WorkerPool) CleanUp(ctx context.Context) {
-	log.Trace("CleanUp")
+	log.Trace("WorkerPool: %d CleanUp", p.qid)
 	close(p.dataChan)
 	for data := range p.dataChan {
 		p.handle(data)
 		select {
 		case <-ctx.Done():
-			log.Warn("Cleanup context closed before finishing clean-up")
+			log.Warn("WorkerPool: %d Cleanup context closed before finishing clean-up", p.qid)
 			return
 		default:
 		}
 	}
-	log.Trace("CleanUp done")
+	log.Trace("WorkerPool: %d CleanUp Done", p.qid)
 }
 
 func (p *WorkerPool) doWork(ctx context.Context) {
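
CleanUp's drain loop, now tagged with the pool's qid so overlapping pools can be told apart, works like this in isolation: closing dataChan lets the range end once the buffer is empty, each remaining item is handled inline, and the ctx check gives a terminate a way to cut the drain short. A self-contained sketch (int payloads and a handle callback in place of the pool's Data type and handler):

```go
package main

import (
	"context"
	"fmt"
)

// cleanUp drains what is left in the channel, checking the context
// between items so termination can abort an overlong drain.
func cleanUp(ctx context.Context, dataChan chan int, handle func(int)) {
	close(dataChan) // no more producers; range ends when the buffer empties
	for data := range dataChan {
		handle(data)
		select {
		case <-ctx.Done():
			fmt.Println("cleanup context closed before finishing clean-up")
			return
		default:
		}
	}
	fmt.Println("cleanup done")
}

func main() {
	dataChan := make(chan int, 3)
	dataChan <- 1
	dataChan <- 2

	cleanUp(context.Background(), dataChan, func(d int) {
		fmt.Println("handled:", d)
	})
}
```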