gotosocial/internal/concurrency/workers.go
kim 223025fc27
[security] transport.Controller{} and transport.Transport{} security and performance improvements (#564)
* cache transports in controller by privkey-generated pubkey, add retry logic to transport requests

Signed-off-by: kim <grufwub@gmail.com>
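
A hedged sketch of the caching idea (Transport, newTransport, and the exact key derivation here are hypothetical stand-ins, not the actual controller code): the controller derives a string key from the public key belonging to an account's private key, and reuses the transport stored under that key.

package transport

import (
	"crypto"
	"fmt"
	"sync"
)

// Transport and newTransport are illustrative stand-ins for this sketch.
type Transport struct{ privkey crypto.Signer }

func newTransport(privkey crypto.Signer) *Transport { return &Transport{privkey} }

type controller struct {
	mu    sync.Mutex
	cache map[string]*Transport
}

// GetTransport returns the transport cached under the pubkey derived
// from privkey, creating and caching a new one if none exists yet.
func (c *controller) GetTransport(privkey crypto.Signer) *Transport {
	key := fmt.Sprintf("%+v", privkey.Public()) // privkey-generated pubkey as cache key

	c.mu.Lock()
	defer c.mu.Unlock()

	if c.cache == nil {
		c.cache = make(map[string]*Transport)
	}
	if t, ok := c.cache[key]; ok {
		return t // reuse cached transport for this pubkey
	}

	t := newTransport(privkey)
	c.cache[key] = t
	return t
}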

* update code comments, defer mutex unlocks

Signed-off-by: kim <grufwub@gmail.com>

* add count to 'performing request' log message

Signed-off-by: kim <grufwub@gmail.com>

* reduce repeated conversions of same url.URL object

Signed-off-by: kim <grufwub@gmail.com>

* move worker.Worker to concurrency subpackage, add WorkQueue type, limit transport http client use by WorkQueue

Signed-off-by: kim <grufwub@gmail.com>

* fix security advisories regarding max outgoing conns, max rsp body size

- implemented by a new httpclient.Client{} that wraps an underlying
  client with a queue to limit connections, and limit reader wrapping
  a response body with a configured maximum size
- update pub.HttpClient args passed around to be this new httpclient.Client{}

Signed-off-by: kim <grufwub@gmail.com>
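
A hedged sketch of that wrapping approach (not the actual httpclient.Client implementation; New, Do, and limitBody are illustrative names): a buffered channel caps the number of in-flight requests, and the response body is wrapped in a reader that errors once a configured maximum size is exceeded and frees the connection slot on close.

package httpclient

import (
	"errors"
	"io"
	"net/http"
)

// Client wraps an underlying http.Client, limiting concurrent
// connections and the maximum readable response body size.
type Client struct {
	client  http.Client
	queue   chan struct{} // semaphore capping open connections
	maxSize int64         // maximum response body size in bytes
}

func New(maxOpenConns int, maxBodySize int64) *Client {
	return &Client{
		queue:   make(chan struct{}, maxOpenConns),
		maxSize: maxBodySize,
	}
}

func (c *Client) Do(req *http.Request) (*http.Response, error) {
	// Acquire a connection slot, respecting the request context.
	select {
	case c.queue <- struct{}{}:
	case <-req.Context().Done():
		return nil, req.Context().Err()
	}

	rsp, err := c.client.Do(req)
	if err != nil {
		<-c.queue // release slot on request error
		return nil, err
	}

	// Wrap the body: enforce the size limit, release the slot on close.
	rsp.Body = &limitBody{body: rsp.Body, remaining: c.maxSize, release: func() { <-c.queue }}
	return rsp, nil
}

type limitBody struct {
	body      io.ReadCloser
	remaining int64
	release   func()
}

func (b *limitBody) Read(p []byte) (int, error) {
	if b.remaining <= 0 {
		// Limit reached (a fuller implementation would distinguish clean EOF).
		return 0, errors.New("httpclient: response body exceeds maximum size")
	}
	if int64(len(p)) > b.remaining {
		p = p[:b.remaining]
	}
	n, err := b.body.Read(p)
	b.remaining -= int64(n)
	return n, err
}

func (b *limitBody) Close() error {
	err := b.body.Close()
	if b.release != nil {
		b.release() // free the connection slot exactly once
		b.release = nil
	}
	return err
}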

* add httpclient tests, move ip validation to separate package + change mechanism

Signed-off-by: kim <grufwub@gmail.com>
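
The validation mechanism itself isn't shown in this file; as a rough illustration of the kind of check such a package performs (standard library only, Go 1.17+ for IsPrivate; not the actual implementation):

package netutil

import "net"

// ValidateIP reports whether an IP looks safe to dial out to, rejecting
// unspecified, loopback, private, link-local, and multicast addresses.
func ValidateIP(ip net.IP) bool {
	switch {
	case ip == nil,
		ip.IsUnspecified(),
		ip.IsLoopback(),
		ip.IsPrivate(), // RFC 1918 / RFC 4193 ranges
		ip.IsLinkLocalUnicast(),
		ip.IsLinkLocalMulticast(),
		ip.IsMulticast():
		return false
	}
	return true
}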

* fix merge conflicts

Signed-off-by: kim <grufwub@gmail.com>

* use singular mutex in transport rather than separate signer mutexes

Signed-off-by: kim <grufwub@gmail.com>

* improved useragent string

Signed-off-by: kim <grufwub@gmail.com>

* add note regarding missing test

Signed-off-by: kim <grufwub@gmail.com>

* remove useragent field from transport (instead store in controller)

Signed-off-by: kim <grufwub@gmail.com>

* shut up linter

Signed-off-by: kim <grufwub@gmail.com>

* reset other signing headers on each loop iteration

Signed-off-by: kim <grufwub@gmail.com>

* respect request ctx during retry-backoff sleep period

Signed-off-by: kim <grufwub@gmail.com>
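
A minimal sketch of that pattern (illustrative, not the exact code): instead of time.Sleep between retries, select on a timer and the request context so cancellation cuts the backoff short.

package httpclient

import (
	"context"
	"time"
)

// sleepCtx waits out the backoff duration, but returns early with
// the context's error if the request is cancelled or times out.
func sleepCtx(ctx context.Context, backoff time.Duration) error {
	timer := time.NewTimer(backoff)
	defer timer.Stop()

	select {
	case <-timer.C:
		return nil // backoff elapsed, safe to retry
	case <-ctx.Done():
		return ctx.Err() // request cancelled / deadline hit
	}
}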

* use external pkg with docs explaining performance "hack"

Signed-off-by: kim <grufwub@gmail.com>

* use http package constants instead of string method literals

Signed-off-by: kim <grufwub@gmail.com>

* add license file headers

Signed-off-by: kim <grufwub@gmail.com>

* update code comment to match new func names

Signed-off-by: kim <grufwub@gmail.com>

* updates to user-agent string

Signed-off-by: kim <grufwub@gmail.com>

* update signed testrig models to fit with new transport logic (which now uses a separate signer)

Signed-off-by: kim <grufwub@gmail.com>

* fuck you linter

Signed-off-by: kim <grufwub@gmail.com>
2022-05-15 11:16:43 +02:00

package concurrency

import (
	"context"
	"errors"
	"fmt"
	"path"
	"reflect"
	"runtime"

	"codeberg.org/gruf/go-runners"
	"github.com/sirupsen/logrus"
)

// WorkerPool represents a processor for MsgType objects, using a worker pool to allocate resources.
type WorkerPool[MsgType any] struct {
	workers runners.WorkerPool
	process func(context.Context, MsgType) error
	prefix  string // contains type prefix for logging
}

// NewWorkerPool returns a new WorkerPool[MsgType] with the given number of workers and queue ratio,
// where the queue ratio is multiplied by the number of workers to get the queue size. If either
// argument is < 1, a suitable default is determined from the runtime's GOMAXPROCS value.
func NewWorkerPool[MsgType any](workers int, queueRatio int) *WorkerPool[MsgType] {
	var zero MsgType

	if workers < 1 {
		// ensure sensible workers
		workers = runtime.GOMAXPROCS(0)
	}

	if queueRatio < 1 {
		// ensure sensible ratio
		queueRatio = 100
	}

	// Calculate the short type string for the msg type
	msgType := reflect.TypeOf(zero).String()
	_, msgType = path.Split(msgType)

	w := &WorkerPool[MsgType]{
		workers: runners.NewWorkerPool(workers, workers*queueRatio),
		process: nil,
		prefix:  fmt.Sprintf("worker.Worker[%s]", msgType),
	}

	// Log new worker creation with type prefix
	logrus.Infof("%s created with workers=%d queue=%d",
		w.prefix,
		workers,
		workers*queueRatio,
	)

	return w
}

// Start will attempt to start the underlying worker pool, or return error.
func (w *WorkerPool[MsgType]) Start() error {
	logrus.Infof("%s starting", w.prefix)

	// Check processor was set
	if w.process == nil {
		return errors.New("nil Worker.process function")
	}

	// Attempt to start pool
	if !w.workers.Start() {
		return errors.New("failed to start Worker pool")
	}

	return nil
}

// Stop will attempt to stop the underlying worker pool, or return error.
func (w *WorkerPool[MsgType]) Stop() error {
	logrus.Infof("%s stopping", w.prefix)

	// Attempt to stop pool
	if !w.workers.Stop() {
		return errors.New("failed to stop Worker pool")
	}

	return nil
}

// SetProcessor will set the WorkerPool's processor function, which is called for each queued message.
func (w *WorkerPool[MsgType]) SetProcessor(fn func(context.Context, MsgType) error) {
	if w.process != nil {
		logrus.Panicf("%s Worker.process is already set", w.prefix)
	}
	w.process = fn
}

// Queue will queue the provided message to be processed when there's a free worker.
func (w *WorkerPool[MsgType]) Queue(msg MsgType) {
	logrus.Tracef("%s queueing message (workers=%d queue=%d): %+v",
		w.prefix, w.workers.Workers(), w.workers.Queue(), msg,
	)
	w.workers.Enqueue(func(ctx context.Context) {
		if err := w.process(ctx, msg); err != nil {
			logrus.Errorf("%s %v", w.prefix, err)
		}
	})
}
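
For reference, a minimal usage sketch (MyMsg and its handling are hypothetical; since this is an internal package, the import only works from within the gotosocial module):

package main

import (
	"context"

	"github.com/sirupsen/logrus"
	"github.com/superseriousbusiness/gotosocial/internal/concurrency"
)

// MyMsg is a hypothetical message type for this sketch.
type MyMsg struct{ Text string }

func main() {
	// Args < 1 pick sensible defaults from GOMAXPROCS.
	pool := concurrency.NewWorkerPool[MyMsg](0, 0)

	// The processor must be set before Start.
	pool.SetProcessor(func(ctx context.Context, msg MyMsg) error {
		logrus.Infof("processing: %s", msg.Text)
		return nil
	})

	if err := pool.Start(); err != nil {
		logrus.Fatal(err)
	}
	defer pool.Stop()

	pool.Queue(MyMsg{Text: "hello"})
}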