Compare commits

...

21 commits

Author SHA1 Message Date
6543 018a3aa005
Merge 3a3e09d86d into c6b2cd8a48 2024-04-28 11:26:36 +02:00
renovate[bot] c6b2cd8a48
chore(deps): update node.js to v22 (#3659) 2024-04-28 11:14:03 +02:00
renovate[bot] 325b1b5e57
chore(deps): update dependency trim to v1 (#3658) 2024-04-28 10:50:39 +02:00
Robert Kaussow 4b1ff6d1a7
Compare to pipeline created timestamp while using before/after filter (#3654) 2024-04-28 10:32:31 +02:00
renovate[bot] 2c3cd83402
chore(deps): update dependency got to v14 (#3657) 2024-04-28 10:16:25 +02:00
renovate[bot] a230e88c3a
chore(deps): lock file maintenance (#3656)
[![Mend
Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com)

This PR contains the following updates:

| Update | Change |
|---|---|
| lockFileMaintenance | All locks refreshed |

🔧 This Pull Request updates lock files to use the latest dependency
versions.

---

### Configuration

📅 **Schedule**: Branch creation - "before 4am on Monday" (UTC),
Automerge - "before 4am" (UTC).

🚦 **Automerge**: Enabled.

♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.

👻 **Immortal**: This PR will be recreated if closed unmerged. Get
[config help](https://togithub.com/renovatebot/renovate/discussions) if
that's undesired.

---

- [ ] <!-- rebase-check -->If you want to rebase/retry this PR, check
this box

---

This PR has been generated by [Mend
Renovate](https://www.mend.io/free-developer-tools/renovate/). View
repository job log
[here](https://developer.mend.io/github/woodpecker-ci/woodpecker).

<!--renovate-debug:eyJjcmVhdGVkSW5WZXIiOiIzNy4zMjEuMiIsInVwZGF0ZWRJblZlciI6IjM3LjMyMS4yIiwidGFyZ2V0QnJhbmNoIjoibWFpbiIsImxhYmVscyI6WyJkZXBlbmRlbmNpZXMiLCJkb2N1bWVudGF0aW9uIiwidWkiXX0=-->

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2024-04-28 08:18:02 +02:00
Anbraten 3a3e09d86d close buffer once done 2024-04-25 19:01:21 +02:00
Anbraten 795b7c2ef9 improve initial log speed of UI 2024-04-25 18:59:15 +02:00
Anbraten 41ac0fe00f lowercase 2024-04-25 13:30:37 +02:00
Anbraten c92aa6de4c lower case comments 2024-04-25 13:02:34 +02:00
Anbraten f7a0e1b1f8 naming 2024-04-25 12:51:48 +02:00
Anbraten e08869716a add tests 2024-04-25 12:45:05 +02:00
Anbraten 7a545940d0 Merge remote-tracking branch 'upstream/main' into pr/6543/2072-2 2024-04-25 11:24:40 +02:00
Anbraten 401fc45dac fix 2024-04-22 14:57:46 +02:00
Anbraten ce463b32b1 add timebased log flushing 2024-04-19 09:56:01 +02:00
Anbraten bc68e2fb08 Merge remote-tracking branch 'upstream/main' into pr/6543/2072-2 2024-04-19 09:07:19 +02:00
6543 00b66a72fa
Merge branch 'main' into cached_logs-io 2023-08-07 17:06:35 +02:00
6543 b3930d1733
Merge branch 'main' into cached_logs-io 2023-08-07 16:51:15 +02:00
6543 436a101b1a
CI.restart() 2023-07-31 16:54:59 +02:00
6543 bb20cf38ba
refactor too 2023-07-31 16:52:36 +02:00
6543 3bb2b82965
add buffer to log writer 2023-07-31 16:34:31 +02:00
16 changed files with 1000 additions and 930 deletions

View file

@ -3,7 +3,7 @@ when:
variables:
- &golang_image 'docker.io/golang:1.22.2'
- &node_image 'docker.io/node:21-alpine'
- &node_image 'docker.io/node:22-alpine'
- &xgo_image 'docker.io/techknowlogick/xgo:go-1.22.1'
- &xgo_version 'go-1.21.2'

View file

@ -1,6 +1,6 @@
variables:
- &golang_image 'docker.io/golang:1.22.2'
- &node_image 'docker.io/node:21-alpine'
- &node_image 'docker.io/node:22-alpine'
- &xgo_image 'docker.io/techknowlogick/xgo:go-1.22.1'
- &xgo_version 'go-1.21.2'
- &buildx_plugin 'docker.io/woodpeckerci/plugin-docker-buildx:3.2.1'

View file

@ -13,7 +13,7 @@ steps:
branch: renovate/*
- name: spellcheck
image: docker.io/node:21-alpine
image: docker.io/node:22-alpine
depends_on: []
commands:
- corepack enable

View file

@ -6,7 +6,7 @@ when:
- renovate/*
variables:
- &node_image 'docker.io/node:21-alpine'
- &node_image 'docker.io/node:22-alpine'
- &when
path:
# related config files

View file

@ -17,15 +17,22 @@ package agent
import (
"io"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
agentLogger "go.woodpecker-ci.org/woodpecker/v2/agent/logger"
"go.woodpecker-ci.org/woodpecker/v2/pipeline"
backend "go.woodpecker-ci.org/woodpecker/v2/pipeline/backend/types"
"go.woodpecker-ci.org/woodpecker/v2/pipeline/rpc"
)
const (
writeBufferSize = 10240 // 10kb
flushInterval = 1 * time.Second
)
func (r *Runner) createLogger(logger zerolog.Logger, uploads *sync.WaitGroup, workflow *rpc.Workflow) pipeline.Logger {
return func(step *backend.Step, rc io.Reader) error {
loglogger := logger.With().
@ -43,7 +50,9 @@ func (r *Runner) createLogger(logger zerolog.Logger, uploads *sync.WaitGroup, wo
loglogger.Debug().Msg("log stream opened")
logStream := rpc.NewLineWriter(r.client, step.UUID, secrets...)
if _, err := io.Copy(logStream, rc); err != nil {
logStreamBufferWithTimeout := agentLogger.NewLogBuffer(logStream, writeBufferSize, flushInterval)
defer logStreamBufferWithTimeout.Close()
if _, err := io.Copy(logStreamBufferWithTimeout, rc); err != nil {
log.Error().Err(err).Msg("copy limited logStream part")
}

84
agent/logger/buf.go Normal file
View file

@ -0,0 +1,84 @@
package logger
import (
"bufio"
"io"
"sync"
"time"
)
// LogBuffer wraps an io.Writer with a size-bounded buffer that is flushed
// automatically: either when the bufio buffer fills up, or when flushInterval
// elapses without new writes (handled by a background goroutine).
type LogBuffer struct {
	// embedded mutex serializes access to buffer between Write callers and
	// the background flush goroutine; embedding also exposes Lock/Unlock.
	*sync.Mutex
	buffer        *bufio.Writer // buffered writer around the underlying sink
	flushInterval time.Duration // inactivity period after which buffer is flushed
	timer         *time.Timer   // fires when flushInterval passes without a Write
	closeChan     chan struct{} // closed by Close to stop the flush goroutine
}
// NewLogBuffer returns a LogBuffer that buffers up to bufferSize bytes
// written to writer and additionally flushes after flushInterval of
// inactivity. It starts a background goroutine for the timed flushes;
// call Close to stop it and flush any remaining data.
func NewLogBuffer(writer io.Writer, bufferSize int, flushInterval time.Duration) *LogBuffer {
	b := &LogBuffer{
		Mutex:         &sync.Mutex{},
		buffer:        bufio.NewWriterSize(writer, bufferSize),
		flushInterval: flushInterval,
		timer:         time.NewTimer(flushInterval),
		closeChan:     make(chan struct{}),
	}

	go b.start()

	return b
}
// Write appends data to the buffer (flushing to the underlying writer when
// the buffer is full) and re-arms the inactivity timer.
//
// Fixes two concurrency bugs in the previous version:
//   - bufio.Writer is not safe for concurrent use, and the background
//     goroutine flushes the same buffer under the mutex — Write must take
//     the lock too, otherwise Write and Flush race on shared state.
//   - draining the timer with a bare `<-lb.timer.C` blocks forever when the
//     flush goroutine has already consumed the tick; the drain must be
//     non-blocking.
func (lb *LogBuffer) Write(data []byte) (int, error) {
	lb.Lock()
	defer lb.Unlock()

	n, err := lb.buffer.Write(data)
	if err != nil {
		return n, err
	}

	// reset timer since there's new activity; drain the channel without
	// blocking because the tick may already have been received elsewhere
	if !lb.timer.Stop() {
		select {
		case <-lb.timer.C:
		default:
		}
	}
	lb.timer.Reset(lb.flushInterval)

	return n, nil
}
// start runs the background flush loop until waitForFlush reports that the
// buffer has been closed.
func (lb *LogBuffer) start() {
	for lb.waitForFlush() {
	}
}
// waitForFlush blocks until either the inactivity timer fires (then flushes
// the buffer) or the close signal arrives. It reports whether the background
// loop should keep running.
//
// Fix: the previous version returned false when Flush failed, which
// permanently and silently disabled time-based flushing after a single
// transient write error. A flush error is now tolerated here — it will
// resurface to the caller on the next Write, Flush, or Close.
func (lb *LogBuffer) waitForFlush() bool {
	// wait for either a timeout or a close signal
	select {
	case <-lb.timer.C:
		// time limit reached, flush the buffer
		lb.Lock()
		//nolint:errcheck // error is intentionally dropped; see doc comment
		_ = lb.buffer.Flush()
		lb.Unlock()
	case <-lb.closeChan:
		// close signal received, stop the loop
		return false
	}
	return true
}
// Flush writes any buffered data to the underlying writer.
func (lb *LogBuffer) Flush() error {
	lb.Lock()
	err := lb.buffer.Flush()
	lb.Unlock()
	return err
}
// Close stops the inactivity timer and the background flush goroutine, then
// flushes any remaining buffered data to the underlying writer.
//
// Fix: the previous version panicked when called twice ("close of closed
// channel"). Close is now idempotent — the channel is only closed once;
// subsequent calls just flush again. Calls are serialized by the mutex.
func (lb *LogBuffer) Close() error {
	lb.Lock()
	defer lb.Unlock()

	lb.timer.Stop()

	// only close the channel if it has not been closed yet
	select {
	case <-lb.closeChan:
		// already closed by an earlier Close call
	default:
		close(lb.closeChan)
	}

	return lb.buffer.Flush()
}

92
agent/logger/buf_test.go Normal file
View file

@ -0,0 +1,92 @@
package logger_test
import (
"testing"
"time"
"go.woodpecker-ci.org/woodpecker/v2/agent/logger"
)
// testBuffer is a minimal io.Writer that records every byte written to it
// and counts how many times Write is called. Each Write call corresponds to
// one flush of the LogBuffer under test.
type testBuffer struct {
	buf     []byte
	flushes int
}

// Write appends p to the recorded output and increments the flush counter.
func (b *testBuffer) Write(p []byte) (n int, err error) {
	b.flushes++
	b.buf = append(b.buf, p...)
	return len(p), nil
}
// TestFlushAfterSize checks that data is flushed to the underlying writer
// as soon as the configured buffer size is exceeded, independent of the
// flush timer.
func TestFlushAfterSize(t *testing.T) {
	bufSize := 4
	bufTime := 10 * time.Minute // high value so the timer never triggers during the test
	testBuffer := &testBuffer{
		buf:     make([]byte, 0),
		flushes: 0,
	}
	logBuffer := logger.NewLogBuffer(testBuffer, bufSize, bufTime)
	defer logBuffer.Close()
	// write 3 bytes (below the 4-byte buffer size, so everything stays buffered)
	if _, err := logBuffer.Write([]byte("123")); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if string(testBuffer.buf) != "" {
		t.Fatalf("expected 0 bytes, got %s", testBuffer.buf)
	}
	// write 4 more bytes; only 1 byte fits, so bufio flushes "1234" once
	if _, err := logBuffer.Write([]byte("4567")); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if string(testBuffer.buf) != "1234" {
		t.Fatalf("expected 1234, got %s", testBuffer.buf)
	}
	// write 2 more bytes; again only 1 byte fits, forcing a second flush
	if _, err := logBuffer.Write([]byte("89")); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// check the flush count and the accumulated output
	if testBuffer.flushes != 2 {
		t.Fatalf("expected 2 flushes, got %d", testBuffer.flushes)
	}
	if string(testBuffer.buf) != "12345678" {
		t.Fatalf("expected 12345678, got %s", testBuffer.buf)
	}
}
// TestFlushAfterTime checks that buffered data is flushed once the flush
// interval elapses, even though the buffer is nowhere near full.
func TestFlushAfterTime(t *testing.T) {
	bufSize := 1024 // high value so the buffer is never flushed because of its size
	bufTime := 10 * time.Millisecond
	testBuffer := &testBuffer{
		buf: make([]byte, 0),
	}
	logBuffer := logger.NewLogBuffer(testBuffer, bufSize, bufTime)
	defer logBuffer.Close()
	// write 4 bytes
	if _, err := logBuffer.Write([]byte("1234")); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// check that the buffer was not flushed immediately
	if string(testBuffer.buf) != "" {
		t.Fatalf("expected 0 bytes, got %d", len(testBuffer.buf))
	}
	// wait (with headroom) for the timer-based flush to happen
	// NOTE(review): sleep-based timing may be flaky on heavily loaded CI machines
	time.Sleep(20 * time.Millisecond)
	if string(testBuffer.buf) != "1234" {
		t.Fatalf("expected 4 bytes, got %d", len(testBuffer.buf))
	}
}

View file

@ -1,6 +1,6 @@
# docker build --rm -f docker/Dockerfile.make -t woodpecker/make:local .
FROM docker.io/golang:1.22-alpine3.19 as golang_image
FROM docker.io/node:21-alpine3.19
FROM docker.io/node:22-alpine3.19
# renovate: datasource=repology depName=alpine_3_19/make versioning=loose
ENV MAKE_VERSION="4.4.1-r2"

View file

@ -53,8 +53,8 @@
},
"pnpm": {
"overrides": {
"trim": "^0.0.3",
"got": "^11.8.5"
"trim": "^1.0.0",
"got": "^14.0.0"
}
}
}

File diff suppressed because it is too large Load diff

View file

@ -18,6 +18,7 @@ package rpc
import (
"context"
"fmt"
"io"
"strings"
"time"
@ -64,7 +65,7 @@ type LineWriter struct {
}
// NewLineWriter returns a new line writer.
func NewLineWriter(peer Peer, stepUUID string, secret ...string) *LineWriter {
func NewLineWriter(peer Peer, stepUUID string, secret ...string) io.Writer {
return &LineWriter{
peer: peer,
stepUUID: stepUUID,
@ -96,13 +97,3 @@ func (w *LineWriter) Write(p []byte) (n int, err error) {
w.lines = append(w.lines, line)
return len(p), nil
}
// Lines returns the line history
func (w *LineWriter) Lines() []*LogEntry {
return w.lines
}
// Clear clears the line history
func (w *LineWriter) Clear() {
w.lines = w.lines[:0]
}

View file

@ -186,7 +186,7 @@ func LogStreamSSE(c *gin.Context) {
return
}
if step.State != model.StatusRunning {
if step.State != model.StatusPending && step.State != model.StatusRunning {
log.Debug().Msg("step not running (anymore).")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: step not running (anymore)\n\n"))
return

View file

@ -59,11 +59,11 @@ func (s storage) GetPipelineList(repo *model.Repo, p *model.ListOptions, f *mode
if f != nil {
if f.After != 0 {
cond = cond.And(builder.Gt{"pipeline_started": f.After})
cond = cond.And(builder.Gt{"pipeline_created": f.After})
}
if f.Before != 0 {
cond = cond.And(builder.Lt{"pipeline_started": f.Before})
cond = cond.And(builder.Lt{"pipeline_created": f.Before})
}
}

View file

@ -231,21 +231,19 @@ func TestPipelines(t *testing.T) {
})
g.It("Should get filtered pipelines", func() {
dt1, _ := time.Parse(time.RFC3339, "2023-01-15T15:00:00Z")
pipeline1 := &model.Pipeline{
RepoID: repo.ID,
Started: dt1.Unix(),
RepoID: repo.ID,
}
dt2, _ := time.Parse(time.RFC3339, "2023-01-15T16:30:00Z")
pipeline2 := &model.Pipeline{
RepoID: repo.ID,
Started: dt2.Unix(),
RepoID: repo.ID,
}
err1 := store.CreatePipeline(pipeline1, []*model.Step{}...)
g.Assert(err1).IsNil()
time.Sleep(1 * time.Second)
before := time.Now().Unix()
err2 := store.CreatePipeline(pipeline2, []*model.Step{}...)
g.Assert(err2).IsNil()
pipelines, err3 := store.GetPipelineList(&model.Repo{ID: 1}, &model.ListOptions{Page: 1, PerPage: 50}, &model.PipelineFilter{Before: dt2.Unix()})
pipelines, err3 := store.GetPipelineList(&model.Repo{ID: 1}, &model.ListOptions{Page: 1, PerPage: 50}, &model.PipelineFilter{Before: before})
g.Assert(err3).IsNil()
g.Assert(len(pipelines)).Equal(1)
g.Assert(pipelines[0].ID).Equal(pipeline1.ID)

File diff suppressed because it is too large Load diff

View file

@ -297,7 +297,7 @@ async function loadLogs() {
const logs = await apiClient.getLogs(repo.value.id, pipeline.value.number, step.value.id);
logs?.forEach((line) => writeLog({ index: line.line, text: decode(line.data), time: line.time }));
flushLogs(false);
} else if (isStepRunning(step.value)) {
} else if (step.value.state === 'pending' || isStepRunning(step.value)) {
loadedStepSlug.value = stepSlug.value;
stream.value = apiClient.streamLogs(repo.value.id, pipeline.value.number, step.value.id, (line) => {
writeLog({ index: line.line, text: decode(line.data), time: line.time });