1
0
Fork 0
mirror of https://github.com/woodpecker-ci/woodpecker.git synced 2024-12-20 07:26:34 +00:00
woodpecker/server/api/stream.go

303 lines
7.7 KiB
Go
Raw Normal View History

// Copyright 2022 Woodpecker Authors
2018-02-19 22:24:10 +00:00
// Copyright 2018 Drone.IO Inc.
2018-03-21 13:02:17 +00:00
//
2018-02-19 22:24:10 +00:00
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
2018-03-21 13:02:17 +00:00
//
2018-02-19 22:24:10 +00:00
// http://www.apache.org/licenses/LICENSE-2.0
2018-03-21 13:02:17 +00:00
//
2018-02-19 22:24:10 +00:00
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
2015-09-30 01:21:17 +00:00
import (
2017-03-05 07:56:08 +00:00
"context"
"encoding/json"
"fmt"
2017-07-18 16:27:20 +00:00
"io"
2017-03-16 10:14:02 +00:00
"net/http"
2015-09-30 01:21:17 +00:00
"strconv"
"time"
2015-09-30 01:21:17 +00:00
"github.com/gin-gonic/gin"
"github.com/rs/zerolog/log"
"go.woodpecker-ci.org/woodpecker/v2/server"
"go.woodpecker-ci.org/woodpecker/v2/server/logging"
"go.woodpecker-ci.org/woodpecker/v2/server/model"
"go.woodpecker-ci.org/woodpecker/v2/server/pubsub"
"go.woodpecker-ci.org/woodpecker/v2/server/router/middleware/session"
"go.woodpecker-ci.org/woodpecker/v2/server/store"
2015-09-30 01:21:17 +00:00
)
const (
	// maxQueuedBatchesPerClient is the buffer size of the per-client channel
	// of log batches: how many batches of logs to keep for each client before
	// starting to drop them if the client is not consuming them faster than
	// they arrive.
	maxQueuedBatchesPerClient int = 30
)
2023-10-24 13:21:05 +00:00
// EventStreamSSE
2017-07-24 19:57:07 +00:00
//
// @Summary Stream events like pipeline updates
// @Description With quic and http2 support
// @Router /stream/events [get]
// @Produce plain
// @Success 200
2023-10-24 13:21:05 +00:00
// @Tags Events
2017-07-18 16:27:20 +00:00
func EventStreamSSE(c *gin.Context) {
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-store")
2017-07-18 16:27:20 +00:00
c.Header("Connection", "keep-alive")
2017-07-24 17:23:22 +00:00
c.Header("X-Accel-Buffering", "no")
2017-07-18 16:27:20 +00:00
rw := c.Writer
flusher, ok := rw.(http.Flusher)
if !ok {
c.String(http.StatusInternalServerError, "Streaming not supported")
2017-07-18 16:27:20 +00:00
return
}
2017-08-22 15:49:44 +00:00
// ping the client
logWriteStringErr(io.WriteString(rw, ": ping\n\n"))
2017-08-22 15:49:44 +00:00
flusher.Flush()
log.Debug().Msg("user feed: connection opened")
2017-07-18 16:27:20 +00:00
user := session.User(c)
repo := map[string]bool{}
if user != nil {
repos, _ := store.FromContext(c).RepoList(user, false, true)
2017-07-18 16:27:20 +00:00
for _, r := range repos {
repo[r.FullName] = true
}
}
eventChan := make(chan []byte, 10)
ctx, cancel := context.WithCancelCause(
2017-07-18 16:27:20 +00:00
context.Background(),
)
defer func() {
cancel(nil)
close(eventChan)
log.Debug().Msg("user feed: connection closed")
2017-07-18 16:27:20 +00:00
}()
go func() {
2023-10-13 05:34:33 +00:00
server.Config.Services.Pubsub.Subscribe(ctx, func(m pubsub.Message) {
defer func() {
obj := recover() // fix #2480 // TODO: check if it's still needed
log.Trace().Msgf("pubsub subscribe recover return: %v", obj)
}()
2017-07-18 16:27:20 +00:00
name := m.Labels["repo"]
priv := m.Labels["private"]
if repo[name] || priv == "false" {
select {
case <-ctx.Done():
return
default:
eventChan <- m.Data
2017-07-18 16:27:20 +00:00
}
}
})
2023-10-13 05:34:33 +00:00
cancel(nil)
2017-07-18 16:27:20 +00:00
}()
for {
select {
case <-rw.CloseNotify():
return
case <-ctx.Done():
return
2017-08-22 15:49:44 +00:00
case <-time.After(time.Second * 30):
logWriteStringErr(io.WriteString(rw, ": ping\n\n"))
2017-08-22 15:49:44 +00:00
flusher.Flush()
case buf, ok := <-eventChan:
2017-07-18 16:27:20 +00:00
if ok {
logWriteStringErr(io.WriteString(rw, "data: "))
logWriteStringErr(rw.Write(buf))
logWriteStringErr(io.WriteString(rw, "\n\n"))
2017-07-18 16:27:20 +00:00
flusher.Flush()
}
}
}
}
2017-07-24 19:57:07 +00:00
2023-10-24 13:21:05 +00:00
// LogStreamSSE
//
// @Summary Stream logs of a pipeline step
2023-10-24 13:21:05 +00:00
// @Router /stream/logs/{repo_id}/{pipeline}/{stepID} [get]
// @Produce plain
// @Success 200
// @Tags Pipeline logs
// @Param repo_id path int true "the repository id"
// @Param pipeline path int true "the number of the pipeline"
// @Param stepID path int true "the step id"
2017-07-24 19:57:07 +00:00
func LogStreamSSE(c *gin.Context) {
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
c.Header("X-Accel-Buffering", "no")
rw := c.Writer
flusher, ok := rw.(http.Flusher)
if !ok {
c.String(http.StatusInternalServerError, "Streaming not supported")
2017-07-24 19:57:07 +00:00
return
}
logWriteStringErr(io.WriteString(rw, ": ping\n\n"))
2017-08-22 15:49:44 +00:00
flusher.Flush()
_store := store.FromContext(c)
repo := session.Repo(c)
pipeline, err := strconv.ParseInt(c.Param("pipeline"), 10, 64)
if err != nil {
log.Debug().Err(err).Msg("pipeline number invalid")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: pipeline number invalid\n\n"))
return
}
pl, err := _store.GetPipelineNumber(repo, pipeline)
2017-07-24 19:57:07 +00:00
if err != nil {
log.Debug().Err(err).Msg("stream cannot get pipeline number")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: pipeline not found\n\n"))
2017-07-24 19:57:07 +00:00
return
}
stepID, err := strconv.ParseInt(c.Param("stepId"), 10, 64)
if err != nil {
log.Debug().Err(err).Msg("step id invalid")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: step id invalid\n\n"))
return
}
step, err := _store.StepLoad(stepID)
2017-07-24 19:57:07 +00:00
if err != nil {
log.Debug().Err(err).Msg("stream cannot get step number")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: process not found\n\n"))
2017-07-24 19:57:07 +00:00
return
}
if step.PipelineID != pl.ID {
// make sure we cannot read arbitrary logs by id
err = fmt.Errorf("step with id %d is not part of repo %s", stepID, repo.FullName)
log.Debug().Err(err).Msg("event error")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: "+err.Error()+"\n\n"))
return
}
2024-06-13 15:18:32 +00:00
if step.State != model.StatusPending && step.State != model.StatusRunning {
log.Debug().Msg("step not running (anymore).")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: step not running (anymore)\n\n"))
2017-07-24 19:57:07 +00:00
return
}
logChan := make(chan []byte, 10)
ctx, cancel := context.WithCancelCause(
2017-07-24 19:57:07 +00:00
context.Background(),
)
log.Debug().Msg("log stream: connection opened")
2017-07-24 19:57:07 +00:00
defer func() {
cancel(nil)
close(logChan)
log.Debug().Msg("log stream: connection closed")
2017-07-24 19:57:07 +00:00
}()
2024-06-13 15:18:32 +00:00
err = server.Config.Services.Logs.Open(ctx, step.ID)
if err != nil {
log.Error().Err(err).Msg("log stream: open failed")
logWriteStringErr(io.WriteString(rw, "event: error\ndata: can't open stream\n\n"))
return
}
2017-07-24 19:57:07 +00:00
go func() {
batches := make(logging.LogChan, maxQueuedBatchesPerClient)
go func() {
defer func() {
if r := recover(); r != nil {
log.Error().Msgf("error sending log message: %v", r)
}
}()
for entries := range batches {
for _, entry := range entries {
select {
case <-ctx.Done():
return
default:
if ee, err := json.Marshal(entry); err == nil {
logChan <- ee
} else {
log.Error().Err(err).Msg("unable to serialize log entry")
}
}
2017-07-24 19:57:07 +00:00
}
}
}()
err := server.Config.Services.Logs.Tail(ctx, step.ID, batches)
if err != nil {
log.Error().Err(err).Msg("tail of logs failed")
}
2017-07-24 19:57:07 +00:00
logWriteStringErr(io.WriteString(rw, "event: error\ndata: eof\n\n"))
2017-07-24 19:57:07 +00:00
cancel(err)
2017-07-24 19:57:07 +00:00
}()
id := 1
last, _ := strconv.Atoi(
c.Request.Header.Get("Last-Event-ID"),
)
if last != 0 {
log.Debug().Msgf("log stream: reconnect: last-event-id: %d", last)
2017-07-24 19:57:07 +00:00
}
// retry: 10000\n
for {
select {
// after 1 hour of idle (no response) end the stream.
// this is more of a safety mechanism than anything,
// and can be removed once the code is more mature.
case <-time.After(time.Hour):
return
case <-rw.CloseNotify():
return
case <-ctx.Done():
return
2017-08-22 15:49:44 +00:00
case <-time.After(time.Second * 30):
logWriteStringErr(io.WriteString(rw, ": ping\n\n"))
2017-08-22 15:49:44 +00:00
flusher.Flush()
case buf, ok := <-logChan:
2017-07-24 19:57:07 +00:00
if ok {
if id > last {
logWriteStringErr(io.WriteString(rw, "id: "+strconv.Itoa(id)))
logWriteStringErr(io.WriteString(rw, "\n"))
logWriteStringErr(io.WriteString(rw, "data: "))
logWriteStringErr(rw.Write(buf))
logWriteStringErr(io.WriteString(rw, "\n\n"))
2017-07-24 19:57:07 +00:00
flusher.Flush()
}
id++
}
}
}
}
// logWriteStringErr reports a failed write to the event stream, discarding
// the byte count that io.WriteString / Write return alongside the error.
func logWriteStringErr(_ int, err error) {
	if err == nil {
		return
	}
	log.Error().Err(err).Caller(1).Msg("fail to write string")
}