woodpecker/server/stream.go

// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"time"

	"github.com/laszlocph/woodpecker/cncd/logging"
	"github.com/laszlocph/woodpecker/cncd/pubsub"
	"github.com/laszlocph/woodpecker/model"
	"github.com/laszlocph/woodpecker/router/middleware/session"
	"github.com/laszlocph/woodpecker/store"

	"github.com/Sirupsen/logrus"
	"github.com/gin-gonic/gin"
)

// EventStreamSSE streams the build event feed to the client as
// Server-Sent Events, for compatibility with QUIC and HTTP/2.
func EventStreamSSE(c *gin.Context) {
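	// advertise a server-sent event stream and disable proxy buffering
	// (X-Accel-Buffering is honored by nginx) so events reach the client
	// as soon as they are flushed.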
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
2017-07-24 17:23:22 +00:00
c.Header("X-Accel-Buffering", "no")
2017-07-18 16:27:20 +00:00
rw := c.Writer
flusher, ok := rw.(http.Flusher)
if !ok {
c.String(500, "Streaming not supported")
return
}

	// ping the client
	io.WriteString(rw, ": ping\n\n")
	flusher.Flush()

	logrus.Debugf("user feed: connection opened")

	user := session.User(c)
	repo := map[string]bool{}
	if user != nil {
		repos, _ := store.FromContext(c).RepoList(user)
		for _, r := range repos {
			repo[r.FullName] = true
		}
	}
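
	// events are fanned in through a buffered channel; the context is
	// cancelled when the client disconnects or the subscription ends.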
	eventc := make(chan []byte, 10)
	ctx, cancel := context.WithCancel(
		context.Background(),
	)
	defer func() {
		cancel()
		close(eventc)
		logrus.Debugf("user feed: connection closed")
	}()
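
	// subscribe to the event feed in the background and forward events the
	// user is allowed to see: their own repositories, or public repositories.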
	go func() {
		Config.Services.Pubsub.Subscribe(ctx, "topic/events", func(m pubsub.Message) {
			defer func() {
				recover() // fix #2480
			}()
			name := m.Labels["repo"]
			priv := m.Labels["private"]
			if repo[name] || priv == "false" {
				select {
				case <-ctx.Done():
					return
				default:
					eventc <- m.Data
				}
			}
		})
		cancel()
	}()
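
	// write loop: forward events to the client as SSE data frames and send
	// a comment ping every 30 seconds to keep the idle connection open.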
	for {
		select {
		case <-rw.CloseNotify():
			return
		case <-ctx.Done():
			return
		case <-time.After(time.Second * 30):
			io.WriteString(rw, ": ping\n\n")
			flusher.Flush()
		case buf, ok := <-eventc:
			if ok {
				io.WriteString(rw, "data: ")
				rw.Write(buf)
				io.WriteString(rw, "\n\n")
				flusher.Flush()
			}
		}
	}
}
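
// Clients consume these endpoints as a standard SSE stream: comment lines
// starting with ':' are keep-alive pings, "data:" frames carry the payload,
// and log frames also carry an incrementing "id:" used to resume after a
// reconnect. A minimal Go consumer might look like the sketch below; the
// route is an assumed example and is not taken from this file.
//
//	resp, err := http.Get(server + "/stream/logs/octocat/hello-world/1/1")
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//	scanner := bufio.NewScanner(resp.Body)
//	for scanner.Scan() {
//		if line := scanner.Text(); strings.HasPrefix(line, "data: ") {
//			fmt.Println(strings.TrimPrefix(line, "data: "))
//		}
//	}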

// LogStreamSSE streams the logs of a running build process to the client
// as Server-Sent Events.
func LogStreamSSE(c *gin.Context) {
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
c.Header("X-Accel-Buffering", "no")
rw := c.Writer
flusher, ok := rw.(http.Flusher)
if !ok {
c.String(500, "Streaming not supported")
return
}
2017-08-22 15:49:44 +00:00
io.WriteString(rw, ": ping\n\n")
flusher.Flush()

	// repo := session.Repo(c)
	//
	// // parse the build number and job sequence number from
	// // the request parameters.
	// num, _ := strconv.Atoi(c.Params.ByName("number"))
	// ppid, _ := strconv.Atoi(c.Params.ByName("ppid"))
	// name := c.Params.ByName("proc")
	//
	// build, err := store.GetBuildNumber(c, repo, num)
	// if err != nil {
	// 	c.AbortWithError(404, err)
	// 	return
	// }
	//
	// proc, err := store.FromContext(c).ProcChild(build, ppid, name)
	// if err != nil {
	// 	c.AbortWithError(404, err)
	// 	return
	// }

	repo := session.Repo(c)
	buildn, _ := strconv.Atoi(c.Param("build"))
	jobn, _ := strconv.Atoi(c.Param("number"))

	build, err := store.GetBuildNumber(c, repo, buildn)
	if err != nil {
		logrus.Debugln("stream cannot get build number.", err)
		io.WriteString(rw, "event: error\ndata: build not found\n\n")
		return
	}

	proc, err := store.FromContext(c).ProcFind(build, jobn)
	if err != nil {
		logrus.Debugln("stream cannot get proc number.", err)
		io.WriteString(rw, "event: error\ndata: process not found\n\n")
		return
	}

	if proc.State != model.StatusRunning {
		logrus.Debugln("stream not found.")
		io.WriteString(rw, "event: error\ndata: stream not found\n\n")
		return
	}
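
	// log entries are fanned in through a buffered channel; the context is
	// cancelled when the client disconnects or the tail below finishes.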
	logc := make(chan []byte, 10)
	ctx, cancel := context.WithCancel(
		context.Background(),
	)

	logrus.Debugf("log stream: connection opened")

	defer func() {
		cancel()
		close(logc)
		logrus.Debugf("log stream: connection closed")
	}()
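
	// tail the logs of the running proc in the background and forward each
	// entry to logc; when the tail ends, an "eof" error event is written to
	// tell the client the stream is over.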
	go func() {
		// TODO remove global variable
		Config.Services.Logs.Tail(ctx, fmt.Sprint(proc.ID), func(entries ...*logging.Entry) {
			defer func() {
				recover() // fix #2480
			}()
			for _, entry := range entries {
				select {
				case <-ctx.Done():
					return
				default:
					logc <- entry.Data
				}
			}
		})
		io.WriteString(rw, "event: error\ndata: eof\n\n")
		cancel()
	}()
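
	// SSE clients resend the id of the last event they received in the
	// Last-Event-ID header when they reconnect; entries with ids up to that
	// value are skipped below so a reconnect does not duplicate log output.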
	id := 1
	last, _ := strconv.Atoi(
		c.Request.Header.Get("Last-Event-ID"),
	)
	if last != 0 {
		logrus.Debugf("log stream: reconnect: last-event-id: %d", last)
	}

	// end the stream after one hour as a safety mechanism. the timer is
	// created once, outside the select, so that pings and log events do not
	// reset it; this can be removed once the code is more mature.
	timeout := time.After(time.Hour)

	// retry: 10000\n
	for {
		select {
		case <-timeout:
			return
		case <-rw.CloseNotify():
			return
		case <-ctx.Done():
			return
		case <-time.After(time.Second * 30):
			io.WriteString(rw, ": ping\n\n")
			flusher.Flush()
		case buf, ok := <-logc:
			if ok {
				if id > last {
					io.WriteString(rw, "id: "+strconv.Itoa(id))
					io.WriteString(rw, "\n")
					io.WriteString(rw, "data: ")
					rw.Write(buf)
					io.WriteString(rw, "\n\n")
					flusher.Flush()
				}
				id++
			}
		}
	}
}