removed legacy code, updated cli

Brad Rydzewski 2017-03-16 18:14:02 +08:00
parent 73f8c62528
commit a95b118cb3
129 changed files with 1041 additions and 7532 deletions

View file

@ -22,17 +22,16 @@ pipeline:
when:
event: push
# archive:
# image: plugins/s3
# acl: public-read
# bucket: downloads.drone.io
# source: release/**/*.*
# access_key: ${AWS_ACCESS_KEY_ID}
# secret_key: ${AWS_SECRET_ACCESS_KEY}
# when:
# event: push
# branch: master
archive:
image: plugins/s3
acl: public-read
bucket: downloads.drone.io
source: release/**/*.*
access_key: ${AWS_ACCESS_KEY_ID}
secret_key: ${AWS_SECRET_ACCESS_KEY}
when:
event: push
branch: master
publish:
image: plugins/docker

View file

@ -1,329 +0,0 @@
package agent
import (
"fmt"
"net"
"net/url"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/drone/drone/build"
"github.com/drone/drone/model"
"github.com/drone/drone/version"
"github.com/drone/drone/yaml"
"github.com/drone/drone/yaml/transform"
"github.com/drone/envsubst"
)
type Logger interface {
Write(*build.Line)
}
type Agent struct {
Update UpdateFunc
Logger LoggerFunc
Engine build.Engine
Timeout time.Duration
Platform string
Namespace string
Extension []string
Escalate []string
Netrc []string
Local string
Pull bool
}
func (a *Agent) Poll() error {
// logrus.Infof("Starting build %s/%s#%d.%d",
// payload.Repo.Owner, payload.Repo.Name, payload.Build.Number, payload.Job.Number)
//
//
// logrus.Infof("Finished build %s/%s#%d.%d",
// payload.Repo.Owner, payload.Repo.Name, payload.Build.Number, payload.Job.Number)
return nil
}
func (a *Agent) Run(payload *model.Work, cancel <-chan bool) error {
payload.Job.Status = model.StatusRunning
payload.Job.Started = time.Now().Unix()
spec, err := a.prep(payload)
if err != nil {
payload.Job.Error = err.Error()
payload.Job.ExitCode = 255
payload.Job.Finished = payload.Job.Started
payload.Job.Status = model.StatusError
a.Update(payload)
return err
}
a.Update(payload)
err = a.exec(spec, payload, cancel)
if err != nil {
payload.Job.ExitCode = 255
payload.Job.Error = err.Error()
}
if exitErr, ok := err.(*build.ExitError); ok {
payload.Job.ExitCode = exitErr.Code
payload.Job.Error = "" // exit errors are already written to the log
}
payload.Job.Finished = time.Now().Unix()
switch payload.Job.ExitCode {
case 128, 130, 137:
payload.Job.Status = model.StatusKilled
case 0:
payload.Job.Status = model.StatusSuccess
default:
payload.Job.Status = model.StatusFailure
}
a.Update(payload)
return err
}
func (a *Agent) prep(w *model.Work) (*yaml.Config, error) {
envs := toEnv(w)
envSecrets := map[string]string{}
// list of secrets to interpolate in the yaml
for _, secret := range w.Secrets {
if (w.Verified || secret.SkipVerify) && secret.MatchEvent(w.Build.Event) {
envSecrets[secret.Name] = secret.Value
}
}
var err error
w.Yaml, err = envsubst.Eval(w.Yaml, func(s string) string {
env, ok := envSecrets[s]
if !ok {
env, _ = envs[s]
}
if strings.Contains(env, "\n") {
env = fmt.Sprintf("%q", env)
}
return env
})
if err != nil {
return nil, err
}
// append secrets when verified or when a secret does not require
// verification
var secrets []*model.Secret
for _, secret := range w.Secrets {
if w.Verified || secret.SkipVerify {
secrets = append(secrets, secret)
}
}
// inject the netrc file into the clone plugin if the repository is
// private and requires authentication.
if w.Repo.IsPrivate {
secrets = append(secrets, &model.Secret{
Name: "DRONE_NETRC_USERNAME",
Value: w.Netrc.Login,
Images: []string{"*"},
Events: []string{"*"},
})
secrets = append(secrets, &model.Secret{
Name: "DRONE_NETRC_PASSWORD",
Value: w.Netrc.Password,
Images: []string{"*"},
Events: []string{"*"},
})
secrets = append(secrets, &model.Secret{
Name: "DRONE_NETRC_MACHINE",
Value: w.Netrc.Machine,
Images: []string{"*"},
Events: []string{"*"},
})
}
conf, err := yaml.ParseString(w.Yaml)
if err != nil {
return nil, err
}
src := "src"
if url, _ := url.Parse(w.Repo.Link); url != nil {
host, _, err := net.SplitHostPort(url.Host)
if err == nil {
url.Host = host
}
src = filepath.Join(src, url.Host, url.Path)
}
transform.Clone(conf, w.Repo.Kind)
transform.Environ(conf, envs)
transform.DefaultFilter(conf)
if w.BuildLast != nil {
transform.ChangeFilter(conf, w.BuildLast.Status)
}
transform.ImageSecrets(conf, secrets, w.Build.Event)
transform.Identifier(conf)
transform.WorkspaceTransform(conf, "/drone", src)
if err := transform.Check(conf, w.Repo.IsTrusted); err != nil {
return nil, err
}
transform.CommandTransform(conf)
transform.ImagePull(conf, a.Pull)
transform.ImageTag(conf)
if err := transform.ImageEscalate(conf, a.Escalate); err != nil {
return nil, err
}
transform.PluginParams(conf)
if a.Local != "" {
transform.PluginDisable(conf, true)
transform.ImageVolume(conf, []string{a.Local + ":" + conf.Workspace.Path})
}
transform.Pod(conf, a.Platform)
if err := transform.RemoteTransform(conf, a.Extension); err != nil {
return nil, err
}
return conf, nil
}
func (a *Agent) exec(spec *yaml.Config, payload *model.Work, cancel <-chan bool) error {
conf := build.Config{
Engine: a.Engine,
Buffer: 500,
}
pipeline := conf.Pipeline(spec)
defer pipeline.Teardown()
// setup the build environment
if err := pipeline.Setup(); err != nil {
return err
}
replacer := NewSecretReplacer(payload.Secrets)
timeout := time.After(time.Duration(payload.Repo.Timeout) * time.Minute)
for {
select {
case <-pipeline.Done():
return pipeline.Err()
case <-cancel:
pipeline.Stop()
return fmt.Errorf("termination request received, build cancelled")
case <-timeout:
pipeline.Stop()
return fmt.Errorf("maximum time limit exceeded, build cancelled")
case <-time.After(a.Timeout):
pipeline.Stop()
return fmt.Errorf("terminal inactive for %v, build cancelled", a.Timeout)
case <-pipeline.Next():
// TODO(bradrydzewski) this entire block of code should probably get
// encapsulated in the pipeline.
status := model.StatusSuccess
if pipeline.Err() != nil {
status = model.StatusFailure
}
// updates the build status passed into each container. I realize this is
// a bit out of place and will work to resolve.
pipeline.Head().Environment["DRONE_BUILD_STATUS"] = status
if !pipeline.Head().Constraints.Match(
a.Platform,
payload.Build.Deploy,
payload.Build.Event,
payload.Build.Branch,
status, payload.Job.Environment) { // TODO: fix this whole section
pipeline.Skip()
} else {
pipeline.Exec()
}
case line := <-pipeline.Pipe():
line.Out = replacer.Replace(line.Out)
a.Logger(line)
}
}
}
func toEnv(w *model.Work) map[string]string {
envs := map[string]string{
"CI": "drone",
"DRONE": "true",
"DRONE_ARCH": "linux/amd64",
"DRONE_REPO": w.Repo.FullName,
"DRONE_REPO_SCM": w.Repo.Kind,
"DRONE_REPO_OWNER": w.Repo.Owner,
"DRONE_REPO_NAME": w.Repo.Name,
"DRONE_REPO_LINK": w.Repo.Link,
"DRONE_REPO_AVATAR": w.Repo.Avatar,
"DRONE_REPO_BRANCH": w.Repo.Branch,
"DRONE_REPO_PRIVATE": fmt.Sprintf("%v", w.Repo.IsPrivate),
"DRONE_REPO_TRUSTED": fmt.Sprintf("%v", w.Repo.IsTrusted),
"DRONE_REMOTE_URL": w.Repo.Clone,
"DRONE_COMMIT_SHA": w.Build.Commit,
"DRONE_COMMIT_REF": w.Build.Ref,
"DRONE_COMMIT_REFSPEC": w.Build.Refspec,
"DRONE_COMMIT_BRANCH": w.Build.Branch,
"DRONE_COMMIT_LINK": w.Build.Link,
"DRONE_COMMIT_MESSAGE": w.Build.Message,
"DRONE_COMMIT_AUTHOR": w.Build.Author,
"DRONE_COMMIT_AUTHOR_EMAIL": w.Build.Email,
"DRONE_COMMIT_AUTHOR_AVATAR": w.Build.Avatar,
"DRONE_BUILD_NUMBER": fmt.Sprintf("%d", w.Build.Number),
"DRONE_BUILD_EVENT": w.Build.Event,
"DRONE_BUILD_STATUS": w.Build.Status,
"DRONE_BUILD_LINK": fmt.Sprintf("%s/%s/%d", w.System.Link, w.Repo.FullName, w.Build.Number),
"DRONE_BUILD_CREATED": fmt.Sprintf("%d", w.Build.Created),
"DRONE_BUILD_STARTED": fmt.Sprintf("%d", w.Build.Started),
"DRONE_BUILD_FINISHED": fmt.Sprintf("%d", w.Build.Finished),
"DRONE_JOB_NUMBER": fmt.Sprintf("%d", w.Job.Number),
"DRONE_JOB_STATUS": w.Job.Status,
"DRONE_JOB_ERROR": w.Job.Error,
"DRONE_JOB_EXIT_CODE": fmt.Sprintf("%d", w.Job.ExitCode),
"DRONE_JOB_STARTED": fmt.Sprintf("%d", w.Job.Started),
"DRONE_JOB_FINISHED": fmt.Sprintf("%d", w.Job.Finished),
"DRONE_YAML_VERIFIED": fmt.Sprintf("%v", w.Verified),
"DRONE_YAML_SIGNED": fmt.Sprintf("%v", w.Signed),
"DRONE_BRANCH": w.Build.Branch,
"DRONE_COMMIT": w.Build.Commit,
"DRONE_VERSION": version.Version,
}
if w.Build.Event == model.EventTag {
envs["DRONE_TAG"] = strings.TrimPrefix(w.Build.Ref, "refs/tags/")
}
if w.Build.Event == model.EventPull {
envs["DRONE_PULL_REQUEST"] = pullRegexp.FindString(w.Build.Ref)
}
if w.Build.Event == model.EventDeploy {
envs["DRONE_DEPLOY_TO"] = w.Build.Deploy
}
if w.BuildLast != nil {
envs["DRONE_PREV_BUILD_STATUS"] = w.BuildLast.Status
envs["DRONE_PREV_BUILD_NUMBER"] = fmt.Sprintf("%v", w.BuildLast.Number)
envs["DRONE_PREV_COMMIT_SHA"] = w.BuildLast.Commit
}
// inject matrix values as environment variables
for key, val := range w.Job.Environment {
envs[key] = val
}
return envs
}
var pullRegexp = regexp.MustCompile("\\d+")

View file

@ -1,46 +0,0 @@
package agent
import (
"strings"
"github.com/drone/drone/model"
)
// SecretReplacer hides secrets from being exposed by the build output.
type SecretReplacer interface {
// Replace conceals instances of secrets found in s.
Replace(s string) string
}
// NewSecretReplacer creates a SecretReplacer based on whether any value in
// secrets requests it be hidden.
func NewSecretReplacer(secrets []*model.Secret) SecretReplacer {
var r []string
for _, s := range secrets {
if s.Conceal {
r = append(r, s.Value, "*****")
}
}
if len(r) == 0 {
return &noopReplacer{}
}
return &secretReplacer{
replacer: strings.NewReplacer(r...),
}
}
type noopReplacer struct{}
func (*noopReplacer) Replace(s string) string {
return s
}
type secretReplacer struct {
replacer *strings.Replacer
}
func (r *secretReplacer) Replace(s string) string {
return r.replacer.Replace(s)
}

View file

@ -1,39 +0,0 @@
package agent
import (
"testing"
"github.com/drone/drone/model"
"github.com/franela/goblin"
)
const testString = "This is SECRET: secret_value"
func TestSecret(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("SecretReplacer", func() {
g.It("Should conceal secret", func() {
secrets := []*model.Secret{
{
Name: "SECRET",
Value: "secret_value",
Conceal: true,
},
}
r := NewSecretReplacer(secrets)
g.Assert(r.Replace(testString)).Equal("This is SECRET: *****")
})
g.It("Should not conceal secret", func() {
secrets := []*model.Secret{
{
Name: "SECRET",
Value: "secret_value",
Conceal: false,
},
}
r := NewSecretReplacer(secrets)
g.Assert(r.Replace(testString)).Equal(testString)
})
})
}

View file

@ -1,67 +0,0 @@
package agent
import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/drone/drone/build"
"github.com/drone/drone/model"
"github.com/drone/mq/logger"
"github.com/drone/mq/stomp"
)
// UpdateFunc handles build pipeline status updates.
type UpdateFunc func(*model.Work)
// LoggerFunc handles build pipeline logging updates.
type LoggerFunc func(*build.Line)
var NoopUpdateFunc = func(*model.Work) {}
var TermLoggerFunc = func(line *build.Line) {
fmt.Println(line)
}
// NewClientUpdater returns an updater that sends updated build details
// to the drone server.
func NewClientUpdater(client *stomp.Client) UpdateFunc {
return func(w *model.Work) {
err := client.SendJSON("/queue/updates", w)
if err != nil {
logger.Warningf("Error updating %s/%s#%d.%d. %s",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number, err)
}
if w.Job.Status != model.StatusRunning {
var dest = fmt.Sprintf("/topic/logs.%d", w.Job.ID)
var opts = []stomp.MessageOption{
stomp.WithHeader("eof", "true"),
stomp.WithRetain("all"),
}
if err := client.Send(dest, []byte("eof"), opts...); err != nil {
logger.Warningf("Error sending eof %s/%s#%d.%d. %s",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number, err)
}
}
}
}
func NewClientLogger(client *stomp.Client, id int64, limit int64) LoggerFunc {
var size int64
var dest = fmt.Sprintf("/topic/logs.%d", id)
var opts = []stomp.MessageOption{
stomp.WithRetain("all"),
}
return func(line *build.Line) {
if size > limit {
return
}
if err := client.SendJSON(dest, line, opts...); err != nil {
logrus.Errorf("Error streaming build logs. %s", err)
}
size += int64(len(line.Out))
}
}
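
For orientation, a sketch of how these callbacks plug into the legacy Agent defined earlier in this commit (the newAgent name, the 5 MB log cap, and the 15 minute inactivity timeout are illustrative, not part of this file):

package agent

import (
	"time"

	"github.com/drone/drone/build"
	"github.com/drone/drone/model"
	"github.com/drone/mq/stomp"
)

// newAgent is a hypothetical constructor showing where NewClientUpdater and
// NewClientLogger are consumed: the Agent invokes Update on every status
// change and Logger for every line of console output.
func newAgent(client *stomp.Client, engine build.Engine, w *model.Work) *Agent {
	return &Agent{
		Update:  NewClientUpdater(client),                     // status -> /queue/updates
		Logger:  NewClientLogger(client, w.Job.ID, 5*1000000), // logs -> /topic/logs.{id}, capped
		Engine:  engine,
		Timeout: 15 * time.Minute, // cancel the build when log output goes quiet
	}
}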

View file

@ -1,48 +0,0 @@
package build
import "github.com/drone/drone/yaml"
// Config defines the configuration for creating the Pipeline.
type Config struct {
Engine Engine
// Buffer defines the size of the buffer for the channel to which the
// console output is streamed.
Buffer uint
}
// Pipeline creates a build Pipeline using the specific configuration for
// the given Yaml specification.
func (c *Config) Pipeline(spec *yaml.Config) *Pipeline {
pipeline := Pipeline{
engine: c.Engine,
pipe: make(chan *Line, c.Buffer),
next: make(chan error),
done: make(chan error),
}
var containers []*yaml.Container
containers = append(containers, spec.Services...)
containers = append(containers, spec.Pipeline...)
for _, c := range containers {
if c.Disabled {
continue
}
next := &element{Container: c}
if pipeline.head == nil {
pipeline.head = next
pipeline.tail = next
} else {
pipeline.tail.next = next
pipeline.tail = next
}
}
go func() {
pipeline.next <- nil
}()
return &pipeline
}

View file

@ -1,112 +0,0 @@
package docker
import (
"io"
"github.com/drone/drone/build"
"github.com/drone/drone/build/docker/internal"
"github.com/drone/drone/yaml"
"github.com/samalba/dockerclient"
)
type dockerEngine struct {
client dockerclient.Client
}
func (e *dockerEngine) ContainerStart(container *yaml.Container) (string, error) {
conf := toContainerConfig(container)
auth := toAuthConfig(container)
// pull the image if it does not exist or if the Container
// is configured to always pull a new image.
_, err := e.client.InspectImage(container.Image)
if err != nil || container.Pull {
e.client.PullImage(container.Image, auth)
}
// create and start the container and return the Container ID.
id, err := e.client.CreateContainer(conf, container.ID, auth)
if err != nil {
return id, err
}
err = e.client.StartContainer(id, &conf.HostConfig)
if err != nil {
// remove the container if it cannot be started
e.client.RemoveContainer(id, true, true)
return id, err
}
return id, nil
}
func (e *dockerEngine) ContainerStop(id string) error {
e.client.StopContainer(id, 1)
e.client.KillContainer(id, "9")
return nil
}
func (e *dockerEngine) ContainerRemove(id string) error {
e.client.StopContainer(id, 1)
e.client.KillContainer(id, "9")
e.client.RemoveContainer(id, true, true)
return nil
}
func (e *dockerEngine) ContainerWait(id string) (*build.State, error) {
// wait for the container to exit
//
// TODO(bradrydzewski) we should have a for loop here
// to re-connect and wait if this channel returns a
// result even though the container is still running.
//
<-e.client.Wait(id)
v, err := e.client.InspectContainer(id)
if err != nil {
return nil, err
}
return &build.State{
ExitCode: v.State.ExitCode,
OOMKilled: v.State.OOMKilled,
}, nil
}
func (e *dockerEngine) ContainerLogs(id string) (io.ReadCloser, error) {
opts := &dockerclient.LogOptions{
Follow: true,
Stdout: true,
Stderr: true,
}
piper, pipew := io.Pipe()
go func() {
defer pipew.Close()
// sometimes the docker logs stream fails due to parsing errors. this
// routine will check for such a failure and attempt to resume
// if necessary.
for i := 0; i < 5; i++ {
if i > 0 {
opts.Tail = 1
}
rc, err := e.client.ContainerLogs(id, opts)
if err != nil {
return
}
defer rc.Close()
// use Docker StdCopy
internal.StdCopy(pipew, pipew, rc)
// check to see if the container is still running. If not,
// we can safely exit and assume there are no more logs left
// to stream.
v, err := e.client.InspectContainer(id)
if err != nil || !v.State.Running {
return
}
}
}()
return piper, nil
}

View file

@ -1 +0,0 @@
package docker

View file

@ -1,25 +0,0 @@
package docker
import (
"github.com/drone/drone/build"
"github.com/samalba/dockerclient"
)
// NewClient returns a new Docker engine using the provided Docker client.
func NewClient(client dockerclient.Client) build.Engine {
return &dockerEngine{client}
}
// New returns a new Docker engine from the provided DOCKER_HOST and
// DOCKER_CERT_PATH environment variables.
func New(host, cert string, tls bool) (build.Engine, error) {
config, err := dockerclient.TLSConfigFromCertPath(cert)
if err == nil && tls {
config.InsecureSkipVerify = true
}
client, err := dockerclient.NewDockerClient(host, config)
if err != nil {
return nil, err
}
return NewClient(client), nil
}
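
A hedged usage sketch of the constructor above, reading the same DOCKER_* variables the agent command exposes as flags (the main wrapper and defaults are illustrative):

package main

import (
	"log"
	"os"

	"github.com/drone/drone/build/docker"
)

func main() {
	host := os.Getenv("DOCKER_HOST")
	if host == "" {
		host = "unix:///var/run/docker.sock" // default used by the agent flags
	}
	// The boolean mirrors the agent's docker-tls-verify flag; New resolves the
	// TLS configuration from the certificate directory.
	engine, err := docker.New(host, os.Getenv("DOCKER_CERT_PATH"), os.Getenv("DOCKER_TLS_VERIFY") != "")
	if err != nil {
		log.Fatal(err)
	}
	_ = engine // hand the engine to a build.Config / Pipeline
}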

View file

@ -1 +0,0 @@
package docker

View file

@ -1 +0,0 @@
This is an internal copy of the Docker stdcopy package that removes the logrus debug logging. The original package is found at https://github.com/docker/docker/tree/master/pkg/stdcopy

View file

@ -1,167 +0,0 @@
package internal
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte
const (
// Stdin represents standard input stream type.
Stdin StdType = iota
// Stdout represents standard output stream type.
Stdout
// Stderr represents standard error stream type.
Stderr
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
// stdWriter is wrapper of io.Writer with extra customized info.
type stdWriter struct {
io.Writer
prefix byte
}
// Write sends the buffer to the underlying writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to multiplex the output.
// It makes stdWriter implement io.Writer.
func (w *stdWriter) Write(buf []byte) (n int, err error) {
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
if buf == nil {
return 0, nil
}
header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf)))
line := append(header[:], buf...)
n, err = w.Writer.Write(line)
n -= stdWriterPrefixLen
if n < 0 {
n = 0
}
return
}
// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) io.Writer {
return &stdWriter{
Writer: w,
prefix: byte(t),
}
}
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
// Make sure we have at least a full header
for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < stdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Check the first byte to know where to write
switch StdType(buf[stdWriterFdIndex]) {
case Stdin:
fallthrough
case Stdout:
// Write on stdout
out = dstout
case Stderr:
// Write on stderr
out = dsterr
default:
return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+stdWriterPrefixLen > bufLen {
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+stdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
if ew != nil {
return 0, ew
}
// If the frame has not been fully written: error
if nw != frameSize {
return 0, io.ErrShortWrite
}
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+stdWriterPrefixLen:])
// Move the index
nr -= frameSize + stdWriterPrefixLen
}
}
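
To make the framing concrete, a minimal round-trip sketch (the demoStdCopy helper is hypothetical and would live in this package):

package internal

import (
	"bytes"
	"fmt"
)

// demoStdCopy multiplexes a stdout frame and a stderr frame into one buffer,
// then splits them back apart. Each frame carries an 8-byte header: the
// stream id at byte 0 and the big-endian payload size at bytes 4-7.
func demoStdCopy() error {
	var muxed bytes.Buffer
	if _, err := NewStdWriter(&muxed, Stdout).Write([]byte("to stdout\n")); err != nil {
		return err
	}
	if _, err := NewStdWriter(&muxed, Stderr).Write([]byte("to stderr\n")); err != nil {
		return err
	}

	var out, errs bytes.Buffer
	written, err := StdCopy(&out, &errs, &muxed)
	if err != nil {
		return err
	}
	fmt.Printf("stdout=%q stderr=%q written=%d\n", out.String(), errs.String(), written)
	return nil
}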

View file

@ -1,260 +0,0 @@
package internal
import (
"bytes"
"errors"
"io"
"io/ioutil"
"strings"
"testing"
)
func TestNewStdWriter(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
if writer == nil {
t.Fatalf("NewStdWriter with an invalid StdType should not return nil.")
}
}
func TestWriteWithUnitializedStdWriter(t *testing.T) {
writer := stdWriter{
Writer: nil,
prefix: byte(Stdout),
}
n, err := writer.Write([]byte("Something here"))
if n != 0 || err == nil {
t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter")
}
}
func TestWriteWithNilBytes(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
n, err := writer.Write(nil)
if err != nil {
t.Fatalf("Shouldn't have fail when given no data")
}
if n > 0 {
t.Fatalf("Write should have written 0 byte, but has written %d", n)
}
}
func TestWrite(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
data := []byte("Test StdWrite.Write")
n, err := writer.Write(data)
if err != nil {
t.Fatalf("Error while writing with StdWrite")
}
if n != len(data) {
t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n)
}
}
type errWriter struct {
n int
err error
}
func (f *errWriter) Write(buf []byte) (int, error) {
return f.n, f.err
}
func TestWriteWithWriterError(t *testing.T) {
expectedError := errors.New("expected")
expectedReturnedBytes := 10
writer := NewStdWriter(&errWriter{
n: stdWriterPrefixLen + expectedReturnedBytes,
err: expectedError}, Stdout)
data := []byte("This won't get written, sigh")
n, err := writer.Write(data)
if err != expectedError {
t.Fatalf("Didn't get expected error.")
}
if n != expectedReturnedBytes {
t.Fatalf("Didn't get expected written bytes %d, got %d.",
expectedReturnedBytes, n)
}
}
func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) {
writer := NewStdWriter(&errWriter{n: -1}, Stdout)
data := []byte("This won't get written, sigh")
actual, _ := writer.Write(data)
if actual != 0 {
t.Fatalf("Expected returned written bytes equal to 0, got %d", actual)
}
}
func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) {
buffer = new(bytes.Buffer)
dstOut := NewStdWriter(buffer, Stdout)
_, err = dstOut.Write(stdOutBytes)
if err != nil {
return
}
dstErr := NewStdWriter(buffer, Stderr)
_, err = dstErr.Write(stdErrBytes)
return
}
func TestStdCopyWriteAndRead(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer)
if err != nil {
t.Fatal(err)
}
expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes)
if written != int64(expectedTotalWritten) {
t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written)
}
}
type customReader struct {
n int
err error
totalCalls int
correctCalls int
src *bytes.Buffer
}
func (f *customReader) Read(buf []byte) (int, error) {
f.totalCalls++
if f.totalCalls <= f.correctCalls {
return f.src.Read(buf)
}
return f.n, f.err
}
func TestStdCopyReturnsErrorReadingHeader(t *testing.T) {
expectedError := errors.New("error")
reader := &customReader{
err: expectedError}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
if written != 0 {
t.Fatalf("Expected 0 bytes read, got %d", written)
}
if err != expectedError {
t.Fatalf("Didn't get expected error")
}
}
func TestStdCopyReturnsErrorReadingFrame(t *testing.T) {
expectedError := errors.New("error")
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
reader := &customReader{
correctCalls: 1,
n: stdWriterPrefixLen + 1,
err: expectedError,
src: buffer}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
if written != 0 {
t.Fatalf("Expected 0 bytes read, got %d", written)
}
if err != expectedError {
t.Fatalf("Didn't get expected error")
}
}
func TestStdCopyDetectsCorruptedFrame(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
reader := &customReader{
correctCalls: 1,
n: stdWriterPrefixLen + 1,
err: io.EOF,
src: buffer}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
if written != startingBufLen {
t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written)
}
if err != nil {
t.Fatal("Didn't get nil error")
}
}
func TestStdCopyWithInvalidInputHeader(t *testing.T) {
dstOut := NewStdWriter(ioutil.Discard, Stdout)
dstErr := NewStdWriter(ioutil.Discard, Stderr)
src := strings.NewReader("Invalid input")
_, err := StdCopy(dstOut, dstErr, src)
if err == nil {
t.Fatal("StdCopy with invalid input header should fail.")
}
}
func TestStdCopyWithCorruptedPrefix(t *testing.T) {
data := []byte{0x01, 0x02, 0x03}
src := bytes.NewReader(data)
written, err := StdCopy(nil, nil, src)
if err != nil {
t.Fatalf("StdCopy should not return an error with corrupted prefix.")
}
if written != 0 {
t.Fatalf("StdCopy should have written 0, but has written %d", written)
}
}
func TestStdCopyReturnsWriteErrors(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
expectedError := errors.New("expected")
dstOut := &errWriter{err: expectedError}
written, err := StdCopy(dstOut, ioutil.Discard, buffer)
if written != 0 {
t.Fatalf("StdCopy should have written 0, but has written %d", written)
}
if err != expectedError {
t.Fatalf("Didn't get expected error, got %v", err)
}
}
func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
dstOut := &errWriter{n: startingBufLen - 10}
written, err := StdCopy(dstOut, ioutil.Discard, buffer)
if written != 0 {
t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written)
}
if err != io.ErrShortWrite {
t.Fatalf("Didn't get expected io.ErrShortWrite error")
}
}
func BenchmarkWrite(b *testing.B) {
w := NewStdWriter(ioutil.Discard, Stdout)
data := []byte("Test line for testing stdwriter performance\n")
data = bytes.Repeat(data, 100)
b.SetBytes(int64(len(data)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := w.Write(data); err != nil {
b.Fatal(err)
}
}
}

View file

@ -1,102 +0,0 @@
package docker
import (
"fmt"
"strings"
"github.com/drone/drone/yaml"
"github.com/samalba/dockerclient"
)
// helper function that converts the Container data structure to the expected
// dockerclient.ContainerConfig.
func toContainerConfig(c *yaml.Container) *dockerclient.ContainerConfig {
config := &dockerclient.ContainerConfig{
Image: c.Image,
Env: toEnvironmentSlice(c.Environment),
Labels: c.Labels,
Cmd: c.Command,
Entrypoint: c.Entrypoint,
WorkingDir: c.WorkingDir,
HostConfig: dockerclient.HostConfig{
Privileged: c.Privileged,
NetworkMode: c.Network,
Memory: c.MemLimit,
ShmSize: c.ShmSize,
CpuShares: c.CPUShares,
CpuQuota: c.CPUQuota,
CpusetCpus: c.CPUSet,
MemorySwappiness: -1,
OomKillDisable: c.OomKillDisable,
},
}
if len(config.Entrypoint) == 0 {
config.Entrypoint = nil
}
if len(config.Cmd) == 0 {
config.Cmd = nil
}
if len(c.ExtraHosts) > 0 {
config.HostConfig.ExtraHosts = c.ExtraHosts
}
if len(c.DNS) != 0 {
config.HostConfig.Dns = c.DNS
}
if len(c.DNSSearch) != 0 {
config.HostConfig.DnsSearch = c.DNSSearch
}
if len(c.VolumesFrom) != 0 {
config.HostConfig.VolumesFrom = c.VolumesFrom
}
config.Volumes = map[string]struct{}{}
for _, path := range c.Volumes {
if strings.Index(path, ":") == -1 {
config.Volumes[path] = struct{}{}
continue
}
parts := strings.Split(path, ":")
config.Volumes[parts[1]] = struct{}{}
config.HostConfig.Binds = append(config.HostConfig.Binds, path)
}
for _, path := range c.Devices {
if strings.Index(path, ":") == -1 {
continue
}
parts := strings.Split(path, ":")
device := dockerclient.DeviceMapping{
PathOnHost: parts[0],
PathInContainer: parts[1],
CgroupPermissions: "rwm",
}
config.HostConfig.Devices = append(config.HostConfig.Devices, device)
}
return config
}
// helper function that converts the AuthConfig data structure to the expected
// dockerclient.AuthConfig.
func toAuthConfig(container *yaml.Container) *dockerclient.AuthConfig {
if container.AuthConfig.Username == "" &&
container.AuthConfig.Password == "" {
return nil
}
return &dockerclient.AuthConfig{
Email: container.AuthConfig.Email,
Username: container.AuthConfig.Username,
Password: container.AuthConfig.Password,
}
}
// helper function that converts a key value map of environment variables to a
// string slice in key=value format.
func toEnvironmentSlice(env map[string]string) []string {
var envs []string
for k, v := range env {
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
}
return envs
}

View file

@ -1,24 +0,0 @@
package docker
import (
"testing"
)
func Test_toContainerConfig(t *testing.T) {
t.Skip()
}
func Test_toAuthConfig(t *testing.T) {
t.Skip()
}
func Test_toEnvironmentSlice(t *testing.T) {
env := map[string]string{
"HOME": "/root",
}
envs := toEnvironmentSlice(env)
want, got := "HOME=/root", envs[0]
if want != got {
t.Errorf("Wanted envar %s got %s", want, got)
}
}

View file

@ -1,16 +0,0 @@
package build
import (
"io"
"github.com/drone/drone/yaml"
)
// Engine defines the container runtime engine.
type Engine interface {
ContainerStart(*yaml.Container) (string, error)
ContainerStop(string) error
ContainerRemove(string) error
ContainerWait(string) (*State, error)
ContainerLogs(string) (io.ReadCloser, error)
}
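
For testing without a Docker daemon, any stub satisfying this interface will do; a hypothetical no-op implementation might look like this:

package build

import (
	"io"
	"io/ioutil"
	"strings"

	"github.com/drone/drone/yaml"
)

// nopEngine is an illustrative stub Engine: every container "starts"
// instantly, exits zero, and emits a single log line.
type nopEngine struct{}

func (nopEngine) ContainerStart(*yaml.Container) (string, error) { return "nop", nil }
func (nopEngine) ContainerStop(string) error                     { return nil }
func (nopEngine) ContainerRemove(string) error                   { return nil }
func (nopEngine) ContainerWait(string) (*State, error)           { return &State{ExitCode: 0}, nil }
func (nopEngine) ContainerLogs(string) (io.ReadCloser, error) {
	return ioutil.NopCloser(strings.NewReader("hello from nopEngine\n")), nil
}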

View file

@ -1,37 +0,0 @@
package build
import (
"errors"
"fmt"
)
var (
// ErrSkip is used as a return value when container execution should be
// skipped at runtime. It is not returned as an error by any function.
ErrSkip = errors.New("Skip")
// ErrTerm is used as a return value when the runner should terminate
// execution and exit. It is not returned as an error by any function.
ErrTerm = errors.New("Terminate")
)
// An ExitError reports an unsuccessful exit.
type ExitError struct {
Name string
Code int
}
// Error returns the error message in string format.
func (e *ExitError) Error() string {
return fmt.Sprintf("%s : exit code %d", e.Name, e.Code)
}
// An OomError reports the process received an OOMKill from the kernel.
type OomError struct {
Name string
}
// Error returns the error message in string format.
func (e *OomError) Error() string {
return fmt.Sprintf("%s : received oom kill", e.Name)
}

View file

@ -1,26 +0,0 @@
package build
import (
"testing"
"github.com/franela/goblin"
)
func TestErrors(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Error messages", func() {
g.It("should include OOM details", func() {
err := OomError{Name: "golang"}
got, want := err.Error(), "golang : received oom kill"
g.Assert(got).Equal(want)
})
g.It("should include Exit code", func() {
err := ExitError{Name: "golang", Code: 255}
got, want := err.Error(), "golang : exit code 255"
g.Assert(got).Equal(want)
})
})
}

View file

@ -1,241 +0,0 @@
package build
import (
"bufio"
"strconv"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/drone/drone/yaml"
)
// element represents a link in the linked list.
type element struct {
*yaml.Container
next *element
}
// Pipeline represents a build pipeline.
type Pipeline struct {
conf *yaml.Config
head *element
tail *element
wait sync.WaitGroup
pipe chan (*Line)
next chan (error)
done chan (error)
err error
containers []string
volumes []string
networks []string
engine Engine
}
// Done returns when the process is done executing.
func (p *Pipeline) Done() <-chan error {
return p.done
}
// Err returns the error for the current process.
func (p *Pipeline) Err() error {
return p.err
}
// Next returns the next step in the process.
func (p *Pipeline) Next() <-chan error {
return p.next
}
// Exec executes the current step.
func (p *Pipeline) Exec() {
go func() {
defer func() {
if r := recover(); r != nil {
logrus.Errorln("recover executing build step", r)
}
}()
err := p.exec(p.head.Container)
if err != nil {
p.err = err
}
p.step()
}()
}
// Skip skips the current step.
func (p *Pipeline) Skip() {
p.step()
}
// Pipe returns the build output pipe.
func (p *Pipeline) Pipe() <-chan *Line {
return p.pipe
}
// Head returns the head item in the list.
func (p *Pipeline) Head() *yaml.Container {
return p.head.Container
}
// Tail returns the tail item in the list.
func (p *Pipeline) Tail() *yaml.Container {
return p.tail.Container
}
// Stop stops the pipeline.
func (p *Pipeline) Stop() {
go func() {
defer func() {
if r := recover(); r != nil {
logrus.Errorln("recover stopping the pipeline", r)
}
}()
p.done <- ErrTerm
}()
}
// Setup prepares the build pipeline environment.
func (p *Pipeline) Setup() error {
return nil
}
// Teardown removes the pipeline environment.
func (p *Pipeline) Teardown() {
for _, id := range p.containers {
p.engine.ContainerRemove(id)
}
close(p.next)
close(p.done)
// TODO we have a race condition here where the program can try to async
// write to a closed pipe channel. This package, in general, needs to be
// tested for race conditions.
// close(p.pipe)
}
// step steps through the pipeline to head.next
func (p *Pipeline) step() {
if p.head == p.tail {
go func() {
defer func() {
if r := recover(); r != nil {
logrus.Errorln("recover executing step function", r)
}
}()
// stop all containers
for _, id := range p.containers {
p.engine.ContainerStop(id)
}
// wait for all logs to terminate
// p.wait.Done() // this is for the ambassador
p.wait.Wait()
// signal completion
p.done <- nil
}()
} else {
go func() {
defer func() {
if r := recover(); r != nil {
logrus.Errorln("recover executing step to head function", r)
}
}()
p.head = p.head.next
p.next <- nil
}()
}
}
// close closes open channels and signals the pipeline is done.
func (p *Pipeline) close(err error) {
go func() {
defer func() {
if r := recover(); r != nil {
logrus.Errorln("recover closing the pipeline", r)
}
}()
p.done <- err
}()
}
func (p *Pipeline) exec(c *yaml.Container) error {
name, err := p.engine.ContainerStart(c)
if err != nil {
return err
}
p.containers = append(p.containers, name)
p.wait.Add(1)
go func() {
defer func() {
if r := recover(); r != nil {
logrus.Errorln("recover writing build output", r)
}
p.wait.Done()
}()
rc, rerr := p.engine.ContainerLogs(name)
if rerr != nil {
return
}
defer rc.Close()
num := 0
now := time.Now().UTC()
scanner := bufio.NewScanner(rc)
for scanner.Scan() {
p.pipe <- &Line{
Proc: c.Name,
Time: int64(time.Since(now).Seconds()),
Pos: num,
Out: scanner.Text(),
}
num++
}
}()
// exit when running container in detached mode in background
if c.Detached {
return nil
}
state, err := p.engine.ContainerWait(name)
if err != nil {
return err
}
p.wait.Add(1)
go func() {
defer func() {
if r := recover(); r != nil {
logrus.Errorln("recover writing exit code to output", r)
}
p.wait.Done()
}()
p.pipe <- &Line{
Proc: c.Name,
Type: ExitCodeLine,
Out: strconv.Itoa(state.ExitCode),
}
}()
if state.OOMKilled {
return &OomError{c.Name}
} else if state.ExitCode != 0 {
return &ExitError{c.Name, state.ExitCode}
}
return nil
}
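
A condensed sketch of the consumer side of these channels, essentially what the Agent's exec method earlier in this commit does, with timeouts and constraint checks trimmed (the runPipeline wrapper is illustrative):

package build

import (
	"fmt"

	"github.com/drone/drone/yaml"
)

// runPipeline drives the channel protocol: Next signals a step is ready,
// Exec or Skip advances it, Pipe streams console output, and Done delivers
// the terminal error once the list is exhausted.
func runPipeline(engine Engine, spec *yaml.Config) error {
	conf := Config{Engine: engine, Buffer: 500}
	pipeline := conf.Pipeline(spec)
	defer pipeline.Teardown()

	if err := pipeline.Setup(); err != nil {
		return err
	}
	for {
		select {
		case <-pipeline.Done():
			return pipeline.Err()
		case <-pipeline.Next():
			// A real caller inspects Head().Constraints here and calls
			// Skip() for steps that should not run.
			pipeline.Exec()
		case line := <-pipeline.Pipe():
			fmt.Println(line.String())
		}
	}
}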

View file

@ -1,42 +0,0 @@
package build
var sampleYaml = `
image: hello-world
build:
context: .
dockerfile: Dockerfile
workspace:
path: src/github.com/octocat/hello-world
base: /go
pipeline:
test:
image: golang
commands:
- go install
- go test
build:
image: golang
commands:
- go build
when:
event: push
notify:
image: slack
channel: dev
when:
event: failure
services:
database:
image: mysql
networks:
custom:
driver: overlay
volumes:
custom:
driver: blockbridge
`

View file

@ -1,35 +0,0 @@
package build
import "fmt"
const (
StdoutLine int = iota
StderrLine
ExitCodeLine
MetadataLine
ProgressLine
)
// Line is a line of console output.
type Line struct {
Proc string `json:"proc,omitempty"`
Time int64 `json:"time,omitempty"`
Type int `json:"type,omitempty"`
Pos int `json:"pos,omitempty"`
Out string `json:"out,omitempty"`
}
func (l *Line) String() string {
switch l.Type {
case ExitCodeLine:
return fmt.Sprintf("[%s] exit code %s", l.Proc, l.Out)
default:
return fmt.Sprintf("[%s:L%v:%vs] %s", l.Proc, l.Pos, l.Time, l.Out)
}
}
// State defines the state of the container.
type State struct {
ExitCode int // container exit code
OOMKilled bool // container exited due to oom error
}

View file

@ -1,23 +0,0 @@
package build
import (
"testing"
"github.com/franela/goblin"
)
func TestLine(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Line output", func() {
g.It("should prefix string() with metadata", func() {
line := Line{
Proc: "redis",
Time: 60,
Pos: 1,
Out: "starting redis server",
}
g.Assert(line.String()).Equal("[redis:L1:60s] starting redis server")
})
})
}

View file

@ -1,77 +1,37 @@
package agent
import (
"fmt"
"context"
"io"
"log"
"math"
"os"
"os/signal"
"strings"
"net/url"
"sync"
"syscall"
"time"
"github.com/drone/drone/model"
"github.com/drone/mq/logger"
"github.com/drone/mq/stomp"
"github.com/tidwall/redlog"
"github.com/cncd/pipeline/pipeline"
"github.com/cncd/pipeline/pipeline/backend"
"github.com/cncd/pipeline/pipeline/backend/docker"
"github.com/cncd/pipeline/pipeline/interrupt"
"github.com/cncd/pipeline/pipeline/multipart"
"github.com/cncd/pipeline/pipeline/rpc"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/samalba/dockerclient"
"github.com/tevino/abool"
"github.com/urfave/cli"
)
// AgentCmd is the exported command for starting the drone agent.
var AgentCmd = cli.Command{
Name: "agent",
Usage: "starts the drone agent",
Action: start,
Action: loop,
Flags: []cli.Flag{
cli.StringFlag{
EnvVar: "DOCKER_HOST",
Name: "docker-host",
Usage: "docker daemon address",
Value: "unix:///var/run/docker.sock",
},
cli.BoolFlag{
EnvVar: "DOCKER_TLS_VERIFY",
Name: "docker-tls-verify",
Usage: "docker daemon supports tlsverify",
},
cli.StringFlag{
EnvVar: "DOCKER_CERT_PATH",
Name: "docker-cert-path",
Usage: "docker certificate directory",
Value: "",
},
cli.IntFlag{
EnvVar: "DOCKER_MAX_PROCS",
Name: "docker-max-procs",
Usage: "limit number of running docker processes",
Value: 2,
},
cli.StringFlag{
EnvVar: "DOCKER_OS",
Name: "docker-os",
Usage: "docker operating system",
Value: "linux",
},
cli.StringFlag{
EnvVar: "DOCKER_ARCH",
Name: "docker-arch",
Usage: "docker architecture system",
Value: "amd64",
},
cli.StringFlag{
EnvVar: "DRONE_SERVER,DRONE_ENDPOINT",
Name: "drone-server",
Usage: "drone server address",
Value: "ws://localhost:8000/ws/broker",
},
cli.StringFlag{
EnvVar: "DRONE_TOKEN",
Name: "drone-token",
Usage: "drone authorization token",
},
cli.StringFlag{
EnvVar: "DRONE_SECRET,DRONE_AGENT_SECRET",
Name: "drone-secret",
@ -83,89 +43,21 @@ var AgentCmd = cli.Command{
Usage: "drone server backoff interval",
Value: time.Second * 15,
},
cli.DurationFlag{
EnvVar: "DRONE_PING",
Name: "ping",
Usage: "drone server ping frequency",
Value: time.Minute * 5,
cli.IntFlag{
Name: "retry-limit",
EnvVar: "DRONE_RETRY_LIMIT",
Value: math.MaxInt32,
},
cli.BoolFlag{
EnvVar: "DRONE_DEBUG",
Name: "debug",
Usage: "start the agent in debug mode",
},
cli.DurationFlag{
EnvVar: "DRONE_TIMEOUT",
Name: "timeout",
Usage: "drone timeout due to log inactivity",
Value: time.Minute * 15,
},
cli.StringFlag{
EnvVar: "DRONE_FILTER",
Name: "filter",
Usage: "filter jobs processed by this agent",
},
cli.IntFlag{
EnvVar: "DRONE_MAX_LOGS",
Name: "max-log-size",
Usage: "drone maximum log size in megabytes",
Value: 5,
},
cli.StringSliceFlag{
EnvVar: "DRONE_PLUGIN_PRIVILEGED",
Name: "privileged",
Usage: "plugins that require privileged mode",
Value: &cli.StringSlice{
"plugins/docker",
"plugins/docker:*",
"plugins/gcr",
"plugins/gcr:*",
"plugins/ecr",
"plugins/ecr:*",
},
},
cli.StringFlag{
EnvVar: "DRONE_PLUGIN_NAMESPACE",
Name: "namespace",
Value: "plugins",
Usage: "default plugin image namespace",
},
cli.BoolTFlag{
EnvVar: "DRONE_PLUGIN_PULL",
Name: "pull",
Usage: "always pull latest plugin images",
},
cli.StringSliceFlag{
EnvVar: "DRONE_YAML_EXTENSION",
Name: "extension",
Usage: "custom plugin extension endpoint",
},
//
//
//
cli.BoolFlag{
EnvVar: "DRONE_CANARY",
Name: "canary",
Usage: "enable experimental features at your own risk",
},
// cli.StringFlag{
// Name: "endpoint",
// EnvVar: "DRONE_ENDPOINT,DRONE_SERVER",
// Value: "ws://localhost:9999/ws/rpc",
// },
// cli.DurationFlag{
// Name: "backoff",
// EnvVar: "DRONE_BACKOFF",
// Value: time.Second * 15,
// },
cli.IntFlag{
Name: "retry-limit",
EnvVar: "DRONE_RETRY_LIMIT",
Value: math.MaxInt32,
},
cli.IntFlag{
Name: "max-procs",
EnvVar: "DRONE_MAX_PROCS",
@ -179,139 +71,188 @@ var AgentCmd = cli.Command{
},
}
func start(c *cli.Context) {
if c.Bool("canary") {
if err := loop(c); err != nil {
fmt.Println(err)
os.Exit(1)
}
return
}
log := redlog.New(os.Stderr)
log.SetLevel(0)
logger.SetLogger(log)
// debug level if requested by user
if c.Bool("debug") {
logrus.SetLevel(logrus.DebugLevel)
log.SetLevel(1)
} else {
logrus.SetLevel(logrus.WarnLevel)
}
var accessToken string
if c.String("drone-secret") != "" {
// secretToken := c.String("drone-secret")
accessToken = c.String("drone-secret")
// accessToken, _ = token.New(token.AgentToken, "").Sign(secretToken)
} else {
accessToken = c.String("drone-token")
}
logger.Noticef("connecting to server %s", c.String("drone-server"))
server := strings.TrimRight(c.String("drone-server"), "/")
tls, err := dockerclient.TLSConfigFromCertPath(c.String("docker-cert-path"))
if err == nil {
tls.InsecureSkipVerify = c.Bool("docker-tls-verify")
}
docker, err := dockerclient.NewDockerClient(c.String("docker-host"), tls)
func loop(c *cli.Context) error {
endpoint, err := url.Parse(
c.String("drone-server"),
)
if err != nil {
logrus.Fatal(err)
return err
}
filter := rpc.Filter{
Labels: map[string]string{
"platform": c.String("platform"),
},
}
var client *stomp.Client
client, err := rpc.NewClient(
endpoint.String(),
rpc.WithRetryLimit(
c.Int("retry-limit"),
),
rpc.WithBackoff(
c.Duration("backoff"),
),
rpc.WithToken(
c.String("drone-secret"),
),
)
if err != nil {
return err
}
defer client.Close()
handler := func(m *stomp.Message) {
running.Add(1)
defer func() {
running.Done()
client.Ack(m.Ack)
sigterm := abool.New()
ctx := context.Background()
ctx = interrupt.WithContextFunc(ctx, func() {
println("ctrl+c received, terminating process")
sigterm.Set()
})
var wg sync.WaitGroup
parallel := c.Int("max-procs")
wg.Add(parallel)
for i := 0; i < parallel; i++ {
go func() {
defer wg.Done()
for {
if sigterm.IsSet() {
return
}
if err := run(ctx, client, filter); err != nil {
log.Printf("build runner encountered error: exiting: %s", err)
return
}
}
}()
r := pipelinet{
drone: client,
docker: docker,
config: config{
platform: c.String("docker-os") + "/" + c.String("docker-arch"),
timeout: c.Duration("timeout"),
namespace: c.String("namespace"),
privileged: c.StringSlice("privileged"),
pull: c.BoolT("pull"),
logs: int64(c.Int("max-log-size")) * 1000000,
extension: c.StringSlice("extension"),
},
}
work := new(model.Work)
m.Unmarshal(work)
r.run(work)
}
handleSignals()
backoff := c.Duration("backoff")
for {
// dial the drone server to establish a TCP connection.
client, err = stomp.Dial(server)
if err != nil {
logger.Warningf("connection failed, retry in %v. %s", backoff, err)
<-time.After(backoff)
continue
}
opts := []stomp.MessageOption{
stomp.WithCredentials("x-token", accessToken),
}
// initialize the stomp session and authenticate.
if err = client.Connect(opts...); err != nil {
logger.Warningf("session failed, retry in %v. %s", backoff, err)
<-time.After(backoff)
continue
}
opts = []stomp.MessageOption{
stomp.WithAck("client"),
stomp.WithPrefetch(
c.Int("docker-max-procs"),
),
}
if filter := c.String("filter"); filter != "" {
opts = append(opts, stomp.WithSelector(filter))
}
// subscribe to the pending build queue.
client.Subscribe("/queue/pending", stomp.HandlerFunc(func(m *stomp.Message) {
go handler(m) // HACK until we a channel based Subscribe implementation
}), opts...)
logger.Noticef("connection established, ready to process builds.")
<-client.Done()
logger.Warningf("connection interrupted, attempting to reconnect.")
}
wg.Wait()
return nil
}
// tracks running builds
var running sync.WaitGroup
const (
maxFileUpload = 5000000
maxLogsUpload = 5000000
)
func handleSignals() {
// Graceful shut-down on SIGINT/SIGTERM
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
signal.Notify(c, syscall.SIGTERM)
func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
log.Println("pipeline: request next execution")
// get the next job from the queue
work, err := client.Next(ctx, filter)
if err != nil {
return err
}
if work == nil {
return nil
}
log.Printf("pipeline: received next execution: %s", work.ID)
// new docker engine
engine, err := docker.NewEnv()
if err != nil {
return err
}
timeout := time.Hour
if minutes := work.Timeout; minutes != 0 {
timeout = time.Duration(minutes) * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
cancelled := abool.New()
go func() {
if werr := client.Wait(ctx, work.ID); werr != nil {
cancelled.SetTo(true)
log.Printf("pipeline: cancel signal received: %s: %s", work.ID, werr)
cancel()
} else {
log.Printf("pipeline: cancel channel closed: %s", work.ID)
}
}()
go func() {
<-c
logger.Warningf("SIGTERM received.")
logger.Warningf("wait for running builds to finish.")
running.Wait()
logger.Warningf("done.")
os.Exit(0)
for {
select {
case <-ctx.Done():
log.Printf("pipeline: cancel ping loop: %s", work.ID)
return
case <-time.After(time.Minute):
log.Printf("pipeline: ping queue: %s", work.ID)
client.Extend(ctx, work.ID)
}
}
}()
state := rpc.State{}
state.Started = time.Now().Unix()
err = client.Update(context.Background(), work.ID, state)
if err != nil {
log.Printf("pipeline: error updating pipeline status: %s: %s", work.ID, err)
}
var uploads sync.WaitGroup
defaultLogger := pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error {
part, rerr := rc.NextPart()
if rerr != nil {
return rerr
}
uploads.Add(1)
writer := rpc.NewLineWriter(client, work.ID, proc.Alias)
rlimit := io.LimitReader(part, maxLogsUpload)
io.Copy(writer, rlimit)
defer func() {
log.Printf("pipeline: finish uploading logs: %s: step %s", work.ID, proc.Alias)
uploads.Done()
}()
part, rerr = rc.NextPart()
if rerr != nil {
return nil
}
rlimit = io.LimitReader(part, maxFileUpload)
mime := part.Header().Get("Content-Type")
if serr := client.Upload(context.Background(), work.ID, mime, rlimit); serr != nil {
log.Printf("pipeline: cannot upload artifact: %s: %s: %s", work.ID, mime, serr)
}
return nil
})
err = pipeline.New(work.Config,
pipeline.WithContext(ctx),
pipeline.WithLogger(defaultLogger),
pipeline.WithTracer(pipeline.DefaultTracer),
pipeline.WithEngine(engine),
).Run()
state.Finished = time.Now().Unix()
state.Exited = true
if err != nil {
state.Error = err.Error()
if xerr, ok := err.(*pipeline.ExitError); ok {
state.ExitCode = xerr.Code
}
if xerr, ok := err.(*pipeline.OomError); ok {
state.ExitCode = xerr.Code
}
if cancelled.IsSet() {
state.ExitCode = 137
} else if state.ExitCode == 0 {
state.ExitCode = 1
}
}
log.Printf("pipeline: execution complete: %s", work.ID)
uploads.Wait()
err = client.Update(context.Background(), work.ID, state)
if err != nil {
log.Printf("Pipeine: error updating pipeline status: %s: %s", work.ID, err)
}
return nil
}

View file

@ -1,85 +0,0 @@
package agent
import (
"time"
"github.com/Sirupsen/logrus"
"github.com/drone/drone/agent"
"github.com/drone/drone/build/docker"
"github.com/drone/drone/model"
"github.com/drone/mq/stomp"
"github.com/samalba/dockerclient"
)
type config struct {
platform string
namespace string
privileged []string
pull bool
logs int64
timeout time.Duration
extension []string
}
type pipelinet struct {
drone *stomp.Client
docker dockerclient.Client
config config
}
func (r *pipelinet) run(w *model.Work) {
// defer func() {
// // r.drone.Ack(id, opts)
// }()
logrus.Infof("Starting build %s/%s#%d.%d",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number)
cancel := make(chan bool, 1)
engine := docker.NewClient(r.docker)
a := agent.Agent{
Update: agent.NewClientUpdater(r.drone),
Logger: agent.NewClientLogger(r.drone, w.Job.ID, r.config.logs),
Engine: engine,
Timeout: r.config.timeout,
Platform: r.config.platform,
Namespace: r.config.namespace,
Escalate: r.config.privileged,
Extension: r.config.extension,
Pull: r.config.pull,
}
cancelFunc := func(m *stomp.Message) {
defer m.Release()
id := m.Header.GetInt64("job-id")
if id == w.Job.ID {
cancel <- true
logrus.Infof("Cancel build %s/%s#%d.%d",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number)
}
}
// signal for canceling the build.
sub, err := r.drone.Subscribe("/topic/cancel", stomp.HandlerFunc(cancelFunc))
if err != nil {
logrus.Errorf("Error subscribing to /topic/cancel. %s", err)
}
defer func() {
r.drone.Unsubscribe(sub)
}()
a.Run(w, cancel)
// if err := r.drone.LogPost(w.Job.ID, ioutil.NopCloser(&buf)); err != nil {
// logrus.Errorf("Error sending logs for %s/%s#%d.%d",
// w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number)
// }
// stream.Close()
logrus.Infof("Finished build %s/%s#%d.%d",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number)
}

View file

@ -1,206 +0,0 @@
package agent
import (
"context"
"io"
"log"
"net/url"
"sync"
"time"
"github.com/cncd/pipeline/pipeline"
"github.com/cncd/pipeline/pipeline/backend"
"github.com/cncd/pipeline/pipeline/backend/docker"
"github.com/cncd/pipeline/pipeline/interrupt"
"github.com/cncd/pipeline/pipeline/multipart"
"github.com/cncd/pipeline/pipeline/rpc"
"github.com/codegangsta/cli"
"github.com/tevino/abool"
)
func loop(c *cli.Context) error {
endpoint, err := url.Parse(
c.String("drone-server"),
)
if err != nil {
return err
}
filter := rpc.Filter{
Labels: map[string]string{
"platform": c.String("platform"),
},
}
client, err := rpc.NewClient(
endpoint.String(),
rpc.WithRetryLimit(
c.Int("retry-limit"),
),
rpc.WithBackoff(
c.Duration("backoff"),
),
rpc.WithToken(
c.String("drone-secret"),
),
)
if err != nil {
return err
}
defer client.Close()
sigterm := abool.New()
ctx := context.Background()
ctx = interrupt.WithContextFunc(ctx, func() {
println("ctrl+c received, terminating process")
sigterm.Set()
})
var wg sync.WaitGroup
parallel := c.Int("max-procs")
wg.Add(parallel)
for i := 0; i < parallel; i++ {
go func() {
defer wg.Done()
for {
if sigterm.IsSet() {
return
}
if err := run(ctx, client, filter); err != nil {
log.Printf("build runner encountered error: exiting: %s", err)
return
}
}
}()
}
wg.Wait()
return nil
}
const (
maxFileUpload = 5000000
maxLogsUpload = 5000000
)
func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
log.Println("pipeline: request next execution")
// get the next job from the queue
work, err := client.Next(ctx, filter)
if err != nil {
return err
}
if work == nil {
return nil
}
log.Printf("pipeline: received next execution: %s", work.ID)
// new docker engine
engine, err := docker.NewEnv()
if err != nil {
return err
}
timeout := time.Hour
if minutes := work.Timeout; minutes != 0 {
timeout = time.Duration(minutes) * time.Minute
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
cancelled := abool.New()
go func() {
if werr := client.Wait(ctx, work.ID); werr != nil {
cancelled.SetTo(true)
log.Printf("pipeline: cancel signal received: %s: %s", work.ID, werr)
cancel()
} else {
log.Printf("pipeline: cancel channel closed: %s", work.ID)
}
}()
go func() {
for {
select {
case <-ctx.Done():
log.Printf("pipeline: cancel ping loop: %s", work.ID)
return
case <-time.After(time.Minute):
log.Printf("pipeline: ping queue: %s", work.ID)
client.Extend(ctx, work.ID)
}
}
}()
state := rpc.State{}
state.Started = time.Now().Unix()
err = client.Update(context.Background(), work.ID, state)
if err != nil {
log.Printf("pipeline: error updating pipeline status: %s: %s", work.ID, err)
}
var uploads sync.WaitGroup
defaultLogger := pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error {
part, rerr := rc.NextPart()
if rerr != nil {
return rerr
}
uploads.Add(1)
writer := rpc.NewLineWriter(client, work.ID, proc.Alias)
rlimit := io.LimitReader(part, maxLogsUpload)
io.Copy(writer, rlimit)
defer func() {
log.Printf("pipeline: finish uploading logs: %s: step %s", work.ID, proc.Alias)
uploads.Done()
}()
part, rerr = rc.NextPart()
if rerr != nil {
return nil
}
rlimit = io.LimitReader(part, maxFileUpload)
mime := part.Header().Get("Content-Type")
if serr := client.Upload(context.Background(), work.ID, mime, rlimit); serr != nil {
log.Printf("pipeline: cannot upload artifact: %s: %s: %s", work.ID, mime, serr)
}
return nil
})
err = pipeline.New(work.Config,
pipeline.WithContext(ctx),
pipeline.WithLogger(defaultLogger),
pipeline.WithTracer(pipeline.DefaultTracer),
pipeline.WithEngine(engine),
).Run()
state.Finished = time.Now().Unix()
state.Exited = true
if err != nil {
state.Error = err.Error()
if xerr, ok := err.(*pipeline.ExitError); ok {
state.ExitCode = xerr.Code
}
if xerr, ok := err.(*pipeline.OomError); ok {
state.ExitCode = xerr.Code
}
if cancelled.IsSet() {
state.ExitCode = 137
} else if state.ExitCode == 0 {
state.ExitCode = 1
}
}
log.Printf("pipeline: execution complete: %s", work.ID)
uploads.Wait()
err = client.Update(context.Background(), work.ID, state)
if err != nil {
log.Printf("Pipeine: error updating pipeline status: %s: %s", work.ID, err)
}
return nil
}

View file

@ -1,64 +0,0 @@
package main
import (
"html/template"
"log"
"os"
"time"
"github.com/codegangsta/cli"
)
var agentsCmd = cli.Command{
Name: "agents",
Usage: "manage agents",
Action: func(c *cli.Context) {
if err := agentList(c); err != nil {
log.Fatalln(err)
}
},
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",
Usage: "format output",
Value: tmplAgentList,
},
},
}
func agentList(c *cli.Context) error {
client, err := newClient(c)
if err != nil {
return err
}
agents, err := client.AgentList()
if err != nil {
return err
}
tmpl, err := template.New("_").Funcs(funcMap).Parse(c.String("format") + "\n")
if err != nil {
return err
}
for _, agent := range agents {
tmpl.Execute(os.Stdout, agent)
}
return nil
}
// template for build list information
var tmplAgentList = "\x1b[33m{{ .Address }} \x1b[0m" + `
Platform: {{ .Platform }}
Capacity: {{ .Capacity }} concurrent build(s)
Pinged: {{ since .Updated }} ago
Uptime: {{ since .Created }}
`
var funcMap = template.FuncMap{
"since": func(t int64) string {
d := time.Now().Sub(time.Unix(t, 0))
return d.String()
},
}

View file

@ -1,6 +1,6 @@
package main
import "github.com/codegangsta/cli"
import "github.com/urfave/cli"
var buildCmd = cli.Command{
Name: "build",

View file

@ -1,22 +1,17 @@
package main
import (
"log"
"os"
"strconv"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var buildInfoCmd = cli.Command{
Name: "info",
Usage: "show build details",
Action: func(c *cli.Context) {
if err := buildInfo(c); err != nil {
log.Fatalln(err)
}
},
Name: "info",
Usage: "show build details",
Action: buildInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -1,21 +1,16 @@
package main
import (
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var buildLastCmd = cli.Command{
Name: "last",
Usage: "show latest build details",
Action: func(c *cli.Context) {
if err := buildLast(c); err != nil {
log.Fatalln(err)
}
},
Name: "last",
Usage: "show latest build details",
Action: buildLast,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -1,21 +1,16 @@
package main
import (
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var buildListCmd = cli.Command{
Name: "list",
Usage: "show build history",
Action: func(c *cli.Context) {
if err := buildList(c); err != nil {
log.Fatalln(err)
}
},
Name: "list",
Usage: "show build history",
Action: buildList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -6,8 +6,8 @@ import (
"log"
"strconv"
"github.com/codegangsta/cli"
"github.com/drone/drone/build"
"github.com/cncd/pipeline/pipeline/rpc"
"github.com/urfave/cli"
)
var buildLogsCmd = cli.Command{
@ -61,7 +61,7 @@ func buildLogs(c *cli.Context) error {
dec := json.NewDecoder(r)
fmt.Printf("Logs for build %s/%s#%d.%d\n", owner, name, number, job)
var line build.Line
var line rpc.Line
_, err = dec.Token()
if err != nil {

View file

@ -2,21 +2,16 @@ package main
import (
"fmt"
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var buildQueueCmd = cli.Command{
Name: "queue",
Usage: "show build queue",
Action: func(c *cli.Context) {
if err := buildQueue(c); err != nil {
log.Fatalln(err)
}
},
Name: "queue",
Usage: "show build queue",
Action: buildQueue,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -2,21 +2,16 @@ package main
import (
"fmt"
"log"
"strconv"
"github.com/codegangsta/cli"
"github.com/drone/drone/model"
"github.com/urfave/cli"
)
var buildStartCmd = cli.Command{
Name: "start",
Usage: "start a build",
Action: func(c *cli.Context) {
if err := buildStart(c); err != nil {
log.Fatalln(err)
}
},
Name: "start",
Usage: "start a build",
Action: buildStart,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "fork",

View file

@ -2,20 +2,15 @@ package main
import (
"fmt"
"log"
"strconv"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var buildStopCmd = cli.Command{
Name: "stop",
Usage: "stop a build",
Action: func(c *cli.Context) {
if err := buildStop(c); err != nil {
log.Fatalln(err)
}
},
Name: "stop",
Usage: "stop a build",
Action: buildStop,
}
func buildStop(c *cli.Context) (err error) {

View file

@ -1 +0,0 @@
package main

View file

@ -3,22 +3,17 @@ package main
import (
"fmt"
"html/template"
"log"
"os"
"strconv"
"github.com/codegangsta/cli"
"github.com/drone/drone/model"
"github.com/urfave/cli"
)
var deployCmd = cli.Command{
Name: "deploy",
Usage: "deploy code",
Action: func(c *cli.Context) {
if err := deploy(c); err != nil {
log.Fatalln(err)
}
},
Name: "deploy",
Usage: "deploy code",
Action: deploy,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -1,22 +1,26 @@
package main
import (
"fmt"
"io/ioutil"
"context"
"io"
"log"
"os"
"os/signal"
"path"
"path/filepath"
"strings"
"time"
"github.com/drone/drone/agent"
"github.com/drone/drone/build/docker"
"github.com/drone/drone/model"
"github.com/drone/drone/yaml"
"github.com/cncd/pipeline/pipeline"
"github.com/cncd/pipeline/pipeline/backend"
"github.com/cncd/pipeline/pipeline/backend/docker"
"github.com/cncd/pipeline/pipeline/frontend"
"github.com/cncd/pipeline/pipeline/frontend/yaml"
"github.com/cncd/pipeline/pipeline/frontend/yaml/compiler"
"github.com/cncd/pipeline/pipeline/interrupt"
"github.com/cncd/pipeline/pipeline/multipart"
"github.com/drone/envsubst"
"github.com/codegangsta/cli"
"github.com/joho/godotenv"
"github.com/urfave/cli"
)
var execCmd = cli.Command{
@ -33,439 +37,387 @@ var execCmd = cli.Command{
Usage: "build from local directory",
EnvVar: "DRONE_LOCAL",
},
cli.StringSliceFlag{
Name: "secret",
Usage: "build secrets in KEY=VALUE format",
EnvVar: "DRONE_SECRET",
},
cli.StringFlag{
Name: "secrets-file",
Usage: "build secrets file in KEY=VALUE format",
EnvVar: "DRONE_SECRETS_FILE",
},
cli.StringSliceFlag{
Name: "matrix",
Usage: "build matrix in KEY=VALUE format",
EnvVar: "DRONE_MATRIX",
},
cli.DurationFlag{
Name: "timeout",
Usage: "build timeout",
Value: time.Hour,
EnvVar: "DRONE_TIMEOUT",
},
cli.DurationFlag{
Name: "timeout.inactivity",
Usage: "build timeout for inactivity",
Value: time.Minute * 15,
EnvVar: "DRONE_TIMEOUT_INACTIVITY",
},
cli.BoolFlag{
EnvVar: "DRONE_PLUGIN_PULL",
Name: "pull",
Usage: "always pull latest plugin images",
cli.StringSliceFlag{
Name: "volumes",
Usage: "build volumes",
EnvVar: "DRONE_VOLUMES",
},
cli.StringSliceFlag{
EnvVar: "DRONE_PLUGIN_PRIVILEGED",
Name: "privileged",
Usage: "plugins that require privileged mode",
Name: "privileged",
Usage: "privileged plugins",
Value: &cli.StringSlice{
"plugins/docker",
"plugins/docker:*",
"plugins/gcr",
"plugins/gcr:*",
"plugins/ecr",
"plugins/ecr:*",
},
},
// Docker daemon flags
cli.StringFlag{
EnvVar: "DOCKER_HOST",
Name: "docker-host",
Usage: "docker daemon address",
Value: "unix:///var/run/docker.sock",
},
cli.BoolFlag{
EnvVar: "DOCKER_TLS_VERIFY",
Name: "docker-tls-verify",
Usage: "docker daemon supports tlsverify",
},
cli.StringFlag{
EnvVar: "DOCKER_CERT_PATH",
Name: "docker-cert-path",
Usage: "docker certificate directory",
Value: "",
},
//
// Please note the below flags are mirrored in the plugin starter kit and
// should be kept synchronized.
// https://github.com/drone/drone-plugin-starter
// Please note the below flags are mirrored in the pipec and
// should be kept synchronized. Do not edit directly
// https://github.com/cncd/pipeline/pipec
//
//
// workspace default
//
cli.StringFlag{
Name: "repo.fullname",
Usage: "repository full name",
EnvVar: "DRONE_REPO",
Name: "workspace-base",
Value: "/pipeline",
EnvVar: "DRONE_WORKSPACE_BASE",
},
cli.StringFlag{
Name: "repo.owner",
Usage: "repository owner",
EnvVar: "DRONE_REPO_OWNER",
Name: "workspace-path",
Value: "src",
EnvVar: "DRONE_WORKSPACE_PATH",
},
//
// netrc parameters
//
cli.StringFlag{
Name: "repo.name",
Usage: "repository name",
EnvVar: "DRONE_REPO_NAME",
},
cli.StringFlag{
Name: "repo.type",
Value: "git",
Usage: "repository type",
EnvVar: "DRONE_REPO_SCM",
},
cli.StringFlag{
Name: "repo.link",
Usage: "repository link",
EnvVar: "DRONE_REPO_LINK",
},
cli.StringFlag{
Name: "repo.avatar",
Usage: "repository avatar",
EnvVar: "DRONE_REPO_AVATAR",
},
cli.StringFlag{
Name: "repo.branch",
Usage: "repository default branch",
EnvVar: "DRONE_REPO_BRANCH",
},
cli.BoolFlag{
Name: "repo.private",
Usage: "repository is private",
EnvVar: "DRONE_REPO_PRIVATE",
},
cli.BoolTFlag{
Name: "repo.trusted",
Usage: "repository is trusted",
EnvVar: "DRONE_REPO_TRUSTED",
},
cli.StringFlag{
Name: "remote.url",
Usage: "git remote url",
EnvVar: "DRONE_REMOTE_URL",
},
cli.StringFlag{
Name: "commit.sha",
Usage: "git commit sha",
EnvVar: "DRONE_COMMIT_SHA",
},
cli.StringFlag{
Name: "commit.ref",
Value: "refs/heads/master",
Usage: "git commit ref",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "commit.branch",
Value: "master",
Usage: "git commit branch",
EnvVar: "DRONE_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "commit.message",
Usage: "git commit message",
EnvVar: "DRONE_COMMIT_MESSAGE",
},
cli.StringFlag{
Name: "commit.link",
Usage: "git commit link",
EnvVar: "DRONE_COMMIT_LINK",
},
cli.StringFlag{
Name: "commit.author.name",
Usage: "git author name",
EnvVar: "DRONE_COMMIT_AUTHOR",
},
cli.StringFlag{
Name: "commit.author.email",
Usage: "git author email",
EnvVar: "DRONE_COMMIT_AUTHOR_EMAIL",
},
cli.StringFlag{
Name: "commit.author.avatar",
Usage: "git author avatar",
EnvVar: "DRONE_COMMIT_AUTHOR_AVATAR",
},
cli.StringFlag{
Name: "build.event",
Value: "push",
Usage: "build event",
EnvVar: "DRONE_BUILD_EVENT",
},
cli.IntFlag{
Name: "build.number",
Usage: "build number",
EnvVar: "DRONE_BUILD_NUMBER",
},
cli.IntFlag{
Name: "build.created",
Usage: "build created",
EnvVar: "DRONE_BUILD_CREATED",
},
cli.IntFlag{
Name: "build.started",
Usage: "build started",
EnvVar: "DRONE_BUILD_STARTED",
},
cli.IntFlag{
Name: "build.finished",
Usage: "build finished",
EnvVar: "DRONE_BUILD_FINISHED",
},
cli.StringFlag{
Name: "build.status",
Usage: "build status",
Value: "success",
EnvVar: "DRONE_BUILD_STATUS",
},
cli.StringFlag{
Name: "build.link",
Usage: "build link",
EnvVar: "DRONE_BUILD_LINK",
},
cli.StringFlag{
Name: "build.deploy",
Usage: "build deployment target",
EnvVar: "DRONE_DEPLOY_TO",
},
cli.BoolTFlag{
Name: "yaml.verified",
Usage: "build yaml is verified",
EnvVar: "DRONE_YAML_VERIFIED",
},
cli.BoolTFlag{
Name: "yaml.signed",
Usage: "build yaml is signed",
EnvVar: "DRONE_YAML_SIGNED",
},
cli.IntFlag{
Name: "prev.build.number",
Usage: "previous build number",
EnvVar: "DRONE_PREV_BUILD_NUMBER",
},
cli.StringFlag{
Name: "prev.build.status",
Usage: "previous build status",
EnvVar: "DRONE_PREV_BUILD_STATUS",
},
cli.StringFlag{
Name: "prev.commit.sha",
Usage: "previous build sha",
EnvVar: "DRONE_PREV_COMMIT_SHA",
},
cli.StringFlag{
Name: "netrc.username",
Usage: "previous build sha",
Name: "netrc-username",
EnvVar: "DRONE_NETRC_USERNAME",
},
cli.StringFlag{
Name: "netrc.password",
Usage: "previous build sha",
Name: "netrc-password",
EnvVar: "DRONE_NETRC_PASSWORD",
},
cli.StringFlag{
Name: "netrc.machine",
Usage: "previous build sha",
Name: "netrc-machine",
EnvVar: "DRONE_NETRC_MACHINE",
},
//
// metadata parameters
//
cli.StringFlag{
Name: "system-arch",
Value: "linux/amd64",
EnvVar: "DRONE_SYSTEM_ARCH",
},
cli.StringFlag{
Name: "system-name",
Value: "pipec",
EnvVar: "DRONE_SYSTEM_NAME",
},
cli.StringFlag{
Name: "system-link",
Value: "https://github.com/cncd/pipec",
EnvVar: "DRONE_SYSTEM_LINK",
},
cli.StringFlag{
Name: "repo-name",
EnvVar: "DRONE_REPO_NAME",
},
cli.StringFlag{
Name: "repo-link",
EnvVar: "DRONE_REPO_LINK",
},
cli.StringFlag{
Name: "repo-remote-url",
EnvVar: "DRONE_REPO_REMOTE",
},
cli.StringFlag{
Name: "repo-private",
EnvVar: "DRONE_REPO_PRIVATE",
},
cli.IntFlag{
Name: "build-number",
EnvVar: "DRONE_BUILD_NUMBER",
},
cli.Int64Flag{
Name: "build-created",
EnvVar: "DRONE_BUILD_CREATED",
},
cli.Int64Flag{
Name: "build-started",
EnvVar: "DRONE_BUILD_STARTED",
},
cli.Int64Flag{
Name: "build-finished",
EnvVar: "DRONE_BUILD_FINISHED",
},
cli.StringFlag{
Name: "build-status",
EnvVar: "DRONE_BUILD_STATUS",
},
cli.StringFlag{
Name: "build-event",
EnvVar: "DRONE_BUILD_EVENT",
},
cli.StringFlag{
Name: "build-link",
EnvVar: "DRONE_BUILD_LINK",
},
cli.StringFlag{
Name: "build-target",
EnvVar: "DRONE_BUILD_TARGET",
},
cli.StringFlag{
Name: "commit-sha",
EnvVar: "DRONE_COMMIT_SHA",
},
cli.StringFlag{
Name: "commit-ref",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "commit-refspec",
EnvVar: "DRONE_COMMIT_REFSPEC",
},
cli.StringFlag{
Name: "commit-branch",
EnvVar: "DRONE_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "commit-message",
EnvVar: "DRONE_COMMIT_MESSAGE",
},
cli.StringFlag{
Name: "commit-author-name",
EnvVar: "DRONE_COMMIT_AUTHOR_NAME",
},
cli.StringFlag{
Name: "commit-author-avatar",
EnvVar: "DRONE_COMMIT_AUTHOR_AVATAR",
},
cli.StringFlag{
Name: "commit-author-email",
EnvVar: "DRONE_COMMIT_AUTHOR_EMAIL",
},
cli.IntFlag{
Name: "prev-build-number",
EnvVar: "DRONE_PREV_BUILD_NUMBER",
},
cli.Int64Flag{
Name: "prev-build-created",
EnvVar: "DRONE_PREV_BUILD_CREATED",
},
cli.Int64Flag{
Name: "prev-build-started",
EnvVar: "DRONE_PREV_BUILD_STARTED",
},
cli.Int64Flag{
Name: "prev-build-finished",
EnvVar: "DRONE_PREV_BUILD_FINISHED",
},
cli.StringFlag{
Name: "prev-build-status",
EnvVar: "DRONE_PREV_BUILD_STATUS",
},
cli.StringFlag{
Name: "prev-build-event",
EnvVar: "DRONE_PREV_BUILD_EVENT",
},
cli.StringFlag{
Name: "prev-build-link",
EnvVar: "DRONE_PREV_BUILD_LINK",
},
cli.StringFlag{
Name: "prev-commit-sha",
EnvVar: "DRONE_PREV_COMMIT_SHA",
},
cli.StringFlag{
Name: "prev-commit-ref",
EnvVar: "DRONE_PREV_COMMIT_REF",
},
cli.StringFlag{
Name: "prev-commit-refspec",
EnvVar: "DRONE_PREV_COMMIT_REFSPEC",
},
cli.StringFlag{
Name: "prev-commit-branch",
EnvVar: "DRONE_PREV_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "prev-commit-message",
EnvVar: "DRONE_PREV_COMMIT_MESSAGE",
},
cli.StringFlag{
Name: "prev-commit-author-name",
EnvVar: "DRONE_PREV_COMMIT_AUTHOR_NAME",
},
cli.StringFlag{
Name: "prev-commit-author-avatar",
EnvVar: "DRONE_PREV_COMMIT_AUTHOR_AVATAR",
},
cli.StringFlag{
Name: "prev-commit-author-email",
EnvVar: "DRONE_PREV_COMMIT_AUTHOR_EMAIL",
},
cli.IntFlag{
Name: "job-number",
EnvVar: "DRONE_JOB_NUMBER",
},
},
}
func exec(c *cli.Context) error {
sigterm := make(chan os.Signal, 1)
cancelc := make(chan bool, 1)
signal.Notify(sigterm, os.Interrupt)
go func() {
<-sigterm
cancelc <- true
}()
path := c.Args().First()
if path == "" {
path = ".drone.yml"
file := c.Args().First()
if file == "" {
file = ".drone.yml"
}
path, _ = filepath.Abs(path)
dir := filepath.Dir(path)
file, err := ioutil.ReadFile(path)
metadata := metadataFromContext(c)
environ := metadata.Environ()
for k, v := range metadata.EnvironDrone() {
environ[k] = v
}
for _, env := range os.Environ() {
k := strings.Split(env, "=")[0]
v := strings.Split(env, "=")[1]
environ[k] = v
}
tmpl, err := envsubst.ParseFile(file)
if err != nil {
return err
}
confstr, err := tmpl.Execute(func(name string) string {
return environ[name]
})
if err != nil {
return err
}
engine, err := docker.New(
c.String("docker-host"),
c.String("docker-cert-path"),
c.Bool("docker-tls-verify"),
)
conf, err := yaml.ParseString(confstr)
if err != nil {
return err
}
a := agent.Agent{
Update: agent.NoopUpdateFunc,
Logger: agent.TermLoggerFunc,
Engine: engine,
Timeout: c.Duration("timeout.inactivity"),
Platform: "linux/amd64",
Escalate: c.StringSlice("privileged"),
Netrc: []string{},
Local: dir,
Pull: c.Bool("pull"),
}
payload := &model.Work{
Yaml: string(file),
Verified: c.BoolT("yaml.verified"),
Signed: c.BoolT("yaml.signed"),
Repo: &model.Repo{
FullName: c.String("repo.fullname"),
Owner: c.String("repo.owner"),
Name: c.String("repo.name"),
Kind: c.String("repo.type"),
Link: c.String("repo.link"),
Branch: c.String("repo.branch"),
Avatar: c.String("repo.avatar"),
Timeout: int64(c.Duration("timeout").Minutes()),
IsPrivate: c.Bool("repo.private"),
IsTrusted: c.BoolT("repo.trusted"),
Clone: c.String("remote.url"),
},
System: &model.System{
Link: c.GlobalString("server"),
},
Secrets: getSecrets(c),
Netrc: &model.Netrc{
Login: c.String("netrc.username"),
Password: c.String("netrc.password"),
Machine: c.String("netrc.machine"),
},
Build: &model.Build{
Commit: c.String("commit.sha"),
Branch: c.String("commit.branch"),
Ref: c.String("commit.ref"),
Link: c.String("commit.link"),
Message: c.String("commit.message"),
Author: c.String("commit.author.name"),
Email: c.String("commit.author.email"),
Avatar: c.String("commit.author.avatar"),
Number: c.Int("build.number"),
Event: c.String("build.event"),
Deploy: c.String("build.deploy"),
},
BuildLast: &model.Build{
Number: c.Int("prev.build.number"),
Status: c.String("prev.build.status"),
Commit: c.String("prev.commit.sha"),
},
}
if len(c.StringSlice("matrix")) > 0 {
p := *payload
p.Job = &model.Job{
Environment: getMatrix(c),
// configure volumes for local execution
volumes := c.StringSlice("volumes")
if c.Bool("local") {
var (
workspaceBase = conf.Workspace.Base
workspacePath = conf.Workspace.Path
)
if workspaceBase == "" {
workspaceBase = c.String("workspace-base")
}
return a.Run(&p, cancelc)
if workspacePath == "" {
workspacePath = c.String("workspace-path")
}
dir, _ := filepath.Abs(filepath.Dir(file))
volumes = append(volumes, dir+":"+path.Join(workspaceBase, workspacePath))
}
axes, err := yaml.ParseMatrix(file)
// compiles the yaml file
compiled := compiler.New(
compiler.WithEscalated(
c.StringSlice("privileged")...,
),
compiler.WithVolumes(volumes...),
compiler.WithWorkspace(
c.String("workspace-base"),
c.String("workspace-path"),
),
compiler.WithPrefix(
c.String("prefix"),
),
compiler.WithProxy(),
compiler.WithLocal(
c.Bool("local"),
),
compiler.WithNetrc(
c.String("netrc-username"),
c.String("netrc-password"),
c.String("netrc-machine"),
),
compiler.WithMetadata(metadata),
).Compile(conf)
engine, err := docker.NewEnv()
if err != nil {
return err
}
if len(axes) == 0 {
axes = append(axes, yaml.Axis{})
}
ctx, cancel := context.WithTimeout(context.Background(), c.Duration("timeout"))
defer cancel()
ctx = interrupt.WithContext(ctx)
var jobs []*model.Job
count := 0
for _, axis := range axes {
jobs = append(jobs, &model.Job{
Number: count,
Environment: axis,
})
count++
}
for _, job := range jobs {
fmt.Printf("Running Matrix job #%d\n", job.Number)
p := *payload
p.Job = job
if err := a.Run(&p, cancelc); err != nil {
return err
}
}
return nil
return pipeline.New(compiled,
pipeline.WithContext(ctx),
pipeline.WithLogger(defaultLogger),
pipeline.WithTracer(pipeline.DefaultTracer),
pipeline.WithEngine(engine),
).Run()
}
// helper function to retrieve matrix variables.
func getMatrix(c *cli.Context) map[string]string {
envs := map[string]string{}
for _, s := range c.StringSlice("matrix") {
parts := strings.SplitN(s, "=", 2)
if len(parts) != 2 {
continue
}
k := parts[0]
v := parts[1]
envs[k] = v
}
return envs
}
// helper function to retrieve secret variables.
func getSecrets(c *cli.Context) []*model.Secret {
var secrets []*model.Secret
if c.String("secrets-file") != "" {
envs, _ := godotenv.Read(c.String("secrets-file"))
for k, v := range envs {
secret := &model.Secret{
Name: k,
Value: v,
Events: []string{
model.EventPull,
model.EventPush,
model.EventTag,
model.EventDeploy,
// return the metadata from the cli context.
func metadataFromContext(c *cli.Context) frontend.Metadata {
return frontend.Metadata{
Repo: frontend.Repo{
Name: c.String("repo-name"),
Link: c.String("repo-link"),
Remote: c.String("repo-remote-url"),
Private: c.Bool("repo-private"),
},
Curr: frontend.Build{
Number: c.Int("build-number"),
Created: c.Int64("build-created"),
Started: c.Int64("build-started"),
Finished: c.Int64("build-finished"),
Status: c.String("build-status"),
Event: c.String("build-event"),
Link: c.String("build-link"),
Target: c.String("build-target"),
Commit: frontend.Commit{
Sha: c.String("commit-sha"),
Ref: c.String("commit-ref"),
Refspec: c.String("commit-refspec"),
Branch: c.String("commit-branch"),
Message: c.String("commit-message"),
Author: frontend.Author{
Name: c.String("commit-author-name"),
Email: c.String("commit-author-email"),
Avatar: c.String("commit-author-avatar"),
},
Images: []string{"*"},
}
secrets = append(secrets, secret)
}
}
for _, s := range c.StringSlice("secret") {
parts := strings.SplitN(s, "=", 2)
if len(parts) != 2 {
continue
}
secret := &model.Secret{
Name: parts[0],
Value: parts[1],
Events: []string{
model.EventPull,
model.EventPush,
model.EventTag,
model.EventDeploy,
},
Images: []string{"*"},
}
secrets = append(secrets, secret)
},
Prev: frontend.Build{
Number: c.Int("prev-build-number"),
Created: c.Int64("prev-build-created"),
Started: c.Int64("prev-build-started"),
Finished: c.Int64("prev-build-finished"),
Status: c.String("prev-build-status"),
Event: c.String("prev-build-event"),
Link: c.String("prev-build-link"),
Commit: frontend.Commit{
Sha: c.String("prev-commit-sha"),
Ref: c.String("prev-commit-ref"),
Refspec: c.String("prev-commit-refspec"),
Branch: c.String("prev-commit-branch"),
Message: c.String("prev-commit-message"),
Author: frontend.Author{
Name: c.String("prev-commit-author-name"),
Email: c.String("prev-commit-author-email"),
Avatar: c.String("prev-commit-author-avatar"),
},
},
},
Job: frontend.Job{
Number: c.Int("job-number"),
},
Sys: frontend.System{
Name: c.String("system-name"),
Link: c.String("system-link"),
Arch: c.String("system-arch"),
},
}
return secrets
}
var defaultLogger = pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error {
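// copy the first part of the step's multipart log stream, which carries
// the plain-text output, straight to the terminal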
part, err := rc.NextPart()
if err != nil {
return err
}
io.Copy(os.Stderr, part)
return nil
})
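For context, both this exec command and the server-side hook handler run the yaml through the drone/envsubst package before parsing it, so ${VAR} references are expanded from the metadata environment. A minimal, self-contained sketch of that substitution step on its own (the variable names and values below are illustrative, not taken from this change):

    package main

    import (
        "fmt"
        "log"

        "github.com/drone/envsubst"
    )

    func main() {
        // illustrative values; the real map is built from frontend.Metadata
        environ := map[string]string{
            "DRONE_COMMIT_BRANCH": "master",
            "DRONE_BUILD_NUMBER":  "42",
        }
        yml := "message: build ${DRONE_BUILD_NUMBER} on ${DRONE_COMMIT_BRANCH}"
        out, err := envsubst.Eval(yml, func(name string) string {
            return environ[name] // names with no value expand to an empty string
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out) // message: build 42 on master
    }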

View file

@ -1,6 +1,6 @@
package main
import "github.com/codegangsta/cli"
import "github.com/urfave/cli"
var globalCmd = cli.Command{
Name: "global",

View file

@ -1,6 +1,6 @@
package main
import "github.com/codegangsta/cli"
import "github.com/urfave/cli"
var globalSecretCmd = cli.Command{
Name: "secret",

View file

@ -1,21 +1,13 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var globalSecretAddCmd = cli.Command{
Name: "add",
Usage: "adds a secret",
ArgsUsage: "[key] [value]",
Action: func(c *cli.Context) {
if err := globalSecretAdd(c); err != nil {
log.Fatalln(err)
}
},
Flags: secretAddFlags(),
Action: globalSecretAdd,
Flags: secretAddFlags(),
}
func globalSecretAdd(c *cli.Context) error {

View file

@ -1,20 +1,12 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var globalSecretListCmd = cli.Command{
Name: "ls",
Usage: "list all secrets",
Action: func(c *cli.Context) {
if err := globalSecretList(c); err != nil {
log.Fatalln(err)
}
},
Flags: secretListFlags(),
Name: "ls",
Usage: "list all secrets",
Action: globalSecretList,
Flags: secretListFlags(),
}
func globalSecretList(c *cli.Context) error {

View file

@ -1,19 +1,11 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var globalSecretRemoveCmd = cli.Command{
Name: "rm",
Usage: "remove a secret",
Action: func(c *cli.Context) {
if err := globalSecretRemove(c); err != nil {
log.Fatalln(err)
}
},
Name: "rm",
Usage: "remove a secret",
Action: globalSecretRemove,
}
func globalSecretRemove(c *cli.Context) error {

View file

@ -1,21 +1,16 @@
package main
import (
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var infoCmd = cli.Command{
Name: "info",
Usage: "show information about the current user",
Action: func(c *cli.Context) {
if err := info(c); err != nil {
log.Fatalln(err)
}
},
Name: "info",
Usage: "show information about the current user",
Action: info,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -1,14 +1,15 @@
package main
import (
"fmt"
"os"
"github.com/drone/drone/drone/agent"
"github.com/drone/drone/version"
"github.com/codegangsta/cli"
"github.com/ianschenck/envflag"
_ "github.com/joho/godotenv/autoload"
"github.com/urfave/cli"
)
func main() {
@ -32,7 +33,6 @@ func main() {
}
app.Commands = []cli.Command{
agent.AgentCmd,
agentsCmd,
buildCmd,
deployCmd,
execCmd,
@ -46,5 +46,8 @@ func main() {
globalCmd,
}
app.Run(os.Args)
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View file

@ -1,6 +1,6 @@
package main
import "github.com/codegangsta/cli"
import "github.com/urfave/cli"
var orgCmd = cli.Command{
Name: "org",

View file

@ -1,6 +1,6 @@
package main
import "github.com/codegangsta/cli"
import "github.com/urfave/cli"
var orgSecretCmd = cli.Command{
Name: "secret",

View file

@ -1,21 +1,13 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var orgSecretAddCmd = cli.Command{
Name: "add",
Usage: "adds a secret",
ArgsUsage: "[org] [key] [value]",
Action: func(c *cli.Context) {
if err := orgSecretAdd(c); err != nil {
log.Fatalln(err)
}
},
Flags: secretAddFlags(),
Action: orgSecretAdd,
Flags: secretAddFlags(),
}
func orgSecretAdd(c *cli.Context) error {

View file

@ -1,20 +1,12 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var orgSecretListCmd = cli.Command{
Name: "ls",
Usage: "list all secrets",
Action: func(c *cli.Context) {
if err := orgSecretList(c); err != nil {
log.Fatalln(err)
}
},
Flags: secretListFlags(),
Name: "ls",
Usage: "list all secrets",
Action: orgSecretList,
Flags: secretListFlags(),
}
func orgSecretList(c *cli.Context) error {

View file

@ -3,7 +3,7 @@ package main
import (
"log"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var orgSecretRemoveCmd = cli.Command{

View file

@ -1,6 +1,6 @@
package main
import "github.com/codegangsta/cli"
import "github.com/urfave/cli"
var repoCmd = cli.Command{
Name: "repo",

View file

@ -2,19 +2,14 @@ package main
import (
"fmt"
"log"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var repoAddCmd = cli.Command{
Name: "add",
Usage: "add a repository",
Action: func(c *cli.Context) {
if err := repoAdd(c); err != nil {
log.Fatalln(err)
}
},
Name: "add",
Usage: "add a repository",
Action: repoAdd,
}
func repoAdd(c *cli.Context) error {

View file

@ -2,19 +2,14 @@ package main
import (
"fmt"
"log"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var repoChownCmd = cli.Command{
Name: "chown",
Usage: "assume ownership of a repository",
Action: func(c *cli.Context) {
if err := repoChown(c); err != nil {
log.Fatalln(err)
}
},
Name: "chown",
Usage: "assume ownership of a repository",
Action: repoChown,
}
func repoChown(c *cli.Context) error {

View file

@ -1,21 +1,16 @@
package main
import (
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var repoInfoCmd = cli.Command{
Name: "info",
Usage: "show repository details",
Action: func(c *cli.Context) {
if err := repoInfo(c); err != nil {
log.Fatalln(err)
}
},
Name: "info",
Usage: "show repository details",
Action: repoInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -1,21 +1,16 @@
package main
import (
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var repoListCmd = cli.Command{
Name: "ls",
Usage: "list all repos",
Action: func(c *cli.Context) {
if err := repoList(c); err != nil {
log.Fatalln(err)
}
},
Name: "ls",
Usage: "list all repos",
Action: repoList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -2,19 +2,14 @@ package main
import (
"fmt"
"log"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var repoRemoveCmd = cli.Command{
Name: "rm",
Usage: "remove a repository",
Action: func(c *cli.Context) {
if err := repoRemove(c); err != nil {
log.Fatalln(err)
}
},
Name: "rm",
Usage: "remove a repository",
Action: repoRemove,
}
func repoRemove(c *cli.Context) error {

View file

@ -6,8 +6,8 @@ import (
"strings"
"text/template"
"github.com/codegangsta/cli"
"github.com/drone/drone/model"
"github.com/urfave/cli"
)
var secretCmd = cli.Command{

View file

@ -1,21 +1,13 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var secretAddCmd = cli.Command{
Name: "add",
Usage: "adds a secret",
ArgsUsage: "[repo] [key] [value]",
Action: func(c *cli.Context) {
if err := secretAdd(c); err != nil {
log.Fatalln(err)
}
},
Flags: secretAddFlags(),
Action: secretAdd,
Flags: secretAddFlags(),
}
func secretAdd(c *cli.Context) error {

View file

@ -1,20 +1,12 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var secretListCmd = cli.Command{
Name: "ls",
Usage: "list all secrets",
Action: func(c *cli.Context) {
if err := secretList(c); err != nil {
log.Fatalln(err)
}
},
Flags: secretListFlags(),
Name: "ls",
Usage: "list all secrets",
Action: secretList,
Flags: secretListFlags(),
}
func secretList(c *cli.Context) error {

View file

@ -1,19 +1,11 @@
package main
import (
"log"
"github.com/codegangsta/cli"
)
import "github.com/urfave/cli"
var secretRemoveCmd = cli.Command{
Name: "rm",
Usage: "remove a secret",
Action: func(c *cli.Context) {
if err := secretRemove(c); err != nil {
log.Fatalln(err)
}
},
Name: "rm",
Usage: "remove a secret",
Action: secretRemove,
}
func secretRemove(c *cli.Context) error {

View file

@ -8,18 +8,14 @@ import (
"github.com/drone/drone/router/middleware"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/gin-gonic/contrib/ginrus"
"github.com/urfave/cli"
)
var serverCmd = cli.Command{
Name: "server",
Usage: "starts the drone server daemon",
Action: func(c *cli.Context) {
if err := server(c); err != nil {
logrus.Fatal(err)
}
},
Name: "server",
Usage: "starts the drone server daemon",
Action: server,
Flags: []cli.Flag{
cli.BoolFlag{
EnvVar: "DRONE_DEBUG",
@ -301,8 +297,6 @@ func server(c *cli.Context) error {
middleware.Cache(c),
middleware.Store(c),
middleware.Remote(c),
middleware.Agents(c),
middleware.Broker(c),
)
// start the server with tls enabled

View file

@ -2,19 +2,14 @@ package main
import (
"io/ioutil"
"log"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var signCmd = cli.Command{
Name: "sign",
Usage: "creates a secure yaml file",
Action: func(c *cli.Context) {
if err := sign(c); err != nil {
log.Fatalln(err)
}
},
Name: "sign",
Usage: "creates a secure yaml file",
Action: sign,
Flags: []cli.Flag{
cli.StringFlag{
Name: "in",

View file

@ -1,6 +1,6 @@
package main
import "github.com/codegangsta/cli"
import "github.com/urfave/cli"
var userCmd = cli.Command{
Name: "user",

View file

@ -2,20 +2,15 @@ package main
import (
"fmt"
"log"
"github.com/codegangsta/cli"
"github.com/drone/drone/model"
"github.com/urfave/cli"
)
var userAddCmd = cli.Command{
Name: "add",
Usage: "adds a user",
Action: func(c *cli.Context) {
if err := userAdd(c); err != nil {
log.Fatalln(err)
}
},
Name: "add",
Usage: "adds a user",
Action: userAdd,
}
func userAdd(c *cli.Context) error {

View file

@ -2,21 +2,16 @@ package main
import (
"fmt"
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var userInfoCmd = cli.Command{
Name: "info",
Usage: "show user details",
Action: func(c *cli.Context) {
if err := userInfo(c); err != nil {
log.Fatalln(err)
}
},
Name: "info",
Usage: "show user details",
Action: userInfo,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -1,21 +1,16 @@
package main
import (
"log"
"os"
"text/template"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var userListCmd = cli.Command{
Name: "ls",
Usage: "list all users",
Action: func(c *cli.Context) {
if err := userList(c); err != nil {
log.Fatalln(err)
}
},
Name: "ls",
Usage: "list all users",
Action: userList,
Flags: []cli.Flag{
cli.StringFlag{
Name: "format",

View file

@ -2,19 +2,14 @@ package main
import (
"fmt"
"log"
"github.com/codegangsta/cli"
"github.com/urfave/cli"
)
var userRemoveCmd = cli.Command{
Name: "rm",
Usage: "remove a user",
Action: func(c *cli.Context) {
if err := userRemove(c); err != nil {
log.Fatalln(err)
}
},
Name: "rm",
Usage: "remove a user",
Action: userRemove,
}
func userRemove(c *cli.Context) error {

View file

@ -9,8 +9,8 @@ import (
"github.com/drone/drone/client"
"github.com/codegangsta/cli"
"github.com/jackspirou/syscerts"
"github.com/urfave/cli"
)
func newClient(c *cli.Context) (client.Client, error) {

View file

@ -1,33 +0,0 @@
package middleware
import (
"github.com/codegangsta/cli"
"github.com/drone/drone/shared/token"
"github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
)
const agentKey = "agent"
// Agents is a middleware function that initializes the authorization middleware
// for agents to connect to the queue.
func Agents(cli *cli.Context) gin.HandlerFunc {
secret := cli.String("agent-secret")
if secret == "" {
logrus.Fatalf("failed to generate token from DRONE_AGENT_SECRET")
}
t := token.New(token.AgentToken, secret)
s, err := t.Sign(secret)
if err != nil {
logrus.Fatalf("failed to generate token from DRONE_AGENT_SECRET. %s", err)
}
logrus.Infof("using agent secret %s", secret)
logrus.Warnf("agents can connect with token %s", s)
return func(c *gin.Context) {
c.Set(agentKey, secret)
}
}

View file

@ -1,63 +0,0 @@
package middleware
import (
"os"
"sync"
handlers "github.com/drone/drone/server"
"github.com/codegangsta/cli"
"github.com/drone/mq/logger"
"github.com/drone/mq/server"
"github.com/drone/mq/stomp"
"github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
"github.com/tidwall/redlog"
)
const (
serverKey = "broker"
clientKey = "stomp.client" // mirrored from stomp/context
)
// Broker is a middleware function that initializes the broker
// and adds the broker client to the request context.
func Broker(cli *cli.Context) gin.HandlerFunc {
secret := cli.String("agent-secret")
if secret == "" {
logrus.Fatalf("fatal error. please provide the DRONE_SECRET")
}
// setup broker logging.
log := redlog.New(os.Stderr)
log.SetLevel(2)
logger.SetLogger(log)
if cli.Bool("broker-debug") {
log.SetLevel(1)
}
broker := server.NewServer(
server.WithCredentials("x-token", secret),
)
client := broker.Client()
var once sync.Once
return func(c *gin.Context) {
c.Set(serverKey, broker)
c.Set(clientKey, client)
once.Do(func() {
// this is some really hacky stuff
// turns out I need to do some refactoring
// don't judge!
// will fix in 0.6 release
ctx := c.Copy()
client.Connect(
stomp.WithCredentials("x-token", secret),
)
client.Subscribe("/queue/updates", stomp.HandlerFunc(func(m *stomp.Message) {
go handlers.HandleUpdate(ctx, m.Copy())
}))
})
}
}

View file

@ -3,8 +3,8 @@ package middleware
import (
"github.com/drone/drone/cache"
"github.com/codegangsta/cli"
"github.com/gin-gonic/gin"
"github.com/urfave/cli"
)
// Cache is a middleware function that initializes the Cache and attaches to

View file

@ -3,8 +3,8 @@ package middleware
import (
"github.com/drone/drone/model"
"github.com/codegangsta/cli"
"github.com/gin-gonic/gin"
"github.com/urfave/cli"
)
const configKey = "config"

View file

@ -4,7 +4,6 @@ import (
"fmt"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/drone/drone/remote"
"github.com/drone/drone/remote/bitbucket"
"github.com/drone/drone/remote/bitbucketserver"
@ -12,6 +11,7 @@ import (
"github.com/drone/drone/remote/gitlab"
"github.com/drone/drone/remote/gogs"
"github.com/gin-gonic/gin"
"github.com/urfave/cli"
)
// Remote is a middleware function that initializes the Remote and attaches to

View file

@ -1,9 +1,9 @@
package middleware
import (
"github.com/codegangsta/cli"
"github.com/drone/drone/store"
"github.com/drone/drone/store/datastore"
"github.com/urfave/cli"
"github.com/gin-gonic/gin"
)

View file

@ -2,7 +2,6 @@ package router
import (
"net/http"
"os"
"github.com/gin-gonic/gin"
@ -41,8 +40,6 @@ func Load(middleware ...gin.HandlerFunc) http.Handler {
e.GET("/logout", server.GetLogout)
e.NoRoute(server.ShowIndex)
// TODO above will go away with React UI
user := e.Group("/api/user")
{
user.Use(session.MustUser())
@ -121,46 +118,28 @@ func Load(middleware ...gin.HandlerFunc) http.Handler {
badges.GET("/cc.xml", server.GetCC)
}
if os.Getenv("DRONE_CANARY") == "" {
e.POST("/hook", server.PostHook)
e.POST("/api/hook", server.PostHook)
} else {
e.POST("/hook", server.PostHook2)
e.POST("/api/hook", server.PostHook2)
e.POST("/hook", server.PostHook)
e.POST("/api/hook", server.PostHook)
ws := e.Group("/ws")
{
ws.GET("/broker", server.RPCHandler)
ws.GET("/rpc", server.RPCHandler)
ws.GET("/feed", server.EventStream)
ws.GET("/logs/:owner/:name/:build/:number",
session.SetRepo(),
session.SetPerm(),
session.MustPull,
server.LogStream,
)
}
if os.Getenv("DRONE_CANARY") == "" {
ws := e.Group("/ws")
{
ws.GET("/broker", server.Broker)
ws.GET("/feed", server.EventStream)
ws.GET("/logs/:owner/:name/:build/:number",
session.SetRepo(),
session.SetPerm(),
session.MustPull,
server.LogStream,
)
}
} else {
ws := e.Group("/ws")
{
ws.GET("/broker", server.RPCHandler)
ws.GET("/rpc", server.RPCHandler)
ws.GET("/feed", server.EventStream2)
ws.GET("/logs/:owner/:name/:build/:number",
session.SetRepo(),
session.SetPerm(),
session.MustPull,
server.LogStream2,
)
}
info := e.Group("/api/info")
{
info.GET("/queue",
session.MustAdmin(),
server.GetQueueInfo,
)
}
info := e.Group("/api/info")
{
info.GET("/queue",
session.MustAdmin(),
server.GetQueueInfo,
)
}
auth := e.Group("/authorize")
@ -191,12 +170,5 @@ func Load(middleware ...gin.HandlerFunc) http.Handler {
debugger.GET("/pprof/trace", debug.TraceHandler())
}
// bots := e.Group("/bots")
// {
// bots.Use(session.MustUser())
// bots.POST("/slack", Slack)
// bots.POST("/slack/:command", Slack)
// }
return e
}

View file

@ -7,7 +7,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"
@ -18,13 +17,10 @@ import (
"github.com/drone/drone/remote"
"github.com/drone/drone/shared/httputil"
"github.com/drone/drone/store"
"github.com/drone/drone/yaml"
"github.com/gin-gonic/gin"
"github.com/square/go-jose"
"github.com/drone/drone/model"
"github.com/drone/drone/router/middleware/session"
"github.com/drone/mq/stomp"
)
func GetBuilds(c *gin.Context) {
@ -156,229 +152,10 @@ func DeleteBuild(c *gin.Context) {
job.ExitCode = 137
store.UpdateBuildJob(c, build, job)
if os.Getenv("DRONE_CANARY") == "" {
client := stomp.MustFromContext(c)
client.SendJSON("/topic/cancel", model.Event{
Type: model.Cancelled,
Repo: *repo,
Build: *build,
Job: *job,
}, stomp.WithHeader("job-id", strconv.FormatInt(job.ID, 10)))
} else {
config.queue.Error(context.Background(), fmt.Sprint(job.ID), queue.ErrCancel)
}
config.queue.Error(context.Background(), fmt.Sprint(job.ID), queue.ErrCancel)
c.String(204, "")
}
func PostBuild(c *gin.Context) {
if os.Getenv("DRONE_CANARY") == "true" {
PostBuild2(c)
return
}
remote_ := remote.FromContext(c)
repo := session.Repo(c)
fork := c.DefaultQuery("fork", "false")
num, err := strconv.Atoi(c.Param("number"))
if err != nil {
c.AbortWithError(http.StatusBadRequest, err)
return
}
user, err := store.GetUser(c, repo.UserID)
if err != nil {
log.Errorf("failure to find repo owner %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
build, err := store.GetBuildNumber(c, repo, num)
if err != nil {
log.Errorf("failure to get build %d. %s", num, err)
c.AbortWithError(404, err)
return
}
// if the remote has a refresh token, the current access token
// may be stale. Therefore, we should refresh prior to dispatching
// the job.
if refresher, ok := remote_.(remote.Refresher); ok {
ok, _ := refresher.Refresh(user)
if ok {
store.UpdateUser(c, user)
}
}
// fetch the .drone.yml file from the database
cfg := ToConfig(c)
raw, err := remote_.File(user, repo, build, cfg.Yaml)
if err != nil {
log.Errorf("failure to get build config for %s. %s", repo.FullName, err)
c.AbortWithError(404, err)
return
}
// Fetch secrets file but don't exit on error as it's optional
sec, err := remote_.File(user, repo, build, cfg.Shasum)
if err != nil {
log.Debugf("cannot find build secrets for %s. %s", repo.FullName, err)
}
netrc, err := remote_.Netrc(user, repo)
if err != nil {
log.Errorf("failure to generate netrc for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
jobs, err := store.GetJobList(c, build)
if err != nil {
log.Errorf("failure to get build %d jobs. %s", build.Number, err)
c.AbortWithError(404, err)
return
}
// must not restart a running build
if build.Status == model.StatusPending || build.Status == model.StatusRunning {
c.String(409, "Cannot re-start a started build")
return
}
// forking the build creates a duplicate of the build
// and then executes. This retains prior build history.
if forkit, _ := strconv.ParseBool(fork); forkit {
build.ID = 0
build.Number = 0
build.Parent = num
for _, job := range jobs {
job.ID = 0
job.NodeID = 0
}
err := store.CreateBuild(c, build, jobs...)
if err != nil {
c.String(500, err.Error())
return
}
event := c.DefaultQuery("event", build.Event)
if event == model.EventPush ||
event == model.EventPull ||
event == model.EventTag ||
event == model.EventDeploy {
build.Event = event
}
build.Deploy = c.DefaultQuery("deploy_to", build.Deploy)
}
// Read query string parameters into buildParams, exclude reserved params
var buildParams = map[string]string{}
for key, val := range c.Request.URL.Query() {
switch key {
case "fork", "event", "deploy_to":
default:
// We only accept string literals, because build parameters will be
// injected as environment variables
buildParams[key] = val[0]
}
}
// todo move this to database tier
// and wrap inside a transaction
build.Status = model.StatusPending
build.Started = 0
build.Finished = 0
build.Enqueued = time.Now().UTC().Unix()
build.Error = ""
for _, job := range jobs {
for k, v := range buildParams {
job.Environment[k] = v
}
job.Error = ""
job.Status = model.StatusPending
job.Started = 0
job.Finished = 0
job.ExitCode = 0
job.NodeID = 0
job.Enqueued = build.Enqueued
store.UpdateJob(c, job)
}
err = store.UpdateBuild(c, build)
if err != nil {
c.AbortWithStatus(500)
return
}
c.JSON(202, build)
// get the previous build so that we can send
// on status change notifications
last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
secs, err := store.GetMergedSecretList(c, repo)
if err != nil {
log.Debugf("Error getting secrets for %s#%d. %s", repo.FullName, build.Number, err)
}
var signed bool
var verified bool
signature, err := jose.ParseSigned(string(sec))
if err != nil {
log.Debugf("cannot parse .drone.yml.sig file. %s", err)
} else if len(sec) == 0 {
log.Debugf("cannot parse .drone.yml.sig file. empty file")
} else {
signed = true
output, err := signature.Verify([]byte(repo.Hash))
if err != nil {
log.Debugf("cannot verify .drone.yml.sig file. %s", err)
} else if string(output) != string(raw) {
log.Debugf("cannot verify .drone.yml.sig file. no match. %q <> %q", string(output), string(raw))
} else {
verified = true
}
}
log.Debugf(".drone.yml is signed=%v and verified=%v", signed, verified)
client := stomp.MustFromContext(c)
client.SendJSON("/topic/events", model.Event{
Type: model.Enqueued,
Repo: *repo,
Build: *build,
},
stomp.WithHeader("repo", repo.FullName),
stomp.WithHeader("private", strconv.FormatBool(repo.IsPrivate)),
)
for _, job := range jobs {
broker, _ := stomp.FromContext(c)
broker.SendJSON("/queue/pending", &model.Work{
Signed: signed,
Verified: verified,
User: user,
Repo: repo,
Build: build,
BuildLast: last,
Job: job,
Netrc: netrc,
Yaml: string(raw),
Secrets: secs,
System: &model.System{Link: httputil.GetURL(c.Request)},
},
stomp.WithHeader(
"platform",
yaml.ParsePlatformDefault(raw, "linux/amd64"),
),
stomp.WithHeaders(
yaml.ParseLabel(raw),
),
)
}
}
func GetBuildQueue(c *gin.Context) {
out, err := store.GetBuildQueue(c)
if err != nil {
@ -412,7 +189,7 @@ func copyLogs(dest io.Writer, src io.Reader) error {
//
//
func PostBuild2(c *gin.Context) {
func PostBuild(c *gin.Context) {
remote_ := remote.FromContext(c)
repo := session.Repo(c)

View file

@ -1,28 +1,268 @@
package server
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/square/go-jose"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus"
"github.com/drone/drone/model"
"github.com/drone/drone/remote"
"github.com/drone/drone/shared/httputil"
"github.com/drone/drone/shared/token"
"github.com/drone/drone/store"
"github.com/drone/drone/yaml"
"github.com/drone/mq/stomp"
"github.com/drone/envsubst"
"github.com/cncd/pipeline/pipeline/backend"
"github.com/cncd/pipeline/pipeline/frontend"
"github.com/cncd/pipeline/pipeline/frontend/yaml"
"github.com/cncd/pipeline/pipeline/frontend/yaml/compiler"
"github.com/cncd/pipeline/pipeline/frontend/yaml/linter"
"github.com/cncd/pipeline/pipeline/frontend/yaml/matrix"
"github.com/cncd/pipeline/pipeline/rpc"
"github.com/cncd/pubsub"
"github.com/cncd/queue"
)
//
// CANARY IMPLEMENTATION
//
// This file is a complete disaster because I'm trying to wedge in some
// experimental code. Please pardon our appearance during renovations.
//
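// skipRe matches, case-insensitively, commit message markers such as
// "[ci skip]", "[skip ci]" or "[CI Skip]"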
var skipRe = regexp.MustCompile(`\[(?i:ci *skip|skip *ci)\]`)
func GetQueueInfo(c *gin.Context) {
c.IndentedJSON(200,
config.queue.Info(c),
)
}
// return the metadata from the stored build models.
func metadataFromStruct(repo *model.Repo, build, last *model.Build, job *model.Job, link string) frontend.Metadata {
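// flatten the stored models into the metadata structure consumed by the
// pipeline compiler; these values surface as the CI_*/DRONE_* environment
// variables available to each step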
return frontend.Metadata{
Repo: frontend.Repo{
Name: repo.Name,
Link: repo.Link,
Remote: repo.Clone,
Private: repo.IsPrivate,
},
Curr: frontend.Build{
Number: build.Number,
Created: build.Created,
Started: build.Started,
Finished: build.Finished,
Status: build.Status,
Event: build.Event,
Link: build.Link,
Target: build.Deploy,
Commit: frontend.Commit{
Sha: build.Commit,
Ref: build.Ref,
Refspec: build.Refspec,
Branch: build.Branch,
Message: build.Message,
Author: frontend.Author{
Name: build.Author,
Email: build.Email,
Avatar: build.Avatar,
},
},
},
Prev: frontend.Build{
Number: last.Number,
Created: last.Created,
Started: last.Started,
Finished: last.Finished,
Status: last.Status,
Event: last.Event,
Link: last.Link,
Target: last.Deploy,
Commit: frontend.Commit{
Sha: last.Commit,
Ref: last.Ref,
Refspec: last.Refspec,
Branch: last.Branch,
Message: last.Message,
Author: frontend.Author{
Name: last.Author,
Email: last.Email,
Avatar: last.Avatar,
},
},
},
Job: frontend.Job{
Number: job.Number,
Matrix: job.Environment,
},
Sys: frontend.System{
Name: "drone",
Link: link,
Arch: "linux/amd64",
},
}
}
type builder struct {
Repo *model.Repo
Curr *model.Build
Last *model.Build
Netrc *model.Netrc
Secs []*model.Secret
Link string
Yaml string
}
type buildItem struct {
Job *model.Job
Platform string
Labels map[string]string
Config *backend.Config
}
func (b *builder) Build() ([]*buildItem, error) {
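// expand the yaml matrix; a yaml with no matrix section still yields a
// single empty axis so that exactly one job is produced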
axes, err := matrix.ParseString(b.Yaml)
if err != nil {
return nil, err
}
if len(axes) == 0 {
axes = append(axes, matrix.Axis{})
}
var items []*buildItem
for i, axis := range axes {
job := &model.Job{
BuildID: b.Curr.ID,
Number: i + 1,
Status: model.StatusPending,
Environment: axis,
Enqueued: b.Curr.Created,
}
metadata := metadataFromStruct(b.Repo, b.Curr, b.Last, job, b.Link)
environ := metadata.Environ()
for k, v := range metadata.EnvironDrone() {
environ[k] = v
}
secrets := map[string]string{}
for _, sec := range b.Secs {
if !sec.MatchEvent(b.Curr.Event) {
continue
}
if b.Curr.Verified || sec.SkipVerify {
secrets[sec.Name] = sec.Value
}
}
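// substitution source for ${VAR} references in the yaml: the build
// environment first, then any event-matched secrets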
sub := func(name string) string {
if v, ok := environ[name]; ok {
return v
}
return secrets[name]
}
y := b.Yaml
if s, err := envsubst.Eval(y, sub); err == nil {
y = s
}
parsed, err := yaml.ParseString(y)
if err != nil {
return nil, err
}
metadata.Sys.Arch = parsed.Platform
if metadata.Sys.Arch == "" {
metadata.Sys.Arch = "linux/amd64"
}
lerr := linter.New(
linter.WithTrusted(b.Repo.IsTrusted),
).Lint(parsed)
if lerr != nil {
return nil, lerr
}
ir := compiler.New(
compiler.WithEnviron(environ),
// TODO ability to customize the escalated plugins
compiler.WithEscalated("plugins/docker", "plugins/gcr", "plugins/ecr"),
compiler.WithLocal(false),
compiler.WithNetrc(b.Netrc.Login, b.Netrc.Password, b.Netrc.Machine),
compiler.WithPrefix(
fmt.Sprintf(
"%d_%d",
job.ID,
time.Now().Unix(),
),
),
compiler.WithEnviron(job.Environment),
compiler.WithProxy(),
// TODO ability to set global volumes for things like certs
compiler.WithVolumes(),
compiler.WithWorkspaceFromURL("/drone", b.Curr.Link),
).Compile(parsed)
for _, sec := range b.Secs {
if !sec.MatchEvent(b.Curr.Event) {
continue
}
if b.Curr.Verified || sec.SkipVerify {
ir.Secrets = append(ir.Secrets, &backend.Secret{
Mask: sec.Conceal,
Name: sec.Name,
Value: sec.Value,
})
}
}
item := &buildItem{
Job: job,
Config: ir,
Labels: parsed.Labels,
Platform: metadata.Sys.Arch,
}
if item.Labels == nil {
item.Labels = map[string]string{}
}
items = append(items, item)
}
return items, nil
}
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
func PostHook(c *gin.Context) {
remote_ := remote.FromContext(c)
tmprepo, build, err := remote_.Hook(c.Request)
if err != nil {
log.Errorf("failure to parse hook. %s", err)
logrus.Errorf("failure to parse hook. %s", err)
c.AbortWithError(400, err)
return
}
@ -31,7 +271,7 @@ func PostHook(c *gin.Context) {
return
}
if tmprepo == nil {
log.Errorf("failure to ascertain repo from hook.")
logrus.Errorf("failure to ascertain repo from hook.")
c.Writer.WriteHeader(400)
return
}
@ -40,14 +280,14 @@ func PostHook(c *gin.Context) {
// wrapped in square brackets appear in the commit message
skipMatch := skipRe.FindString(build.Message)
if len(skipMatch) > 0 {
log.Infof("ignoring hook. %s found in %s", skipMatch, build.Commit)
logrus.Infof("ignoring hook. %s found in %s", skipMatch, build.Commit)
c.Writer.WriteHeader(204)
return
}
repo, err := store.GetRepoOwnerName(c, tmprepo.Owner, tmprepo.Name)
if err != nil {
log.Errorf("failure to find repo %s/%s from hook. %s", tmprepo.Owner, tmprepo.Name, err)
logrus.Errorf("failure to find repo %s/%s from hook. %s", tmprepo.Owner, tmprepo.Name, err)
c.AbortWithError(404, err)
return
}
@ -57,18 +297,18 @@ func PostHook(c *gin.Context) {
return repo.Hash, nil
})
if err != nil {
log.Errorf("failure to parse token from hook for %s. %s", repo.FullName, err)
logrus.Errorf("failure to parse token from hook for %s. %s", repo.FullName, err)
c.AbortWithError(400, err)
return
}
if parsed.Text != repo.FullName {
log.Errorf("failure to verify token from hook. Expected %s, got %s", repo.FullName, parsed.Text)
logrus.Errorf("failure to verify token from hook. Expected %s, got %s", repo.FullName, parsed.Text)
c.AbortWithStatus(403)
return
}
if repo.UserID == 0 {
log.Warnf("ignoring hook. repo %s has no owner.", repo.FullName)
logrus.Warnf("ignoring hook. repo %s has no owner.", repo.FullName)
c.Writer.WriteHeader(204)
return
}
@ -81,33 +321,18 @@ func PostHook(c *gin.Context) {
}
if skipped {
log.Infof("ignoring hook. repo %s is disabled for %s events.", repo.FullName, build.Event)
logrus.Infof("ignoring hook. repo %s is disabled for %s events.", repo.FullName, build.Event)
c.Writer.WriteHeader(204)
return
}
user, err := store.GetUser(c, repo.UserID)
if err != nil {
log.Errorf("failure to find repo owner %s. %s", repo.FullName, err)
logrus.Errorf("failure to find repo owner %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
// if there is no email address associated with the pull request,
// we lookup the email address based on the authors github login.
//
// my initial hesitation with this code is that it has the ability
// to expose your email address. At the same time, your email address
// is already exposed in the public .git log. So while a small number of
// people will probably be upset by this, I'm not sure
// it is actually that big of a deal.
if len(build.Email) == 0 {
author, uerr := store.GetUserLogin(c, build.Author)
if uerr == nil {
build.Email = author.Email
}
}
// if the remote has a refresh token, the current access token
// may be stale. Therefore, we should refresh prior to dispatching
// the job.
@ -119,26 +344,16 @@ func PostHook(c *gin.Context) {
}
// fetch the build file from the database
config := ToConfig(c)
raw, err := remote_.File(user, repo, build, config.Yaml)
cfg := ToConfig(c)
raw, err := remote_.File(user, repo, build, cfg.Yaml)
if err != nil {
log.Errorf("failure to get build config for %s. %s", repo.FullName, err)
logrus.Errorf("failure to get build config for %s. %s", repo.FullName, err)
c.AbortWithError(404, err)
return
}
sec, err := remote_.File(user, repo, build, config.Shasum)
sec, err := remote_.File(user, repo, build, cfg.Shasum)
if err != nil {
log.Debugf("cannot find build secrets for %s. %s", repo.FullName, err)
// NOTE we don't exit on failure. The sec file is optional
}
axes, err := yaml.ParseMatrix(raw)
if err != nil {
c.String(500, "Failed to parse yaml file or calculate matrix. %s", err)
return
}
if len(axes) == 0 {
axes = append(axes, yaml.Axis{})
logrus.Debugf("cannot find yaml signature for %s. %s", repo.FullName, err)
}
netrc, err := remote_.Netrc(user, repo)
@ -148,24 +363,28 @@ func PostHook(c *gin.Context) {
}
// verify the branches can be built vs skipped
branches := yaml.ParseBranch(raw)
if !branches.Match(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
branches, err := yaml.ParseBytes(raw)
if err != nil {
c.String(500, "Failed to parse yaml file. %s", err)
return
}
if !branches.Branches.Match(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
c.String(200, "Branch does not match restrictions defined in yaml")
return
}
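// the optional .drone.yml.sig file holds a JOSE signature over the yaml;
// the build is marked signed when the file parses, and verified when the
// payload recovered with the repository hash matches the raw yaml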
signature, err := jose.ParseSigned(string(sec))
if err != nil {
log.Debugf("cannot parse .drone.yml.sig file. %s", err)
logrus.Debugf("cannot parse .drone.yml.sig file. %s", err)
} else if len(sec) == 0 {
log.Debugf("cannot parse .drone.yml.sig file. empty file")
logrus.Debugf("cannot parse .drone.yml.sig file. empty file")
} else {
build.Signed = true
output, verr := signature.Verify([]byte(repo.Hash))
if verr != nil {
log.Debugf("cannot verify .drone.yml.sig file. %s", verr)
logrus.Debugf("cannot verify .drone.yml.sig file. %s", verr)
} else if string(output) != string(raw) {
log.Debugf("cannot verify .drone.yml.sig file. no match")
logrus.Debugf("cannot verify .drone.yml.sig file. no match")
} else {
build.Verified = true
}
@ -175,71 +394,94 @@ func PostHook(c *gin.Context) {
build.Status = model.StatusPending
build.RepoID = repo.ID
// and use a transaction
var jobs []*model.Job
for num, axis := range axes {
jobs = append(jobs, &model.Job{
BuildID: build.ID,
Number: num + 1,
Status: model.StatusPending,
Environment: axis,
})
}
err = store.CreateBuild(c, build, jobs...)
if err != nil {
log.Errorf("failure to save commit for %s. %s", repo.FullName, err)
if err := store.CreateBuild(c, build, build.Jobs...); err != nil {
logrus.Errorf("failure to save commit for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
c.JSON(200, build)
url := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
err = remote_.Status(user, repo, build, url)
if err != nil {
log.Errorf("error setting commit status for %s/%d", repo.FullName, build.Number)
}
// get the previous build so that we can send
// on status change notifications
last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
secs, err := store.GetMergedSecretList(c, repo)
if err != nil {
log.Debugf("Error getting secrets for %s#%d. %s", repo.FullName, build.Number, err)
logrus.Debugf("Error getting secrets for %s#%d. %s", repo.FullName, build.Number, err)
}
client := stomp.MustFromContext(c)
client.SendJSON("topic/events", model.Event{
//
// BELOW: NEW
//
defer func() {
uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
err = remote_.Status(user, repo, build, uri)
if err != nil {
logrus.Errorf("error setting commit status for %s/%d", repo.FullName, build.Number)
}
}()
b := builder{
Repo: repo,
Curr: build,
Last: last,
Netrc: netrc,
Secs: secs,
Link: httputil.GetURL(c.Request),
Yaml: string(raw),
}
items, err := b.Build()
if err != nil {
build.Status = model.StatusError
build.Started = time.Now().Unix()
build.Finished = build.Started
build.Error = err.Error()
return
}
for _, item := range items {
build.Jobs = append(build.Jobs, item.Job)
store.CreateJob(c, item.Job)
// TODO err
}
//
// publish topic
//
message := pubsub.Message{
Labels: map[string]string{
"repo": repo.FullName,
"private": strconv.FormatBool(repo.IsPrivate),
},
}
message.Data, _ = json.Marshal(model.Event{
Type: model.Enqueued,
Repo: *repo,
Build: *build,
},
stomp.WithHeader("repo", repo.FullName),
stomp.WithHeader("private", strconv.FormatBool(repo.IsPrivate)),
)
})
// TODO remove global reference
config.pubsub.Publish(c, "topic/events", message)
//
// end publish topic
//
for _, job := range jobs {
broker, _ := stomp.FromContext(c)
broker.SendJSON("/queue/pending", &model.Work{
Signed: build.Signed,
Verified: build.Verified,
User: user,
Repo: repo,
Build: build,
BuildLast: last,
Job: job,
Netrc: netrc,
Yaml: string(raw),
Secrets: secs,
System: &model.System{Link: httputil.GetURL(c.Request)},
},
stomp.WithHeader(
"platform",
yaml.ParsePlatformDefault(raw, "linux/amd64"),
),
stomp.WithHeaders(
yaml.ParseLabel(raw),
),
)
for _, item := range items {
task := new(queue.Task)
task.ID = fmt.Sprint(item.Job.ID)
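// labels travel with the queue task and are intended to let the queue
// match work to agents, e.g. by platform or by labels declared in the yaml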
task.Labels = map[string]string{}
task.Labels["platform"] = item.Platform
for k, v := range item.Labels {
task.Labels[k] = v
}
task.Data, _ = json.Marshal(rpc.Pipeline{
ID: fmt.Sprint(item.Job.ID),
Config: item.Config,
Timeout: b.Repo.Timeout,
})
config.logger.Open(context.Background(), task.ID)
config.queue.Push(context.Background(), task)
}
}

View file

@ -1,869 +0,0 @@
package server
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/square/go-jose"
"github.com/Sirupsen/logrus"
"github.com/drone/drone/model"
"github.com/drone/drone/remote"
"github.com/drone/drone/shared/httputil"
"github.com/drone/drone/shared/token"
"github.com/drone/drone/store"
"github.com/drone/envsubst"
"github.com/cncd/pipeline/pipeline/backend"
"github.com/cncd/pipeline/pipeline/frontend"
"github.com/cncd/pipeline/pipeline/frontend/yaml"
"github.com/cncd/pipeline/pipeline/frontend/yaml/compiler"
"github.com/cncd/pipeline/pipeline/frontend/yaml/linter"
"github.com/cncd/pipeline/pipeline/frontend/yaml/matrix"
"github.com/cncd/pipeline/pipeline/rpc"
"github.com/cncd/pubsub"
"github.com/cncd/queue"
)
//
// CANARY IMPLEMENTATION
//
// This file is a complete disaster because I'm trying to wedge in some
// experimental code. Please pardon our appearance during renovations.
//
var skipRe = regexp.MustCompile(`\[(?i:ci *skip|skip *ci)\]`)
func GetQueueInfo(c *gin.Context) {
c.IndentedJSON(200,
config.queue.Info(c),
)
}
// func PostHookOld(c *gin.Context) {
// remote_ := remote.FromContext(c)
//
// tmprepo, build, err := remote_.Hook(c.Request)
// if err != nil {
// logrus.Errorf("failure to parse hook. %s", err)
// c.AbortWithError(400, err)
// return
// }
// if build == nil {
// c.Writer.WriteHeader(200)
// return
// }
// if tmprepo == nil {
// logrus.Errorf("failure to ascertain repo from hook.")
// c.Writer.WriteHeader(400)
// return
// }
//
// // skip the build if any case-insensitive combination of the words "skip" and "ci"
// // wrapped in square brackets appear in the commit message
// skipMatch := skipRe.FindString(build.Message)
// if len(skipMatch) > 0 {
// logrus.Infof("ignoring hook. %s found in %s", skipMatch, build.Commit)
// c.Writer.WriteHeader(204)
// return
// }
//
// repo, err := store.GetRepoOwnerName(c, tmprepo.Owner, tmprepo.Name)
// if err != nil {
// logrus.Errorf("failure to find repo %s/%s from hook. %s", tmprepo.Owner, tmprepo.Name, err)
// c.AbortWithError(404, err)
// return
// }
//
// // get the token and verify the hook is authorized
// parsed, err := token.ParseRequest(c.Request, func(t *token.Token) (string, error) {
// return repo.Hash, nil
// })
// if err != nil {
// logrus.Errorf("failure to parse token from hook for %s. %s", repo.FullName, err)
// c.AbortWithError(400, err)
// return
// }
// if parsed.Text != repo.FullName {
// logrus.Errorf("failure to verify token from hook. Expected %s, got %s", repo.FullName, parsed.Text)
// c.AbortWithStatus(403)
// return
// }
//
// if repo.UserID == 0 {
// logrus.Warnf("ignoring hook. repo %s has no owner.", repo.FullName)
// c.Writer.WriteHeader(204)
// return
// }
// var skipped = true
// if (build.Event == model.EventPush && repo.AllowPush) ||
// (build.Event == model.EventPull && repo.AllowPull) ||
// (build.Event == model.EventDeploy && repo.AllowDeploy) ||
// (build.Event == model.EventTag && repo.AllowTag) {
// skipped = false
// }
//
// if skipped {
// logrus.Infof("ignoring hook. repo %s is disabled for %s events.", repo.FullName, build.Event)
// c.Writer.WriteHeader(204)
// return
// }
//
// user, err := store.GetUser(c, repo.UserID)
// if err != nil {
// logrus.Errorf("failure to find repo owner %s. %s", repo.FullName, err)
// c.AbortWithError(500, err)
// return
// }
//
// // if the remote has a refresh token, the current access token
// // may be stale. Therefore, we should refresh prior to dispatching
// // the job.
// if refresher, ok := remote_.(remote.Refresher); ok {
// ok, _ := refresher.Refresh(user)
// if ok {
// store.UpdateUser(c, user)
// }
// }
//
// // fetch the build file from the database
// cfg := ToConfig(c)
// raw, err := remote_.File(user, repo, build, cfg.Yaml)
// if err != nil {
// logrus.Errorf("failure to get build config for %s. %s", repo.FullName, err)
// c.AbortWithError(404, err)
// return
// }
// sec, err := remote_.File(user, repo, build, cfg.Shasum)
// if err != nil {
// logrus.Debugf("cannot find yaml signature for %s. %s", repo.FullName, err)
// // NOTE we don't exit on failure. The sec file is optional
// }
//
// axes, err := matrix.Parse(raw)
// if err != nil {
// c.String(500, "Failed to parse yaml file or calculate matrix. %s", err)
// return
// }
// if len(axes) == 0 {
// axes = append(axes, matrix.Axis{})
// }
//
// netrc, err := remote_.Netrc(user, repo)
// if err != nil {
// c.String(500, "Failed to generate netrc file. %s", err)
// return
// }
//
// // verify the branches can be built vs skipped
// branches, err := yaml.ParseBytes(raw)
// if err != nil {
// c.String(500, "Failed to parse yaml file. %s", err)
// return
// }
// if !branches.Branches.Match(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
// c.String(200, "Branch does not match restrictions defined in yaml")
// return
// }
//
// signature, err := jose.ParseSigned(string(sec))
// if err != nil {
// logrus.Debugf("cannot parse .drone.yml.sig file. %s", err)
// } else if len(sec) == 0 {
// logrus.Debugf("cannot parse .drone.yml.sig file. empty file")
// } else {
// build.Signed = true
// output, verr := signature.Verify([]byte(repo.Hash))
// if verr != nil {
// logrus.Debugf("cannot verify .drone.yml.sig file. %s", verr)
// } else if string(output) != string(raw) {
// logrus.Debugf("cannot verify .drone.yml.sig file. no match")
// } else {
// build.Verified = true
// }
// }
//
// // update some build fields
// build.Status = model.StatusPending
// build.RepoID = repo.ID
//
// // and use a transaction
// var jobs []*model.Job
// for num, axis := range axes {
// jobs = append(jobs, &model.Job{
// BuildID: build.ID,
// Number: num + 1,
// Status: model.StatusPending,
// Environment: axis,
// })
// }
// err = store.CreateBuild(c, build, jobs...)
// if err != nil {
// logrus.Errorf("failure to save commit for %s. %s", repo.FullName, err)
// c.AbortWithError(500, err)
// return
// }
//
// c.JSON(200, build)
//
// uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
// err = remote_.Status(user, repo, build, uri)
// if err != nil {
// logrus.Errorf("error setting commit status for %s/%d", repo.FullName, build.Number)
// }
//
// // get the previous build so that we can send
// // on status change notifications
// last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
// secs, err := store.GetMergedSecretList(c, repo)
// if err != nil {
// logrus.Debugf("Error getting secrets for %s#%d. %s", repo.FullName, build.Number, err)
// }
//
// //
// // BELOW: NEW
// //
//
// b := builder{
// Repo: repo,
// Curr: build,
// Last: last,
// Netrc: netrc,
// Secs: secs,
// Link: httputil.GetURL(c.Request),
// Yaml: string(raw),
// }
// items, err := b.Build()
// if err != nil {
// build.Status = model.StatusError
// build.Started = time.Now().Unix()
// build.Finished = build.Started
// build.Error = err.Error()
// store.CreateBuild(c, build, build.Jobs...)
// return
// }
//
// for _, item := range items {
// build.Jobs = append(build.Jobs, item.Job)
// }
//
// if err := store.CreateBuild(c, build, build.Jobs...); err != nil {
// logrus.Errorf("failure to save commit for %s. %s", repo.FullName, err)
// c.AbortWithError(500, err)
// return
// }
//
// for _, item := range items {
//
// task := new(queue.Task)
// task.ID = fmt.Sprint(item.Job.ID)
// task.Labels = map[string]string{}
// task.Labels["platform"] = item.Platform
// for k, v := range item.Labels {
// task.Labels[k] = v
// }
//
// task.Data, _ = json.Marshal(rpc.Pipeline{
// ID: fmt.Sprint(item.Job.ID),
// Config: item.Config,
// Timeout: b.Repo.Timeout,
// })
//
// config.logger.Open(context.Background(), task.ID)
// config.queue.Push(context.Background(), task)
// }
//
// //
// // new code here
// //
//
// message := pubsub.Message{
// Labels: map[string]string{
// "repo": repo.FullName,
// "private": strconv.FormatBool(repo.IsPrivate),
// },
// }
// message.Data, _ = json.Marshal(model.Event{
// Type: model.Enqueued,
// Repo: *repo,
// Build: *build,
// })
// // TODO remove global reference
// config.pubsub.Publish(c, "topic/events", message)
//
// //
// // workspace
// //
//
// for _, job := range jobs {
//
// metadata := metadataFromStruct(repo, build, last, job, httputil.GetURL(c.Request))
// environ := metadata.Environ()
//
// secrets := map[string]string{}
// for _, sec := range secs {
// if !sec.MatchEvent(build.Event) {
// continue
// }
// if build.Verified || sec.SkipVerify {
// secrets[sec.Name] = sec.Value
// }
// }
// sub := func(name string) string {
// if v, ok := environ[name]; ok {
// return v
// }
// return secrets[name]
// }
// if s, err := envsubst.Eval(string(raw), sub); err != nil {
// raw = []byte(s)
// }
// parsed, err := yaml.ParseBytes(raw)
// if err != nil {
// job.ExitCode = 255
// job.Enqueued = time.Now().Unix()
// job.Started = time.Now().Unix()
// job.Finished = time.Now().Unix()
// job.Error = err.Error()
// store.UpdateBuildJob(c, build, job)
// continue
// }
//
// lerr := linter.New(
// linter.WithTrusted(repo.IsTrusted),
// ).Lint(parsed)
// if lerr != nil {
// job.ExitCode = 255
// job.Enqueued = time.Now().Unix()
// job.Started = time.Now().Unix()
// job.Finished = time.Now().Unix()
// job.Error = lerr.Error()
// store.UpdateBuildJob(c, build, job)
// continue
// }
//
// ir := compiler.New(
// compiler.WithEnviron(environ),
// // TODO ability to customize the escalated plugins
// compiler.WithEscalated("plugins/docker", "plugins/gcr", "plugins/ecr"),
// compiler.WithLocal(false),
// compiler.WithNetrc(netrc.Login, netrc.Password, netrc.Machine),
// compiler.WithPrefix(
// fmt.Sprintf(
// "%d_%d",
// job.ID,
// time.Now().Unix(),
// ),
// ),
// compiler.WithEnviron(job.Environment),
// compiler.WithProxy(),
// // TODO ability to set global volumes for things like certs
// compiler.WithVolumes(),
// compiler.WithWorkspaceFromURL("/drone", repo.Link),
// ).Compile(parsed)
//
// // TODO there is a chicken and egg problem here because
// // the compiled yaml has a platform environment variable
// // that is not correctly set, because we are just about
// // to set it ....
// // TODO maybe we remove platform from metadata and let
// // the compiler set the value from the yaml itself.
// if parsed.Platform == "" {
// parsed.Platform = "linux/amd64"
// }
//
// for _, sec := range secs {
// if !sec.MatchEvent(build.Event) {
// continue
// }
// if build.Verified || sec.SkipVerify {
// ir.Secrets = append(ir.Secrets, &backend.Secret{
// Mask: sec.Conceal,
// Name: sec.Name,
// Value: sec.Value,
// })
// }
// }
//
// task := new(queue.Task)
// task.ID = fmt.Sprint(job.ID)
// task.Labels = map[string]string{}
// task.Labels["platform"] = parsed.Platform
// if parsed.Labels != nil {
// for k, v := range parsed.Labels {
// task.Labels[k] = v
// }
// }
//
// task.Data, _ = json.Marshal(rpc.Pipeline{
// ID: fmt.Sprint(job.ID),
// Config: ir,
// Timeout: repo.Timeout,
// })
//
// config.logger.Open(context.Background(), task.ID)
// config.queue.Push(context.Background(), task)
// }
//
// }
// metadataFromStruct returns the pipeline metadata built from the model structs.
func metadataFromStruct(repo *model.Repo, build, last *model.Build, job *model.Job, link string) frontend.Metadata {
return frontend.Metadata{
Repo: frontend.Repo{
Name: repo.Name,
Link: repo.Link,
Remote: repo.Clone,
Private: repo.IsPrivate,
},
Curr: frontend.Build{
Number: build.Number,
Created: build.Created,
Started: build.Started,
Finished: build.Finished,
Status: build.Status,
Event: build.Event,
Link: build.Link,
Target: build.Deploy,
Commit: frontend.Commit{
Sha: build.Commit,
Ref: build.Ref,
Refspec: build.Refspec,
Branch: build.Branch,
Message: build.Message,
Author: frontend.Author{
Name: build.Author,
Email: build.Email,
Avatar: build.Avatar,
},
},
},
Prev: frontend.Build{
Number: last.Number,
Created: last.Created,
Started: last.Started,
Finished: last.Finished,
Status: last.Status,
Event: last.Event,
Link: last.Link,
Target: last.Deploy,
Commit: frontend.Commit{
Sha: last.Commit,
Ref: last.Ref,
Refspec: last.Refspec,
Branch: last.Branch,
Message: last.Message,
Author: frontend.Author{
Name: last.Author,
Email: last.Email,
Avatar: last.Avatar,
},
},
},
Job: frontend.Job{
Number: job.Number,
Matrix: job.Environment,
},
Sys: frontend.System{
Name: "drone",
Link: link,
Arch: "linux/amd64",
},
}
}
// use helper function to return ([]backend.Config, error)
// 1. fetch everything from github
// 2. create and persist the build object
//
// 3. generate the build jobs [Launcher?]
// a. parse yaml
// b. lint yaml
// c. compile yaml
//
// 4. persist the build jobs (... what if I already have jobs, via re-start)
// 5. update github status
// 6. send to queue
// 7. trigger pubsub
type builder struct {
Repo *model.Repo
Curr *model.Build
Last *model.Build
Netrc *model.Netrc
Secs []*model.Secret
Link string
Yaml string
}
type buildItem struct {
Job *model.Job
Platform string
Labels map[string]string
Config *backend.Config
}
func (b *builder) Build() ([]*buildItem, error) {
axes, err := matrix.ParseString(b.Yaml)
if err != nil {
return nil, err
}
if len(axes) == 0 {
axes = append(axes, matrix.Axis{})
}
var items []*buildItem
for i, axis := range axes {
job := &model.Job{
BuildID: b.Curr.ID,
Number: i + 1,
Status: model.StatusPending,
Environment: axis,
Enqueued: b.Curr.Created,
}
metadata := metadataFromStruct(b.Repo, b.Curr, b.Last, job, b.Link)
environ := metadata.Environ()
for k, v := range metadata.EnvironDrone() {
environ[k] = v
}
secrets := map[string]string{}
for _, sec := range b.Secs {
if !sec.MatchEvent(b.Curr.Event) {
continue
}
if b.Curr.Verified || sec.SkipVerify {
secrets[sec.Name] = sec.Value
}
}
sub := func(name string) string {
if v, ok := environ[name]; ok {
return v
}
return secrets[name]
}
y := b.Yaml
if s, err := envsubst.Eval(y, sub); err == nil {
y = s
}
parsed, err := yaml.ParseString(y)
if err != nil {
return nil, err
}
metadata.Sys.Arch = parsed.Platform
if metadata.Sys.Arch == "" {
metadata.Sys.Arch = "linux/amd64"
}
lerr := linter.New(
linter.WithTrusted(b.Repo.IsTrusted),
).Lint(parsed)
if lerr != nil {
return nil, lerr
}
ir := compiler.New(
compiler.WithEnviron(environ),
// TODO ability to customize the escalated plugins
compiler.WithEscalated("plugins/docker", "plugins/gcr", "plugins/ecr"),
compiler.WithLocal(false),
compiler.WithNetrc(b.Netrc.Login, b.Netrc.Password, b.Netrc.Machine),
compiler.WithPrefix(
fmt.Sprintf(
"%d_%d",
job.ID,
time.Now().Unix(),
),
),
compiler.WithEnviron(job.Environment),
compiler.WithProxy(),
// TODO ability to set global volumes for things like certs
compiler.WithVolumes(),
compiler.WithWorkspaceFromURL("/drone", b.Curr.Link),
).Compile(parsed)
for _, sec := range b.Secs {
if !sec.MatchEvent(b.Curr.Event) {
continue
}
if b.Curr.Verified || sec.SkipVerify {
ir.Secrets = append(ir.Secrets, &backend.Secret{
Mask: sec.Conceal,
Name: sec.Name,
Value: sec.Value,
})
}
}
item := &buildItem{
Job: job,
Config: ir,
Labels: parsed.Labels,
Platform: metadata.Sys.Arch,
}
if item.Labels == nil {
item.Labels = map[string]string{}
}
items = append(items, item)
}
return items, nil
}
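// The sketch below is illustrative only (the enqueueItems helper does not
// exist in this file); it shows how a caller such as PostHook2 is expected to
// drive the builder per steps 4-6 of the outline above: compile the items,
// then push one queue task per item and open its log stream.
func enqueueItems(b builder) error {
	items, err := b.Build()
	if err != nil {
		return err
	}
	for _, item := range items {
		// one queue task per compiled build item
		task := new(queue.Task)
		task.ID = fmt.Sprint(item.Job.ID)
		task.Labels = map[string]string{"platform": item.Platform}
		for k, v := range item.Labels {
			task.Labels[k] = v
		}
		task.Data, _ = json.Marshal(rpc.Pipeline{
			ID:      task.ID,
			Config:  item.Config,
			Timeout: b.Repo.Timeout,
		})
		config.logger.Open(context.Background(), task.ID)
		config.queue.Push(context.Background(), task)
	}
	return nil
}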
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
func PostHook2(c *gin.Context) {
remote_ := remote.FromContext(c)
tmprepo, build, err := remote_.Hook(c.Request)
if err != nil {
logrus.Errorf("failure to parse hook. %s", err)
c.AbortWithError(400, err)
return
}
if build == nil {
c.Writer.WriteHeader(200)
return
}
if tmprepo == nil {
logrus.Errorf("failure to ascertain repo from hook.")
c.Writer.WriteHeader(400)
return
}
// skip the build if any case-insensitive combination of the words "skip" and "ci"
// wrapped in square brackets appear in the commit message
skipMatch := skipRe.FindString(build.Message)
if len(skipMatch) > 0 {
logrus.Infof("ignoring hook. %s found in %s", skipMatch, build.Commit)
c.Writer.WriteHeader(204)
return
}
repo, err := store.GetRepoOwnerName(c, tmprepo.Owner, tmprepo.Name)
if err != nil {
logrus.Errorf("failure to find repo %s/%s from hook. %s", tmprepo.Owner, tmprepo.Name, err)
c.AbortWithError(404, err)
return
}
// get the token and verify the hook is authorized
parsed, err := token.ParseRequest(c.Request, func(t *token.Token) (string, error) {
return repo.Hash, nil
})
if err != nil {
logrus.Errorf("failure to parse token from hook for %s. %s", repo.FullName, err)
c.AbortWithError(400, err)
return
}
if parsed.Text != repo.FullName {
logrus.Errorf("failure to verify token from hook. Expected %s, got %s", repo.FullName, parsed.Text)
c.AbortWithStatus(403)
return
}
if repo.UserID == 0 {
logrus.Warnf("ignoring hook. repo %s has no owner.", repo.FullName)
c.Writer.WriteHeader(204)
return
}
var skipped = true
if (build.Event == model.EventPush && repo.AllowPush) ||
(build.Event == model.EventPull && repo.AllowPull) ||
(build.Event == model.EventDeploy && repo.AllowDeploy) ||
(build.Event == model.EventTag && repo.AllowTag) {
skipped = false
}
if skipped {
logrus.Infof("ignoring hook. repo %s is disabled for %s events.", repo.FullName, build.Event)
c.Writer.WriteHeader(204)
return
}
user, err := store.GetUser(c, repo.UserID)
if err != nil {
logrus.Errorf("failure to find repo owner %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
// if the remote has a refresh token, the current access token
// may be stale. Therefore, we should refresh prior to dispatching
// the job.
if refresher, ok := remote_.(remote.Refresher); ok {
ok, _ := refresher.Refresh(user)
if ok {
store.UpdateUser(c, user)
}
}
// fetch the build file from the database
cfg := ToConfig(c)
raw, err := remote_.File(user, repo, build, cfg.Yaml)
if err != nil {
logrus.Errorf("failure to get build config for %s. %s", repo.FullName, err)
c.AbortWithError(404, err)
return
}
sec, err := remote_.File(user, repo, build, cfg.Shasum)
if err != nil {
logrus.Debugf("cannot find yaml signature for %s. %s", repo.FullName, err)
}
netrc, err := remote_.Netrc(user, repo)
if err != nil {
c.String(500, "Failed to generate netrc file. %s", err)
return
}
// verify the branches can be built vs skipped
branches, err := yaml.ParseBytes(raw)
if err != nil {
c.String(500, "Failed to parse yaml file. %s", err)
return
}
if !branches.Branches.Match(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
c.String(200, "Branch does not match restrictions defined in yaml")
return
}
signature, err := jose.ParseSigned(string(sec))
if err != nil {
logrus.Debugf("cannot parse .drone.yml.sig file. %s", err)
} else if len(sec) == 0 {
logrus.Debugf("cannot parse .drone.yml.sig file. empty file")
} else {
build.Signed = true
output, verr := signature.Verify([]byte(repo.Hash))
if verr != nil {
logrus.Debugf("cannot verify .drone.yml.sig file. %s", verr)
} else if string(output) != string(raw) {
logrus.Debugf("cannot verify .drone.yml.sig file. no match")
} else {
build.Verified = true
}
}
// update some build fields
build.Status = model.StatusPending
build.RepoID = repo.ID
if err := store.CreateBuild(c, build, build.Jobs...); err != nil {
logrus.Errorf("failure to save commit for %s. %s", repo.FullName, err)
c.AbortWithError(500, err)
return
}
c.JSON(200, build)
// get the previous build so that we can send
// on status change notifications
last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
secs, err := store.GetMergedSecretList(c, repo)
if err != nil {
logrus.Debugf("Error getting secrets for %s#%d. %s", repo.FullName, build.Number, err)
}
//
// BELOW: NEW
//
defer func() {
uri := fmt.Sprintf("%s/%s/%d", httputil.GetURL(c.Request), repo.FullName, build.Number)
err = remote_.Status(user, repo, build, uri)
if err != nil {
logrus.Errorf("error setting commit status for %s/%d", repo.FullName, build.Number)
}
}()
b := builder{
Repo: repo,
Curr: build,
Last: last,
Netrc: netrc,
Secs: secs,
Link: httputil.GetURL(c.Request),
Yaml: string(raw),
}
items, err := b.Build()
if err != nil {
build.Status = model.StatusError
build.Started = time.Now().Unix()
build.Finished = build.Started
build.Error = err.Error()
return
}
for _, item := range items {
build.Jobs = append(build.Jobs, item.Job)
store.CreateJob(c, item.Job)
// TODO err
}
//
// publish topic
//
message := pubsub.Message{
Labels: map[string]string{
"repo": repo.FullName,
"private": strconv.FormatBool(repo.IsPrivate),
},
}
message.Data, _ = json.Marshal(model.Event{
Type: model.Enqueued,
Repo: *repo,
Build: *build,
})
// TODO remove global reference
config.pubsub.Publish(c, "topic/events", message)
//
// end publish topic
//
for _, item := range items {
task := new(queue.Task)
task.ID = fmt.Sprint(item.Job.ID)
task.Labels = map[string]string{}
task.Labels["platform"] = item.Platform
for k, v := range item.Labels {
task.Labels[k] = v
}
task.Data, _ = json.Marshal(rpc.Pipeline{
ID: fmt.Sprint(item.Job.ID),
Config: item.Config,
Timeout: b.Repo.Timeout,
})
config.logger.Open(context.Background(), task.ID)
config.queue.Push(context.Background(), task)
}
}

View file

@@ -1,163 +0,0 @@
package server
import (
"bytes"
"fmt"
"net/http"
"strconv"
"time"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/drone/drone/model"
"github.com/drone/drone/remote"
"github.com/drone/drone/store"
"github.com/drone/mq/stomp"
"github.com/gorilla/websocket"
)
// newline defines a newline constant to separate lines in the build output
var newline = []byte{'\n'}
// upgrader defines the default behavior for upgrading the websocket.
var upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool {
return true
},
}
// HandleUpdate handles build updates from the agent and persists to the database.
func HandleUpdate(c context.Context, message *stomp.Message) {
defer func() {
message.Release()
if r := recover(); r != nil {
err := r.(error)
logrus.Errorf("Panic recover: broker update handler: %s", err)
}
}()
work := new(model.Work)
if err := message.Unmarshal(work); err != nil {
logrus.Errorf("Invalid input. %s", err)
return
}
// TODO(bradrydzewski) it is really annoying that we have to do this lookup
// and I'd prefer not to. The reason we do this is because the Build and Job
// have fields that aren't serialized to json and would be reset to their
// empty values if we just saved what was coming in the http.Request body.
build, err := store.GetBuild(c, work.Build.ID)
if err != nil {
logrus.Errorf("Unable to find build. %s", err)
return
}
job, err := store.GetJob(c, work.Job.ID)
if err != nil {
logrus.Errorf("Unable to find job. %s", err)
return
}
build.Started = work.Build.Started
build.Finished = work.Build.Finished
build.Status = work.Build.Status
job.Started = work.Job.Started
job.Finished = work.Job.Finished
job.Status = work.Job.Status
job.ExitCode = work.Job.ExitCode
job.Error = work.Job.Error
if build.Status == model.StatusPending {
build.Started = work.Job.Started
build.Status = model.StatusRunning
store.UpdateBuild(c, build)
}
// if job.Status == model.StatusRunning {
// err := stream.Create(c, stream.ToKey(job.ID))
// if err != nil {
// logrus.Errorf("Unable to create stream. %s", err)
// }
// }
ok, err := store.UpdateBuildJob(c, build, job)
if err != nil {
logrus.Errorf("Unable to update job. %s", err)
return
}
if ok {
// get the user because we transfer the user from the server to the agent
// and back, and we lose the token, which does not get serialized to json.
user, uerr := store.GetUser(c, work.User.ID)
if uerr != nil {
logrus.Errorf("Unable to find user. %s", err)
return
}
remote.Status(c, user, work.Repo, build,
fmt.Sprintf("%s/%s/%d", work.System.Link, work.Repo.FullName, work.Build.Number))
}
client := stomp.MustFromContext(c)
err = client.SendJSON("/topic/events", model.Event{
Type: func() model.EventType {
// HACK we don't even really care about the event type,
// so we should just simplify how events are triggered.
if job.Status == model.StatusRunning {
return model.Started
}
return model.Finished
}(),
Repo: *work.Repo,
Build: *build,
Job: *job,
},
stomp.WithHeader("repo", work.Repo.FullName),
stomp.WithHeader("private", strconv.FormatBool(work.Repo.IsPrivate)),
)
if err != nil {
logrus.Errorf("Unable to publish to /topic/events. %s", err)
}
if job.Status == model.StatusRunning {
return
}
var buf bytes.Buffer
var sub []byte
done := make(chan bool)
dest := fmt.Sprintf("/topic/logs.%d", job.ID)
sub, err = client.Subscribe(dest, stomp.HandlerFunc(func(m *stomp.Message) {
defer m.Release()
if m.Header.GetBool("eof") {
done <- true
return
}
buf.Write(m.Body)
buf.WriteByte('\n')
}))
if err != nil {
logrus.Errorf("Unable to read logs from broker. %s", err)
return
}
defer func() {
client.Send(dest, []byte{}, stomp.WithRetain("remove"))
client.Unsubscribe(sub)
}()
select {
case <-done:
case <-time.After(30 * time.Second):
logrus.Errorf("Unable to read logs from broker. Timeout. %s", err)
return
}
if err := store.WriteLog(c, job, &buf); err != nil {
logrus.Errorf("Unable to write logs to store. %s", err)
return
}
}

View file

@@ -3,6 +3,7 @@ package server
import (
"context"
"fmt"
"net/http"
"strconv"
"time"
@@ -12,7 +13,6 @@ import (
"github.com/drone/drone/model"
"github.com/drone/drone/router/middleware/session"
"github.com/drone/drone/store"
"github.com/drone/mq/stomp"
"github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
@@ -28,160 +28,17 @@ var (
// Send pings to client with this period. Must be less than pongWait.
pingPeriod = 30 * time.Second
// upgrader defines the default behavior for upgrading the websocket.
upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool {
return true
},
}
)
// LogStream streams the build log output to the client.
func LogStream(c *gin.Context) {
repo := session.Repo(c)
buildn, _ := strconv.Atoi(c.Param("build"))
jobn, _ := strconv.Atoi(c.Param("number"))
c.Writer.Header().Set("Content-Type", "text/event-stream")
build, err := store.GetBuildNumber(c, repo, buildn)
if err != nil {
logrus.Debugln("stream cannot get build number.", err)
c.AbortWithError(404, err)
return
}
job, err := store.GetJobNumber(c, build, jobn)
if err != nil {
logrus.Debugln("stream cannot get job number.", err)
c.AbortWithError(404, err)
return
}
if job.Status != model.StatusRunning {
logrus.Debugln("stream not found.")
c.AbortWithStatus(404)
return
}
ws, err := upgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
if _, ok := err.(websocket.HandshakeError); !ok {
logrus.Errorf("Cannot upgrade websocket. %s", err)
}
return
}
logrus.Debugf("Successfull upgraded websocket")
ticker := time.NewTicker(pingPeriod)
defer ticker.Stop()
logs := make(chan []byte)
done := make(chan bool)
var eof bool
dest := fmt.Sprintf("/topic/logs.%d", job.ID)
client, _ := stomp.FromContext(c)
sub, err := client.Subscribe(dest, stomp.HandlerFunc(func(m *stomp.Message) {
if m.Header.GetBool("eof") {
eof = true
done <- true
} else if eof {
return
} else {
logs <- m.Body
}
m.Release()
}))
if err != nil {
logrus.Errorf("Unable to read logs from broker. %s", err)
return
}
defer func() {
client.Unsubscribe(sub)
close(done)
close(logs)
}()
for {
select {
case buf := <-logs:
ws.SetWriteDeadline(time.Now().Add(writeWait))
ws.WriteMessage(websocket.TextMessage, buf)
case <-done:
return
case <-ticker.C:
err := ws.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait))
if err != nil {
return
}
}
}
}
// EventStream produces the User event stream, sending all repository, build
// and agent events to the client.
func EventStream(c *gin.Context) {
ws, err := upgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
if _, ok := err.(websocket.HandshakeError); !ok {
logrus.Errorf("Cannot upgrade websocket. %s", err)
}
return
}
logrus.Debugf("Successfull upgraded websocket")
user := session.User(c)
repo := map[string]bool{}
if user != nil {
repo, _ = cache.GetRepoMap(c, user)
}
eventc := make(chan []byte, 10)
quitc := make(chan bool)
tick := time.NewTicker(pingPeriod)
defer func() {
tick.Stop()
ws.Close()
logrus.Debug("Successfully closed websocket")
}()
client := stomp.MustFromContext(c)
sub, err := client.Subscribe("/topic/events", stomp.HandlerFunc(func(m *stomp.Message) {
name := m.Header.GetString("repo")
priv := m.Header.GetBool("private")
if repo[name] || !priv {
eventc <- m.Body
}
m.Release()
}))
if err != nil {
logrus.Errorf("Unable to read logs from broker. %s", err)
return
}
defer func() {
client.Unsubscribe(sub)
close(quitc)
close(eventc)
}()
go func() {
defer func() {
recover()
}()
for {
select {
case <-quitc:
return
case event, ok := <-eventc:
if !ok {
return
}
ws.SetWriteDeadline(time.Now().Add(writeWait))
ws.WriteMessage(websocket.TextMessage, event)
case <-tick.C:
err := ws.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait))
if err != nil {
return
}
}
}
}()
reader(ws)
}
func reader(ws *websocket.Conn) {
defer ws.Close()
ws.SetReadLimit(512)
@@ -198,14 +55,7 @@ func reader(ws *websocket.Conn) {
}
}
//
// CANARY IMPLEMENTATION
//
// This file is a complete disaster because I'm trying to wedge in some
// experimental code. Please pardon our appearance during renovations.
//
func LogStream2(c *gin.Context) {
func LogStream(c *gin.Context) {
repo := session.Repo(c)
buildn, _ := strconv.Atoi(c.Param("build"))
jobn, _ := strconv.Atoi(c.Param("number"))
@@ -286,7 +136,7 @@ func LogStream2(c *gin.Context) {
reader(ws)
}
func EventStream2(c *gin.Context) {
func EventStream(c *gin.Context) {
ws, err := upgrader.Upgrade(c.Writer, c.Request, nil)
if err != nil {
if _, ok := err.(websocket.HandshakeError); !ok {

View file

@@ -6,7 +6,7 @@ var (
// VersionMajor is for an API incompatible changes
VersionMajor = 0
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 5
VersionMinor = 6
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0

View file

@@ -1,18 +0,0 @@
package yaml
import "gopkg.in/yaml.v2"
// ParseBranch parses the branch section of the Yaml document.
func ParseBranch(in []byte) Constraint {
out := struct {
Constraint Constraint `yaml:"branches"`
}{}
yaml.Unmarshal(in, &out)
return out.Constraint
}
// ParseBranchString parses the branch section of the Yaml document.
func ParseBranchString(in string) Constraint {
return ParseBranch([]byte(in))
}

View file

@@ -1,44 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
)
func TestBranch(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Branch filter", func() {
g.It("Should parse and match emtpy", func() {
branch := ParseBranchString("")
g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match", func() {
branch := ParseBranchString("branches: { include: [ master, develop ] }")
g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match shortand", func() {
branch := ParseBranchString("branches: [ master, develop ]")
g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match shortand string", func() {
branch := ParseBranchString("branches: master")
g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match exclude", func() {
branch := ParseBranchString("branches: { exclude: [ master, develop ] }")
g.Assert(branch.Match("master")).IsFalse()
})
g.It("Should parse and match exclude shorthand", func() {
branch := ParseBranchString("branches: { exclude: master }")
g.Assert(branch.Match("master")).IsFalse()
})
})
}

View file

@@ -1,26 +0,0 @@
package yaml
// Build represents Docker image build instructions.
type Build struct {
Context string
Dockerfile string
Args map[string]string
}
// UnmarshalYAML implements custom Yaml unmarshaling.
func (b *Build) UnmarshalYAML(unmarshal func(interface{}) error) error {
err := unmarshal(&b.Context)
if err == nil {
return nil
}
out := struct {
Context string
Dockerfile string
Args map[string]string
}{}
err = unmarshal(&out)
b.Context = out.Context
b.Args = out.Args
b.Dockerfile = out.Dockerfile
return err
}

View file

@@ -1,38 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
"gopkg.in/yaml.v2"
)
func TestBuild(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Build", func() {
g.Describe("given a yaml file", func() {
g.It("should unmarshal", func() {
in := []byte(".")
out := Build{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
g.Assert(out.Context).Equal(".")
})
g.It("should unmarshal shorthand", func() {
in := []byte("{ context: ., dockerfile: Dockerfile }")
out := Build{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
g.Assert(out.Context).Equal(".")
g.Assert(out.Dockerfile).Equal("Dockerfile")
})
})
})
}

View file

@@ -1,67 +0,0 @@
package yaml
import "gopkg.in/yaml.v2"
// Workspace represents the build workspace.
type Workspace struct {
Base string
Path string
}
// Config represents the build configuration Yaml document.
type Config struct {
Image string
Build *Build
Workspace *Workspace
Pipeline []*Container
Services []*Container
Volumes []*Volume
Networks []*Network
}
// ParseString parses the Yaml configuration document.
func ParseString(data string) (*Config, error) {
return Parse([]byte(data))
}
// Parse parses Yaml configuration document.
func Parse(data []byte) (*Config, error) {
v := struct {
Image string
Build *Build
Workspace *Workspace
Services containerList
Pipeline containerList
Networks networkList
Volumes volumeList
}{}
err := yaml.Unmarshal(data, &v)
if err != nil {
return nil, err
}
for _, c := range v.Services.containers {
c.Detached = true
}
return &Config{
Image: v.Image,
Build: v.Build,
Workspace: v.Workspace,
Services: v.Services.containers,
Pipeline: v.Pipeline.containers,
Networks: v.Networks.networks,
Volumes: v.Volumes.volumes,
}, nil
}
type config struct {
Image string
Build *Build
Workspace *Workspace
Services containerList
Pipeline containerList
Networks networkList
Volumes volumeList
}

View file

@@ -1,108 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
)
func TestParse(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Parser", func() {
g.Describe("Given a yaml file", func() {
g.It("Should unmarshal a string", func() {
out, err := ParseString(sampleYaml)
if err != nil {
g.Fail(err)
}
g.Assert(out.Image).Equal("hello-world")
g.Assert(out.Workspace.Base).Equal("/go")
g.Assert(out.Workspace.Path).Equal("src/github.com/octocat/hello-world")
g.Assert(out.Build.Context).Equal(".")
g.Assert(out.Build.Dockerfile).Equal("Dockerfile")
g.Assert(out.Volumes[0].Name).Equal("custom")
g.Assert(out.Volumes[0].Driver).Equal("blockbridge")
g.Assert(out.Networks[0].Name).Equal("custom")
g.Assert(out.Networks[0].Driver).Equal("overlay")
g.Assert(out.Services[0].Name).Equal("database")
g.Assert(out.Services[0].Image).Equal("mysql")
g.Assert(out.Pipeline[0].Name).Equal("test")
g.Assert(out.Pipeline[0].Image).Equal("golang")
g.Assert(out.Pipeline[0].Commands).Equal([]string{"go install", "go test"})
g.Assert(out.Pipeline[1].Name).Equal("build")
g.Assert(out.Pipeline[1].Image).Equal("golang")
g.Assert(out.Pipeline[1].Commands).Equal([]string{"go build"})
g.Assert(out.Pipeline[2].Name).Equal("notify")
g.Assert(out.Pipeline[2].Image).Equal("slack")
})
// Check to make sure variable expansion works in yaml.MapSlice
g.It("Should unmarshal variables", func() {
out, err := ParseString(sampleVarYaml)
if err != nil {
g.Fail(err)
}
g.Assert(out.Pipeline[0].Name).Equal("notify_fail")
g.Assert(out.Pipeline[0].Image).Equal("plugins/slack")
g.Assert(len(out.Pipeline[0].Constraints.Event.Include)).Equal(0)
g.Assert(out.Pipeline[1].Name).Equal("notify_success")
g.Assert(out.Pipeline[1].Image).Equal("plugins/slack")
g.Assert(out.Pipeline[1].Constraints.Event.Include).Equal([]string{"success"})
})
})
})
}
var sampleYaml = `
image: hello-world
build:
context: .
dockerfile: Dockerfile
workspace:
path: src/github.com/octocat/hello-world
base: /go
pipeline:
test:
image: golang
commands:
- go install
- go test
build:
image: golang
commands:
- go build
when:
event: push
notify:
image: slack
channel: dev
when:
event: failure
services:
database:
image: mysql
networks:
custom:
driver: overlay
volumes:
custom:
driver: blockbridge
`
var sampleVarYaml = `
_slack: &SLACK
image: plugins/slack
pipeline:
notify_fail: *SLACK
notify_success:
<< : *SLACK
when:
event: success
`

View file

@@ -1,155 +0,0 @@
package yaml
import (
"path/filepath"
"github.com/drone/drone/yaml/types"
)
// Constraints define constraints for container execution.
type Constraints struct {
Repo Constraint
Ref Constraint
Platform Constraint
Environment Constraint
Event Constraint
Branch Constraint
Status Constraint
Matrix ConstraintMap
Local types.BoolTrue
}
// Match returns true if all constraints match the given input. If a single constraint
// fails a false value is returned.
func (c *Constraints) Match(arch, target, event, branch, status string, matrix map[string]string) bool {
return c.Platform.Match(arch) &&
c.Environment.Match(target) &&
c.Event.Match(event) &&
c.Branch.Match(branch) &&
c.Status.Match(status) &&
c.Matrix.Match(matrix)
}
// Constraint defines an individual constraint.
type Constraint struct {
Include []string
Exclude []string
}
// Match returns true if the string matches the include patterns and does not
// match any of the exclude patterns.
func (c *Constraint) Match(v string) bool {
if c.Excludes(v) {
return false
}
if c.Includes(v) {
return true
}
if len(c.Include) == 0 {
return true
}
return false
}
// Includes returns true if the string matches the include patterns.
func (c *Constraint) Includes(v string) bool {
for _, pattern := range c.Include {
if ok, _ := filepath.Match(pattern, v); ok {
return true
}
}
return false
}
// Excludes returns true if the string matches the exclude patterns.
func (c *Constraint) Excludes(v string) bool {
for _, pattern := range c.Exclude {
if ok, _ := filepath.Match(pattern, v); ok {
return true
}
}
return false
}
// UnmarshalYAML implements custom Yaml unmarshaling.
func (c *Constraint) UnmarshalYAML(unmarshal func(interface{}) error) error {
var out1 = struct {
Include types.StringOrSlice
Exclude types.StringOrSlice
}{}
var out2 types.StringOrSlice
unmarshal(&out1)
unmarshal(&out2)
c.Exclude = out1.Exclude.Slice()
c.Include = append(
out1.Include.Slice(),
out2.Slice()...,
)
return nil
}
// ConstraintMap defines an individual constraint for key value structures.
type ConstraintMap struct {
Include map[string]string
Exclude map[string]string
}
// Match returns true if the params matches the include key values and does not
// match any of the exclude key values.
func (c *ConstraintMap) Match(params map[string]string) bool {
// when no includes or excludes automatically match
if len(c.Include) == 0 && len(c.Exclude) == 0 {
return true
}
// exclusions are processed first, so we can match everything by default and
// then selectively exclude specific combinations.
if len(c.Exclude) != 0 {
var matches int
for key, val := range c.Exclude {
if params[key] == val {
matches++
}
}
if matches == len(c.Exclude) {
return false
}
}
for key, val := range c.Include {
if params[key] != val {
return false
}
}
return true
}
// UnmarshalYAML implements custom Yaml unmarshaling.
func (c *ConstraintMap) UnmarshalYAML(unmarshal func(interface{}) error) error {
out1 := struct {
Include map[string]string
Exclude map[string]string
}{
Include: map[string]string{},
Exclude: map[string]string{},
}
out2 := map[string]string{}
unmarshal(&out1)
unmarshal(&out2)
c.Include = out1.Include
c.Exclude = out1.Exclude
for k, v := range out2 {
c.Include[k] = v
}
return nil
}

View file

@@ -1,142 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
"gopkg.in/yaml.v2"
)
func TestConstraint(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Constraint", func() {
g.It("Should parse and match emtpy", func() {
c := parseConstraint("")
g.Assert(c.Match("master")).IsTrue()
})
g.It("Should parse and match", func() {
c := parseConstraint("{ include: [ master, develop ] }")
g.Assert(c.Include[0]).Equal("master")
g.Assert(c.Include[1]).Equal("develop")
g.Assert(c.Match("master")).IsTrue()
})
g.It("Should parse and match shortand", func() {
c := parseConstraint("[ master, develop ]")
g.Assert(c.Include[0]).Equal("master")
g.Assert(c.Include[1]).Equal("develop")
g.Assert(c.Match("master")).IsTrue()
})
g.It("Should parse and match shortand string", func() {
c := parseConstraint("master")
g.Assert(c.Include[0]).Equal("master")
g.Assert(c.Match("master")).IsTrue()
})
g.It("Should parse and match exclude", func() {
c := parseConstraint("{ exclude: [ master, develop ] }")
g.Assert(c.Exclude[0]).Equal("master")
g.Assert(c.Exclude[1]).Equal("develop")
g.Assert(c.Match("master")).IsFalse()
})
g.It("Should parse and match exclude shorthand", func() {
c := parseConstraint("{ exclude: master }")
g.Assert(c.Exclude[0]).Equal("master")
g.Assert(c.Match("master")).IsFalse()
})
g.It("Should match include", func() {
b := Constraint{}
b.Include = []string{"master"}
g.Assert(b.Match("master")).IsTrue()
})
g.It("Should match include pattern", func() {
b := Constraint{}
b.Include = []string{"feature/*"}
g.Assert(b.Match("feature/foo")).IsTrue()
})
g.It("Should fail to match include pattern", func() {
b := Constraint{}
b.Include = []string{"feature/*"}
g.Assert(b.Match("master")).IsFalse()
})
g.It("Should match exclude", func() {
b := Constraint{}
b.Exclude = []string{"master"}
g.Assert(b.Match("master")).IsFalse()
})
g.It("Should match exclude pattern", func() {
b := Constraint{}
b.Exclude = []string{"feature/*"}
g.Assert(b.Match("feature/foo")).IsFalse()
})
g.It("Should match when eclude patterns mismatch", func() {
b := Constraint{}
b.Exclude = []string{"foo"}
g.Assert(b.Match("bar")).IsTrue()
})
})
}
func TestConstraintMap(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Constraint Map", func() {
g.It("Should parse and match emtpy", func() {
p := map[string]string{"golang": "1.5", "redis": "3.2"}
c := parseConstraintMap("")
g.Assert(c.Match(p)).IsTrue()
})
g.It("Should parse and match", func() {
p := map[string]string{"golang": "1.5", "redis": "3.2"}
c := parseConstraintMap("{ include: { golang: 1.5 } }")
g.Assert(c.Include["golang"]).Equal("1.5")
g.Assert(c.Match(p)).IsTrue()
})
g.It("Should parse and match shortand", func() {
p := map[string]string{"golang": "1.5", "redis": "3.2"}
c := parseConstraintMap("{ golang: 1.5 }")
g.Assert(c.Include["golang"]).Equal("1.5")
g.Assert(c.Match(p)).IsTrue()
})
g.It("Should parse and match exclude", func() {
p := map[string]string{"golang": "1.5", "redis": "3.2"}
c := parseConstraintMap("{ exclude: { golang: 1.5 } }")
g.Assert(c.Exclude["golang"]).Equal("1.5")
g.Assert(c.Match(p)).IsFalse()
})
g.It("Should parse and mismatch exclude", func() {
p := map[string]string{"golang": "1.5", "redis": "3.2"}
c := parseConstraintMap("{ exclude: { golang: 1.5, redis: 2.8 } }")
g.Assert(c.Exclude["golang"]).Equal("1.5")
g.Assert(c.Exclude["redis"]).Equal("2.8")
g.Assert(c.Match(p)).IsTrue()
})
})
}
func parseConstraint(s string) *Constraint {
c := &Constraint{}
yaml.Unmarshal([]byte(s), c)
return c
}
func parseConstraintMap(s string) *ConstraintMap {
c := &ConstraintMap{}
yaml.Unmarshal([]byte(s), c)
return c
}

View file

@@ -1,166 +0,0 @@
package yaml
import (
"fmt"
"github.com/drone/drone/yaml/types"
"gopkg.in/yaml.v2"
)
// Auth defines Docker authentication credentials.
type Auth struct {
Username string
Password string
Email string
}
// Container defines a Docker container.
type Container struct {
ID string
Name string
Image string
Build string
Pull bool
AuthConfig Auth
Detached bool
Disabled bool
Privileged bool
WorkingDir string
Environment map[string]string
Labels map[string]string
Entrypoint []string
Command []string
Commands []string
ExtraHosts []string
Volumes []string
VolumesFrom []string
Devices []string
Network string
DNS []string
DNSSearch []string
MemSwapLimit int64
MemLimit int64
ShmSize int64
CPUQuota int64
CPUShares int64
CPUSet string
OomKillDisable bool
Constraints Constraints
Vargs map[string]interface{}
}
// container is an intermediate type used for decoding a container in a format
// compatible with docker-compose.yml.
// the docker-compose format uses a bunch of custom types that are awkward to
// work with, which is why this intermediate type is decoded first and then
// converted to the easier-to-use Container type above.
type container struct {
Name string `yaml:"name"`
Image string `yaml:"image"`
Build string `yaml:"build"`
Pull bool `yaml:"pull"`
Detached bool `yaml:"detach"`
Privileged bool `yaml:"privileged"`
Environment types.MapEqualSlice `yaml:"environment"`
Labels types.MapEqualSlice `yaml:"labels"`
Entrypoint types.StringOrSlice `yaml:"entrypoint"`
Command types.StringOrSlice `yaml:"command"`
Commands types.StringOrSlice `yaml:"commands"`
ExtraHosts types.StringOrSlice `yaml:"extra_hosts"`
Volumes types.StringOrSlice `yaml:"volumes"`
VolumesFrom types.StringOrSlice `yaml:"volumes_from"`
Devices types.StringOrSlice `yaml:"devices"`
Network string `yaml:"network_mode"`
DNS types.StringOrSlice `yaml:"dns"`
DNSSearch types.StringOrSlice `yaml:"dns_search"`
MemSwapLimit int64 `yaml:"memswap_limit"`
MemLimit int64 `yaml:"mem_limit"`
ShmSize int64 `yaml:"shm_size"`
CPUQuota int64 `yaml:"cpu_quota"`
CPUShares int64 `yaml:"cpu_shares"`
CPUSet string `yaml:"cpuset"`
OomKillDisable bool `yaml:"oom_kill_disable"`
AuthConfig struct {
Username string `yaml:"username"`
Password string `yaml:"password"`
Email string `yaml:"email"`
Token string `yaml:"registry_token"`
} `yaml:"auth_config"`
Constraints Constraints `yaml:"when"`
Vargs map[string]interface{} `yaml:",inline"`
}
// containerList is an intermediate type used for decoding a slice of containers
// in a format compatible with docker-compose.yml
type containerList struct {
containers []*Container
}
// UnmarshalYAML implements custom Yaml unmarshaling.
func (c *containerList) UnmarshalYAML(unmarshal func(interface{}) error) error {
slice := yaml.MapSlice{}
err := unmarshal(&slice)
if err != nil {
return err
}
for _, s := range slice {
cc := container{}
out, merr := yaml.Marshal(s.Value)
if merr != nil {
return merr
}
err = yaml.Unmarshal(out, &cc)
if err != nil {
return err
}
if cc.Name == "" {
cc.Name = fmt.Sprintf("%v", s.Key)
}
if cc.Image == "" {
cc.Image = fmt.Sprintf("%v", s.Key)
}
c.containers = append(c.containers, &Container{
Name: cc.Name,
Image: cc.Image,
Build: cc.Build,
Pull: cc.Pull,
Detached: cc.Detached,
Privileged: cc.Privileged,
Environment: cc.Environment.Map(),
Labels: cc.Labels.Map(),
Entrypoint: cc.Entrypoint.Slice(),
Command: cc.Command.Slice(),
Commands: cc.Commands.Slice(),
ExtraHosts: cc.ExtraHosts.Slice(),
Volumes: cc.Volumes.Slice(),
VolumesFrom: cc.VolumesFrom.Slice(),
Devices: cc.Devices.Slice(),
Network: cc.Network,
DNS: cc.DNS.Slice(),
DNSSearch: cc.DNSSearch.Slice(),
MemSwapLimit: cc.MemSwapLimit,
MemLimit: cc.MemLimit,
ShmSize: cc.ShmSize,
CPUQuota: cc.CPUQuota,
CPUShares: cc.CPUShares,
CPUSet: cc.CPUSet,
OomKillDisable: cc.OomKillDisable,
Vargs: cc.Vargs,
AuthConfig: Auth{
Username: cc.AuthConfig.Username,
Password: cc.AuthConfig.Password,
Email: cc.AuthConfig.Email,
},
Constraints: cc.Constraints,
})
}
return err
}

View file

@@ -1,99 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
"gopkg.in/yaml.v2"
)
func TestContainerNode(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Containers", func() {
g.Describe("given a yaml file", func() {
g.It("should unmarshal", func() {
in := []byte(sampleContainer)
out := containerList{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
g.Assert(len(out.containers)).Equal(1)
c := out.containers[0]
g.Assert(c.Name).Equal("foo")
g.Assert(c.Image).Equal("golang")
g.Assert(c.Build).Equal(".")
g.Assert(c.Pull).Equal(true)
g.Assert(c.Detached).Equal(true)
g.Assert(c.Privileged).Equal(true)
g.Assert(c.Entrypoint).Equal([]string{"/bin/sh"})
g.Assert(c.Command).Equal([]string{"yes"})
g.Assert(c.Commands).Equal([]string{"whoami"})
g.Assert(c.ExtraHosts).Equal([]string{"foo.com"})
g.Assert(c.Volumes).Equal([]string{"/foo:/bar"})
g.Assert(c.VolumesFrom).Equal([]string{"foo"})
g.Assert(c.Devices).Equal([]string{"/dev/tty0"})
g.Assert(c.Network).Equal("bridge")
g.Assert(c.DNS).Equal([]string{"8.8.8.8"})
g.Assert(c.MemSwapLimit).Equal(int64(1))
g.Assert(c.MemLimit).Equal(int64(2))
g.Assert(c.CPUQuota).Equal(int64(3))
g.Assert(c.CPUSet).Equal("1,2")
g.Assert(c.OomKillDisable).Equal(true)
g.Assert(c.AuthConfig.Username).Equal("octocat")
g.Assert(c.AuthConfig.Password).Equal("password")
g.Assert(c.AuthConfig.Email).Equal("octocat@github.com")
g.Assert(c.Vargs["access_key"]).Equal("970d28f4dd477bc184fbd10b376de753")
g.Assert(c.Vargs["secret_key"]).Equal("9c5785d3ece6a9cdefa42eb99b58986f9095ff1c")
})
g.It("should unmarshal named", func() {
in := []byte("foo: { name: bar }")
out := containerList{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
g.Assert(len(out.containers)).Equal(1)
g.Assert(out.containers[0].Name).Equal("bar")
})
})
})
}
var sampleContainer = `
foo:
image: golang
build: .
pull: true
detach: true
privileged: true
environment:
FOO: BAR
entrypoint: /bin/sh
command: "yes"
commands: whoami
extra_hosts: foo.com
volumes: /foo:/bar
volumes_from: foo
devices: /dev/tty0
network_mode: bridge
dns: 8.8.8.8
memswap_limit: 1
mem_limit: 2
cpu_quota: 3
cpuset: 1,2
oom_kill_disable: true
auth_config:
username: octocat
password: password
email: octocat@github.com
access_key: 970d28f4dd477bc184fbd10b376de753
secret_key: 9c5785d3ece6a9cdefa42eb99b58986f9095ff1c
`

View file

@@ -1,26 +0,0 @@
package yaml
import (
"gopkg.in/yaml.v2"
"github.com/drone/drone/yaml/types"
)
// ParseLabel parses the labels section of the Yaml document.
func ParseLabel(in []byte) map[string]string {
out := struct {
Labels types.MapEqualSlice `yaml:"labels"`
}{}
yaml.Unmarshal(in, &out)
labels := out.Labels.Map()
if labels == nil {
labels = make(map[string]string)
}
return labels
}
// ParseLabelString parses the labels section of the Yaml document.
func ParseLabelString(in string) map[string]string {
return ParseLabel([]byte(in))
}

View file

@@ -1,32 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
)
func TestLabel(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Label parser", func() {
g.It("Should parse empty yaml", func() {
labels := ParseLabelString("")
g.Assert(len(labels)).Equal(0)
})
g.It("Should parse slice", func() {
labels := ParseLabelString("labels: [foo=bar, baz=boo]")
g.Assert(len(labels)).Equal(2)
g.Assert(labels["foo"]).Equal("bar")
g.Assert(labels["baz"]).Equal("boo")
})
g.It("Should parse map", func() {
labels := ParseLabelString("labels: {foo: bar, baz: boo}")
g.Assert(labels["foo"]).Equal("bar")
g.Assert(labels["baz"]).Equal("boo")
})
})
}

View file

@@ -1,117 +0,0 @@
package yaml
import (
"strings"
"gopkg.in/yaml.v2"
)
const (
limitTags = 10
limitAxis = 25
)
// Matrix represents the build matrix.
type Matrix map[string][]string
// Axis represents a single permutation of entries from the build matrix.
type Axis map[string]string
// String returns a string representation of an Axis as a comma-separated list
// of environment variables.
func (a Axis) String() string {
var envs []string
for k, v := range a {
envs = append(envs, k+"="+v)
}
return strings.Join(envs, " ")
}
// ParseMatrix parses the Yaml matrix definition.
func ParseMatrix(data []byte) ([]Axis, error) {
axis, err := parseMatrixList(data)
if err == nil && len(axis) != 0 {
return axis, nil
}
matrix, err := parseMatrix(data)
if err != nil {
return nil, err
}
// if not a matrix build, return nil so the caller can fall back to a single default axis.
if len(matrix) == 0 {
return nil, nil
}
return calcMatrix(matrix), nil
}
// ParseMatrixString parses the Yaml string matrix definition.
func ParseMatrixString(data string) ([]Axis, error) {
return ParseMatrix([]byte(data))
}
func calcMatrix(matrix Matrix) []Axis {
// calculate number of permutations and extract the list of tags
// (ie go_version, redis_version, etc)
var perm int
var tags []string
for k, v := range matrix {
perm *= len(v)
if perm == 0 {
perm = len(v)
}
tags = append(tags, k)
}
// structure to hold the transformed result set
axisList := []Axis{}
// for each axis calculate the unique set of values that should be used.
for p := 0; p < perm; p++ {
axis := map[string]string{}
decr := perm
for i, tag := range tags {
elems := matrix[tag]
decr = decr / len(elems)
elem := p / decr % len(elems)
axis[tag] = elems[elem]
// enforce a maximum number of tags in the build matrix.
if i > limitTags {
break
}
}
// append to the list of axis.
axisList = append(axisList, axis)
// enforce a maximum number of axis that should be calculated.
if p > limitAxis {
break
}
}
return axisList
}
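// Worked example (a sketch, not part of the original source): a matrix with
// go_version [go1, go1.2], python_version [3.2, 3.3], django_version
// [1.7, 1.7.1, 1.7.2] and redis_version [2.6, 2.8] yields 2*2*3*2 = 24 axes,
// matching the fakeMatrix fixture in the tests. The hypothetical helper below
// computes just that count, ignoring the limitTags and limitAxis caps that
// calcMatrix enforces.
func permutationCount(m Matrix) int {
	if len(m) == 0 {
		return 0
	}
	count := 1
	for _, values := range m {
		count *= len(values)
	}
	return count
}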
func parseMatrix(raw []byte) (Matrix, error) {
data := struct {
Matrix map[string][]string
}{}
err := yaml.Unmarshal(raw, &data)
return data.Matrix, err
}
func parseMatrixList(raw []byte) ([]Axis, error) {
data := struct {
Matrix struct {
Include []Axis
}
}{}
err := yaml.Unmarshal(raw, &data)
return data.Matrix.Include, err
}

View file

@@ -1,70 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
)
func TestMatrix(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Calculate matrix", func() {
axis, _ := ParseMatrixString(fakeMatrix)
g.It("Should calculate permutations", func() {
g.Assert(len(axis)).Equal(24)
})
g.It("Should not duplicate permutations", func() {
set := map[string]bool{}
for _, perm := range axis {
set[perm.String()] = true
}
g.Assert(len(set)).Equal(24)
})
g.It("Should return nil if no matrix", func() {
axis, err := ParseMatrixString("")
g.Assert(err == nil).IsTrue()
g.Assert(axis == nil).IsTrue()
})
g.It("Should return included axis", func() {
axis, err := ParseMatrixString(fakeMatrixInclude)
g.Assert(err == nil).IsTrue()
g.Assert(len(axis)).Equal(2)
g.Assert(axis[0]["go_version"]).Equal("1.5")
g.Assert(axis[1]["go_version"]).Equal("1.6")
g.Assert(axis[0]["python_version"]).Equal("3.4")
g.Assert(axis[1]["python_version"]).Equal("3.4")
})
})
}
var fakeMatrix = `
matrix:
go_version:
- go1
- go1.2
python_version:
- 3.2
- 3.3
django_version:
- 1.7
- 1.7.1
- 1.7.2
redis_version:
- 2.6
- 2.8
`
var fakeMatrixInclude = `
matrix:
include:
- go_version: 1.5
python_version: 3.4
- go_version: 1.6
python_version: 3.4
`

View file

@@ -1,51 +0,0 @@
package yaml
import (
"fmt"
"gopkg.in/yaml.v2"
)
// Network defines a Docker network.
type Network struct {
Name string
Driver string
DriverOpts map[string]string `yaml:"driver_opts"`
}
// networkList is an intermediate type used for decoding a slice of networks
// in a format compatible with docker-compose.yml
type networkList struct {
networks []*Network
}
// UnmarshalYAML implements custom Yaml unmarshaling.
func (n *networkList) UnmarshalYAML(unmarshal func(interface{}) error) error {
slice := yaml.MapSlice{}
err := unmarshal(&slice)
if err != nil {
return err
}
for _, s := range slice {
nn := Network{}
out, merr := yaml.Marshal(s.Value)
if merr != nil {
return merr
}
err = yaml.Unmarshal(out, &nn)
if err != nil {
return err
}
if nn.Name == "" {
nn.Name = fmt.Sprintf("%v", s.Key)
}
if nn.Driver == "" {
nn.Driver = "bridge"
}
n.networks = append(n.networks, &nn)
}
return err
}

View file

@@ -1,51 +0,0 @@
package yaml
import (
"testing"
"github.com/franela/goblin"
"gopkg.in/yaml.v2"
)
func TestNetworks(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Networks", func() {
g.Describe("given a yaml file", func() {
g.It("should unmarshal", func() {
in := []byte("foo: { driver: overlay }")
out := networkList{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
g.Assert(len(out.networks)).Equal(1)
g.Assert(out.networks[0].Name).Equal("foo")
g.Assert(out.networks[0].Driver).Equal("overlay")
})
g.It("should unmarshal named", func() {
in := []byte("foo: { name: bar }")
out := networkList{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
g.Assert(len(out.networks)).Equal(1)
g.Assert(out.networks[0].Name).Equal("bar")
})
g.It("should unmarshal and use default driver", func() {
in := []byte("foo: { name: bar }")
out := networkList{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
g.Assert(len(out.networks)).Equal(1)
g.Assert(out.networks[0].Driver).Equal("bridge")
})
})
})
}

View file

@@ -1,26 +0,0 @@
package yaml
import "gopkg.in/yaml.v2"
// ParsePlatform parses the platform section of the Yaml document.
func ParsePlatform(in []byte) string {
out := struct {
Platform string `yaml:"platform"`
}{}
yaml.Unmarshal(in, &out)
return out.Platform
}
// ParsePlatformString parses the platform section of the Yaml document.
func ParsePlatformString(in string) string {
return ParsePlatform([]byte(in))
}
// ParsePlatformDefault parses the platform section of the Yaml document.
func ParsePlatformDefault(in []byte, platform string) string {
if p := ParsePlatform([]byte(in)); p != "" {
return p
}
return platform
}

View file

@@ -1,32 +0,0 @@
package transform
import "github.com/drone/drone/yaml"
const clone = "clone"
// Clone transforms the Yaml to include a clone step.
func Clone(c *yaml.Config, plugin string) error {
switch plugin {
case "", "git":
plugin = "plugins/git:latest"
case "hg":
plugin = "plugins/hg:latest"
}
for _, p := range c.Pipeline {
if p.Name == clone {
if p.Image == "" {
p.Image = plugin
}
return nil
}
}
s := &yaml.Container{
Image: plugin,
Name: clone,
}
c.Pipeline = append([]*yaml.Container{s}, c.Pipeline...)
return nil
}
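// A minimal usage sketch (illustrative, not code from this package): parsing a
// pipeline without an explicit clone step and applying the transform prepends
// a "clone" container that runs plugins/git:latest.
//
//	conf, err := yaml.ParseString("pipeline: { build: { image: golang } }")
//	if err != nil {
//		// handle the parse error
//	}
//	if err := transform.Clone(conf, "git"); err != nil {
//		// handle the transform error
//	}
//	// conf.Pipeline[0].Name  == "clone"
//	// conf.Pipeline[0].Image == "plugins/git:latest"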

View file

@@ -1 +0,0 @@
package transform

View file

@@ -1,83 +0,0 @@
package transform
import (
"bytes"
"encoding/base64"
"fmt"
"strings"
"github.com/drone/drone/yaml"
)
// CommandTransform transforms the custom shell commands in the Yaml pipeline
// into a container ENTRYPOINT and CMD for execution.
func CommandTransform(c *yaml.Config) error {
for _, p := range c.Pipeline {
if isPlugin(p) {
continue
}
p.Entrypoint = []string{
"/bin/sh", "-c",
}
p.Command = []string{
"echo $DRONE_SCRIPT | base64 -d | /bin/sh -e",
}
if p.Environment == nil {
p.Environment = map[string]string{}
}
p.Environment["HOME"] = "/root"
p.Environment["SHELL"] = "/bin/sh"
p.Environment["DRONE_SCRIPT"] = toScript(
p.Commands,
)
}
return nil
}
func toScript(commands []string) string {
var buf bytes.Buffer
for _, command := range commands {
escaped := fmt.Sprintf("%q", command)
escaped = strings.Replace(escaped, "$", `\$`, -1)
buf.WriteString(fmt.Sprintf(
traceScript,
escaped,
command,
))
}
script := fmt.Sprintf(
setupScript,
buf.String(),
)
return base64.StdEncoding.EncodeToString([]byte(script))
}
// setupScript is a helper script that is added to the build to ensure
// a minimum set of environment variables are set correctly.
const setupScript = `
if [ -n "$DRONE_NETRC_MACHINE" ]; then
cat <<EOF > $HOME/.netrc
machine $DRONE_NETRC_MACHINE
login $DRONE_NETRC_USERNAME
password $DRONE_NETRC_PASSWORD
EOF
chmod 0600 $HOME/.netrc
fi
unset DRONE_NETRC_USERNAME
unset DRONE_NETRC_PASSWORD
unset DRONE_SCRIPT
%s
`
// traceScript is a helper script that is added to the build script
// to trace a command.
const traceScript = `
echo + %s
%s
`
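// Illustrative note (not part of the original file): after CommandTransform
// runs, a step with commands such as ["go build", "go test"] executes roughly
// as
//
//	/bin/sh -c 'echo $DRONE_SCRIPT | base64 -d | /bin/sh -e'
//
// where DRONE_SCRIPT is the base64-encoded setupScript: it writes the netrc
// file only when DRONE_NETRC_MACHINE is set, unsets the credentials, and then
// runs each command wrapped in traceScript so it is echoed to the build log
// before it executes.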

Some files were not shown because too many files have changed in this diff