simplified build execution

This commit is contained in:
Brad Rydzewski 2016-05-09 11:28:49 -07:00
parent 0befdf034b
commit b0879fe47e
19 changed files with 1006 additions and 33 deletions

View file

@@ -23,6 +23,7 @@ type Container struct {
Pull bool
AuthConfig Auth
Detached bool
Disabled bool
Privileged bool
WorkingDir string
Environment map[string]string

View file

@@ -0,0 +1 @@
package interpreter

View file

@@ -0,0 +1 @@
This is an internal copy of the Docker stdcopy package that removes the logrus debug logging. The original package is found at https://github.com/docker/docker/tree/master/pkg/stdcopy

View file

@@ -0,0 +1,167 @@
package internal
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte
const (
// Stdin represents standard input stream type.
Stdin StdType = iota
// Stdout represents standard output stream type.
Stdout
// Stderr represents standard error stream type.
Stderr
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
// stdWriter is a wrapper of io.Writer with extra customized info.
type stdWriter struct {
io.Writer
prefix byte
}
// Write sends the buffer to the underlying writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to demultiplex the output.
// It makes stdWriter implement io.Writer.
func (w *stdWriter) Write(buf []byte) (n int, err error) {
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
if buf == nil {
return 0, nil
}
header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf)))
line := append(header[:], buf...)
n, err = w.Writer.Write(line)
n -= stdWriterPrefixLen
if n < 0 {
n = 0
}
return
}
// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) io.Writer {
return &stdWriter{
Writer: w,
prefix: byte(t),
}
}
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
// Make sure we have at least a full header
for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < stdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Check the first byte to know where to write
switch StdType(buf[stdWriterFdIndex]) {
case Stdin:
fallthrough
case Stdout:
// Write on stdout
out = dstout
case Stderr:
// Write on stderr
out = dsterr
default:
return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+stdWriterPrefixLen > bufLen {
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+stdWriterPrefixLen {
return written, nil
}
break
}
if er != nil {
return 0, er
}
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
if ew != nil {
return 0, ew
}
// If the frame has not been fully written: error
if nw != frameSize {
return 0, io.ErrShortWrite
}
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+stdWriterPrefixLen:])
// Move the index
nr -= frameSize + stdWriterPrefixLen
}
}
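
For illustration (not part of the commit), a minimal Go example test for the package above, exercising the framing round trip: byte 0 of each 8-byte header carries the stream id, and bytes 4-7 carry the big-endian payload length.

package internal

import (
    "bytes"
    "fmt"
)

// ExampleStdCopy multiplexes a stdout frame and a stderr frame into one
// buffer, then splits them back out into separate destinations.
func ExampleStdCopy() {
    var muxed bytes.Buffer
    NewStdWriter(&muxed, Stdout).Write([]byte("to stdout\n"))
    NewStdWriter(&muxed, Stderr).Write([]byte("to stderr\n"))

    var stdout, stderr bytes.Buffer
    written, err := StdCopy(&stdout, &stderr, &muxed)
    if err != nil {
        panic(err)
    }

    fmt.Println(written) // payload bytes only; headers are not counted
    fmt.Print(stdout.String())
    fmt.Print(stderr.String())
    // Output:
    // 20
    // to stdout
    // to stderr
}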

View file

@@ -0,0 +1,260 @@
package internal
import (
"bytes"
"errors"
"io"
"io/ioutil"
"strings"
"testing"
)
func TestNewStdWriter(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
if writer == nil {
t.Fatalf("NewStdWriter with an invalid StdType should not return nil.")
}
}
func TestWriteWithUninitializedStdWriter(t *testing.T) {
writer := stdWriter{
Writer: nil,
prefix: byte(Stdout),
}
n, err := writer.Write([]byte("Something here"))
if n != 0 || err == nil {
t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter")
}
}
func TestWriteWithNilBytes(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
n, err := writer.Write(nil)
if err != nil {
t.Fatalf("Shouldn't have fail when given no data")
}
if n > 0 {
t.Fatalf("Write should have written 0 byte, but has written %d", n)
}
}
func TestWrite(t *testing.T) {
writer := NewStdWriter(ioutil.Discard, Stdout)
data := []byte("Test StdWrite.Write")
n, err := writer.Write(data)
if err != nil {
t.Fatalf("Error while writing with StdWrite")
}
if n != len(data) {
t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n)
}
}
type errWriter struct {
n int
err error
}
func (f *errWriter) Write(buf []byte) (int, error) {
return f.n, f.err
}
func TestWriteWithWriterError(t *testing.T) {
expectedError := errors.New("expected")
expectedReturnedBytes := 10
writer := NewStdWriter(&errWriter{
n: stdWriterPrefixLen + expectedReturnedBytes,
err: expectedError}, Stdout)
data := []byte("This won't get written, sigh")
n, err := writer.Write(data)
if err != expectedError {
t.Fatalf("Didn't get expected error.")
}
if n != expectedReturnedBytes {
t.Fatalf("Didn't get expected written bytes %d, got %d.",
expectedReturnedBytes, n)
}
}
func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) {
writer := NewStdWriter(&errWriter{n: -1}, Stdout)
data := []byte("This won't get written, sigh")
actual, _ := writer.Write(data)
if actual != 0 {
t.Fatalf("Expected returned written bytes equal to 0, got %d", actual)
}
}
func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) {
buffer = new(bytes.Buffer)
dstOut := NewStdWriter(buffer, Stdout)
_, err = dstOut.Write(stdOutBytes)
if err != nil {
return
}
dstErr := NewStdWriter(buffer, Stderr)
_, err = dstErr.Write(stdErrBytes)
return
}
func TestStdCopyWriteAndRead(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer)
if err != nil {
t.Fatal(err)
}
expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes)
if written != int64(expectedTotalWritten) {
t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written)
}
}
type customReader struct {
n int
err error
totalCalls int
correctCalls int
src *bytes.Buffer
}
func (f *customReader) Read(buf []byte) (int, error) {
f.totalCalls++
if f.totalCalls <= f.correctCalls {
return f.src.Read(buf)
}
return f.n, f.err
}
func TestStdCopyReturnsErrorReadingHeader(t *testing.T) {
expectedError := errors.New("error")
reader := &customReader{
err: expectedError}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
if written != 0 {
t.Fatalf("Expected 0 bytes read, got %d", written)
}
if err != expectedError {
t.Fatalf("Didn't get expected error")
}
}
func TestStdCopyReturnsErrorReadingFrame(t *testing.T) {
expectedError := errors.New("error")
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
reader := &customReader{
correctCalls: 1,
n: stdWriterPrefixLen + 1,
err: expectedError,
src: buffer}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
if written != 0 {
t.Fatalf("Expected 0 bytes read, got %d", written)
}
if err != expectedError {
t.Fatalf("Didn't get expected error")
}
}
func TestStdCopyDetectsCorruptedFrame(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
reader := &customReader{
correctCalls: 1,
n: stdWriterPrefixLen + 1,
err: io.EOF,
src: buffer}
written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
if written != startingBufLen {
t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written)
}
if err != nil {
t.Fatal("Didn't get nil error")
}
}
func TestStdCopyWithInvalidInputHeader(t *testing.T) {
dstOut := NewStdWriter(ioutil.Discard, Stdout)
dstErr := NewStdWriter(ioutil.Discard, Stderr)
src := strings.NewReader("Invalid input")
_, err := StdCopy(dstOut, dstErr, src)
if err == nil {
t.Fatal("StdCopy with invalid input header should fail.")
}
}
func TestStdCopyWithCorruptedPrefix(t *testing.T) {
data := []byte{0x01, 0x02, 0x03}
src := bytes.NewReader(data)
written, err := StdCopy(nil, nil, src)
if err != nil {
t.Fatalf("StdCopy should not return an error with corrupted prefix.")
}
if written != 0 {
t.Fatalf("StdCopy should have written 0, but has written %d", written)
}
}
func TestStdCopyReturnsWriteErrors(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
expectedError := errors.New("expected")
dstOut := &errWriter{err: expectedError}
written, err := StdCopy(dstOut, ioutil.Discard, buffer)
if written != 0 {
t.Fatalf("StdCopy should have written 0, but has written %d", written)
}
if err != expectedError {
t.Fatalf("Didn't get expected error, got %v", err)
}
}
func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) {
stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
if err != nil {
t.Fatal(err)
}
dstOut := &errWriter{n: startingBufLen - 10}
written, err := StdCopy(dstOut, ioutil.Discard, buffer)
if written != 0 {
t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written)
}
if err != io.ErrShortWrite {
t.Fatalf("Didn't get expected io.ErrShortWrite error")
}
}
func BenchmarkWrite(b *testing.B) {
w := NewStdWriter(ioutil.Discard, Stdout)
data := []byte("Test line for testing stdwriter performance\n")
data = bytes.Repeat(data, 100)
b.SetBytes(int64(len(data)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := w.Write(data); err != nil {
b.Fatal(err)
}
}
}

View file

@@ -5,8 +5,10 @@ import (
"fmt"
"io"
"strings"
"time"
"github.com/drone/drone/yaml"
"github.com/drone/drone/yaml/interpreter/internal"
"github.com/samalba/dockerclient"
)
@@ -22,10 +24,12 @@ type Pipeline struct {
conf *yaml.Config
head *element
tail *element
pipe chan (*Line)
next chan (error)
done chan (error)
err error
ambassador string
containers []string
volumes []string
networks []string
@@ -34,20 +38,24 @@ type Pipeline struct {
}
// Load loads the pipeline from the Yaml configuration file.
func Load(conf *yaml.Config) *Pipeline {
func Load(conf *yaml.Config, client dockerclient.Client) *Pipeline {
pipeline := Pipeline{
conf: conf,
next: make(chan error),
done: make(chan error),
client: client,
pipe: make(chan *Line, 500), // buffer 500 lines of logs
next: make(chan error),
done: make(chan error),
}
var containers []*yaml.Container
containers = append(containers, conf.Services...)
containers = append(containers, conf.Pipeline...)
for i, c := range containers {
for _, c := range containers {
if c.Disabled {
continue
}
next := &element{Container: c}
if i == 0 {
if pipeline.head == nil {
pipeline.head = next
pipeline.tail = next
} else {
@@ -80,11 +88,13 @@ func (p *Pipeline) Next() <-chan error {
// Exec executes the current step.
func (p *Pipeline) Exec() {
err := p.exec(p.head.Container)
if err != nil {
p.err = err
}
p.step()
go func() {
err := p.exec(p.head.Container)
if err != nil {
p.err = err
}
p.step()
}()
}
// Skip skips the current step.
@@ -92,6 +102,11 @@ func (p *Pipeline) Skip() {
p.step()
}
// Pipe returns the build output pipe.
func (p *Pipeline) Pipe() <-chan *Line {
return p.pipe
}
// Head returns the head item in the list.
func (p *Pipeline) Head() *yaml.Container {
return p.head.Container
@@ -104,8 +119,9 @@ func (p *Pipeline) Tail() *yaml.Container {
// Stop stops the pipeline.
func (p *Pipeline) Stop() {
p.close(ErrTerm)
return
go func() {
p.done <- ErrTerm
}()
}
// Setup prepares the build pipeline environment.
@@ -126,26 +142,29 @@ func (p *Pipeline) Teardown() {
for _, id := range p.volumes {
p.client.RemoveVolume(id)
}
close(p.next)
close(p.done)
close(p.pipe)
}
// step steps through the pipeline to head.next
func (p *Pipeline) step() {
if p.head == p.tail {
p.close(nil)
return
go func() {
p.done <- nil
}()
} else {
go func() {
p.head = p.head.next
p.next <- nil
}()
}
go func() {
p.head = p.head.next
p.next <- nil
}()
}
// close closes open channels and signals the pipeline is done.
func (p *Pipeline) close(err error) {
go func() {
p.done <- nil
close(p.next)
close(p.done)
p.done <- err
}()
}
@@ -156,7 +175,7 @@ func (p *Pipeline) exec(c *yaml.Container) error {
// check for the image and pull if not exists or if configured to always
// pull the latest version.
_, err := p.client.InspectImage(c.Image)
if err == nil || c.Pull {
if err != nil || c.Pull {
err = p.client.PullImage(c.Image, auth)
if err != nil {
return err
@@ -184,15 +203,15 @@ func (p *Pipeline) exec(c *yaml.Container) error {
defer rc.Close()
num := 0
// now := time.Now().UTC()
now := time.Now().UTC()
scanner := bufio.NewScanner(rc)
for scanner.Scan() {
// r.pipe.lines <- &Line{
// Proc: c.Name,
// Time: int64(time.Since(now).Seconds()),
// Pos: num,
// Out: scanner.Text(),
// }
p.pipe <- &Line{
Proc: c.Name,
Time: int64(time.Since(now).Seconds()),
Pos: num,
Out: scanner.Text(),
}
num++
}
}()
@@ -243,7 +262,7 @@ func toLogs(client dockerclient.Client, id string) (io.ReadCloser, error) {
defer rc.Close()
// use Docker StdCopy
// internal.StdCopy(pipew, pipew, rc)
internal.StdCopy(pipew, pipew, rc)
// check to see if the container is still running. If not, we can safely
// exit and assume there are no more logs left to stream.

View file

@@ -14,14 +14,19 @@ func TestInterpreter(t *testing.T) {
t.Fatal(err)
}
pipeline := Load(conf)
pipeline := Load(conf, nil)
pipeline.pipe <- &Line{Out: "foo"}
pipeline.pipe <- &Line{Out: "bar"}
pipeline.pipe <- &Line{Out: "baz"}
for {
select {
case <-pipeline.Done():
fmt.Println("GOT DONE")
return
case line := <-pipeline.Pipe():
fmt.Println(line.String())
case <-pipeline.Next():
pipeline.Exec()
}
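
For illustration (not in the commit), a sketch of how an agent might drive the reworked Pipeline, mirroring the select loop in the test above. The Setup call and its signature are assumptions, and parsing the Yaml and constructing the Docker client are omitted.

package main

import (
    "fmt"

    "github.com/drone/drone/yaml"
    "github.com/drone/drone/yaml/interpreter"
    "github.com/samalba/dockerclient"
)

// runPipeline is a hypothetical driver for the Pipeline API in this commit.
func runPipeline(conf *yaml.Config, client dockerclient.Client) error {
    pipeline := interpreter.Load(conf, client)

    pipeline.Setup()          // assumed: prepares volumes, networks, ambassador
    defer pipeline.Teardown() // removes containers, networks, volumes; closes channels

    for {
        select {
        case line := <-pipeline.Pipe():
            fmt.Println(line.String()) // stream build log output
        case err := <-pipeline.Done():
            return err // nil on success, ErrTerm when stopped
        case <-pipeline.Next():
            pipeline.Exec() // run the step at the head of the list in a goroutine
        }
    }
}

func main() {}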

yaml/transform/clone.go Normal file (+21 lines)
View file

@@ -0,0 +1,21 @@
package transform
import "github.com/drone/drone/yaml"
const clone = "clone"
// Clone transforms the Yaml to include a clone step.
func Clone(c *yaml.Config, plugin string) error {
for _, p := range c.Pipeline {
if p.Name == clone {
return nil
}
}
s := &yaml.Container{
Image: plugin,
Name: clone,
}
c.Pipeline = append([]*yaml.Container{s}, c.Pipeline...)
return nil
}

yaml/transform/command.go Normal file (+82 lines)
View file

@@ -0,0 +1,82 @@
package transform
import (
"bytes"
"encoding/base64"
"fmt"
"strings"
"github.com/drone/drone/yaml"
)
// CommandTransform transforms the custom shell commands in the Yaml pipeline
// into a container ENTRYPOINT and CMD for execution.
func CommandTransform(c *yaml.Config) error {
for _, p := range c.Pipeline {
if len(p.Commands) == 0 {
continue
}
p.Entrypoint = []string{
"/bin/sh", "-c",
}
p.Command = []string{
"echo $DRONE_SCRIPT | base64 -d | /bin/sh -e",
}
if p.Environment == nil {
p.Environment = map[string]string{}
}
p.Environment["HOME"] = "/root"
p.Environment["SHELL"] = "/bin/sh"
p.Environment["DRONE_SCRIPT"] = toScript(
p.Commands,
)
}
return nil
}
func toScript(commands []string) string {
var buf bytes.Buffer
for _, command := range commands {
escaped := fmt.Sprintf("%q", command)
escaped = strings.Replace(escaped, "$", `$\`, -1)
buf.WriteString(fmt.Sprintf(
traceScript,
escaped,
command,
))
}
script := fmt.Sprintf(
setupScript,
buf.String(),
)
return base64.StdEncoding.EncodeToString([]byte(script))
}
// setupScript is a helper script that is added to the build to ensure
// a minimum set of environment variables are set correctly.
const setupScript = `
if [ -n "$DRONE_NETRC_MACHINE" ]; then
cat <<EOF > $HOME/.netrc
machine $DRONE_NETRC_MACHINE
login $DRONE_NETRC_USERNAME
password $DRONE_NETRC_PASSWORD
EOF
fi
unset DRONE_NETRC_USERNAME
unset DRONE_NETRC_PASSWORD
unset DRONE_SCRIPT
%s
`
// traceScript is a helper script that is added to the build script
// to trace a command.
const traceScript = `
echo + %s
%s
`
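
For illustration (not in the commit), a small program showing what CommandTransform produces for a step with shell commands. The config literal and its values are hypothetical; the fields used (Pipeline, Commands, Entrypoint, Command, Environment) all appear in this diff.

package main

import (
    "encoding/base64"
    "fmt"

    "github.com/drone/drone/yaml"
    "github.com/drone/drone/yaml/transform"
)

func main() {
    conf := &yaml.Config{
        Pipeline: []*yaml.Container{
            {
                Name:     "build",
                Image:    "golang:1.6",
                Commands: []string{"go build", "go test"},
            },
        },
    }

    if err := transform.CommandTransform(conf); err != nil {
        panic(err)
    }

    step := conf.Pipeline[0]
    fmt.Println(step.Entrypoint) // [/bin/sh -c]
    fmt.Println(step.Command)    // [echo $DRONE_SCRIPT | base64 -d | /bin/sh -e]

    // DRONE_SCRIPT holds the base64-encoded setup script plus the traced commands.
    script, _ := base64.StdEncoding.DecodeString(step.Environment["DRONE_SCRIPT"])
    fmt.Println(string(script))
}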

yaml/transform/environ.go Normal file (+20 lines)
View file

@@ -0,0 +1,20 @@
package transform
import "github.com/drone/drone/yaml"
// Environ transforms the steps in the Yaml pipeline to include runtime
// environment variables.
func Environ(c *yaml.Config, envs map[string]string) error {
for _, p := range c.Pipeline {
if p.Environment == nil {
p.Environment = map[string]string{}
}
for k, v := range envs {
if v == "" {
continue
}
p.Environment[k] = v
}
}
return nil
}

View file

@@ -0,0 +1,30 @@
package transform
import (
"encoding/base64"
"fmt"
"github.com/drone/drone/yaml"
"github.com/gorilla/securecookie"
)
// Identifier transforms the container steps in the Yaml and assigns a unique
// container identifier.
func Identifier(c *yaml.Config) error {
// creates a random prefix for the build
rand := base64.RawURLEncoding.EncodeToString(
securecookie.GenerateRandomKey(8),
)
for i, step := range c.Services {
step.ID = fmt.Sprintf("drone_%s_%d", rand, i)
}
for i, step := range c.Pipeline {
step.ID = fmt.Sprintf("drone_%s_%d", rand, i+len(c.Services))
}
return nil
}

yaml/transform/image.go Normal file (+63 lines)
View file

@@ -0,0 +1,63 @@
package transform
import (
"path/filepath"
"strings"
"github.com/drone/drone/yaml"
)
func ImagePull(conf *yaml.Config, pull bool) error {
for _, plugin := range conf.Pipeline {
if len(plugin.Commands) == 0 || len(plugin.Vargs) == 0 {
continue
}
plugin.Pull = pull
}
return nil
}
func ImageTag(conf *yaml.Config) error {
for _, image := range conf.Pipeline {
if !strings.Contains(image.Image, ":") {
image.Image = image.Image + ":latest"
}
}
for _, image := range conf.Services {
if !strings.Contains(image.Image, ":") {
image.Image = image.Image + ":latest"
}
}
return nil
}
func ImageName(conf *yaml.Config) error {
for _, image := range conf.Pipeline {
image.Image = strings.Replace(image.Image, "_", "-", -1)
}
return nil
}
func ImageNamespace(conf *yaml.Config, namespace string) error {
for _, image := range conf.Pipeline {
if strings.Contains(image.Image, "/") {
continue
}
if len(image.Vargs) == 0 {
continue
}
image.Image = filepath.Join(namespace, image.Image)
}
return nil
}
func ImageEscalate(conf *yaml.Config, patterns []string) error {
for _, c := range conf.Pipeline {
for _, pattern := range patterns {
if ok, _ := filepath.Match(pattern, c.Image); ok {
c.Privileged = true
}
}
}
return nil
}

yaml/transform/plugin.go Normal file (+80 lines)
View file

@@ -0,0 +1,80 @@
package transform
import "github.com/drone/drone/yaml"
// PluginDisable disables plugins. This is intended for use when executing the
// pipeline locally on your own computer.
func PluginDisable(conf *yaml.Config, disabled bool) {
for _, container := range conf.Pipeline {
if len(container.Vargs) != 0 || container.Name == "clone" {
container.Disabled = disabled
}
}
}
//
// import (
// "fmt"
// "reflect"
// "strconv"
// "strings"
//
// "github.com/drone/drone/yaml"
// "github.com/libcd/libyaml/parse"
//
// json "github.com/ghodss/yaml"
// "gopkg.in/yaml.v2"
// )
//
// func
//
// // argsToEnv uses reflection to convert a map[string]interface to a list
// // of environment variables.
// func argsToEnv(from map[string]interface{}, to map[string]string) error {
//
// for k, v := range from {
// t := reflect.TypeOf(v)
// vv := reflect.ValueOf(v)
//
// k = "PLUGIN_" + strings.ToUpper(k)
//
// switch t.Kind() {
// case reflect.Bool:
// to[k] = strconv.FormatBool(vv.Bool())
//
// case reflect.String:
// to[k] = vv.String()
//
// case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
// to[k] = fmt.Sprintf("%v", vv.Int())
//
// case reflect.Float32, reflect.Float64:
// to[k] = fmt.Sprintf("%v", vv.Float())
//
// case reflect.Map:
// yml, _ := yaml.Marshal(vv.Interface())
// out, _ := json.YAMLToJSON(yml)
// to[k] = string(out)
//
// case reflect.Slice:
// out, err := yaml.Marshal(vv.Interface())
// if err != nil {
// return err
// }
//
// in := []string{}
// err := yaml.Unmarshal(out, &in)
// if err == nil {
// to[k] = strings.Join(in, ",")
// } else {
// out, err = json.YAMLToJSON(out)
// if err != nil {
// return err
// }
// to[k] = string(out)
// }
// }
// }
//
// return nil
// }

yaml/transform/pod.go Normal file (+60 lines)
View file

@@ -0,0 +1,60 @@
package transform
import (
"encoding/base64"
"fmt"
"github.com/drone/drone/yaml"
"github.com/gorilla/securecookie"
)
// Pod transforms the containers in the Yaml to use Pod networking, where every
// container shares the localhost connection.
func Pod(c *yaml.Config) error {
rand := base64.RawURLEncoding.EncodeToString(
securecookie.GenerateRandomKey(8),
)
ambassador := &yaml.Container{
ID: fmt.Sprintf("drone_ambassador_%s", rand),
Name: "ambassador",
Image: "busybox:latest",
Detached: true,
Entrypoint: []string{"/bin/sleep"},
Command: []string{"86400"},
Volumes: []string{c.Workspace.Path, c.Workspace.Base},
}
network := fmt.Sprintf("container:%s", ambassador.ID)
var containers []*yaml.Container
containers = append(containers, c.Pipeline...)
containers = append(containers, c.Services...)
for _, container := range containers {
container.VolumesFrom = append(container.VolumesFrom, ambassador.ID)
if container.Network == "" {
container.Network = network
}
}
c.Services = append([]*yaml.Container{ambassador}, c.Services...)
return nil
}
// func (v *podOp) VisitContainer(node *parse.ContainerNode) error {
// if node.Container.Network == "" {
// parent := fmt.Sprintf("container:%s", v.name)
// node.Container.Network = parent
// }
// node.Container.VolumesFrom = append(node.Container.VolumesFrom, v.name)
// return nil
// }
//
// func (v *podOp) VisitRoot(node *parse.RootNode) error {
//
//
// node.Pod = service
// return nil
// }

yaml/transform/secret.go Normal file (+31 lines)
View file

@@ -0,0 +1,31 @@
package transform
import (
"github.com/drone/drone/model"
"github.com/drone/drone/yaml"
)
func Secret(c *yaml.Config, event string, secrets []*model.Secret) error {
for _, p := range c.Pipeline {
for _, secret := range secrets {
switch secret.Name {
case "REGISTRY_USERNAME":
p.AuthConfig.Username = secret.Value
case "REGISTRY_PASSWORD":
p.AuthConfig.Password = secret.Value
case "REGISTRY_EMAIL":
p.AuthConfig.Email = secret.Value
default:
if p.Environment == nil {
p.Environment = map[string]string{}
}
p.Environment[secret.Name] = secret.Value
}
}
}
return nil
}

View file

@@ -0,0 +1,6 @@
package transform
import "github.com/drone/drone/yaml"
// TransformFunc defines an operation for transforming the Yaml file.
type TransformFunc func(*yaml.Config) error

View file

@@ -0,0 +1,74 @@
package transform
import (
"fmt"
"github.com/drone/drone/yaml"
)
func Check(c *yaml.Config, trusted bool) error {
var images []*yaml.Container
images = append(images, c.Pipeline...)
images = append(images, c.Services...)
for _, image := range images {
if err := CheckEntrypoint(image); err != nil {
return err
}
if trusted {
continue
}
if err := CheckTrusted(image); err != nil {
return err
}
}
return nil
}
// validate the plugin command and entrypoint and return an error if
// the user attempts to set or override these values.
func CheckEntrypoint(c *yaml.Container) error {
if len(c.Vargs) == 0 {
return nil
}
if len(c.Entrypoint) != 0 {
return fmt.Errorf("Cannot set plugin Entrypoint")
}
if len(c.Command) != 0 {
return fmt.Errorf("Cannot set plugin Command")
}
return nil
}
// validate the container configuration and return an error if restricted
// configurations are used.
func CheckTrusted(c *yaml.Container) error {
if c.Privileged {
return fmt.Errorf("Insufficient privileges to use privileged mode")
}
if len(c.DNS) != 0 {
return fmt.Errorf("Insufficient privileges to use custom dns")
}
if len(c.DNSSearch) != 0 {
return fmt.Errorf("Insufficient privileges to use dns_search")
}
if len(c.Devices) != 0 {
return fmt.Errorf("Insufficient privileges to use devices")
}
if len(c.ExtraHosts) != 0 {
return fmt.Errorf("Insufficient privileges to use extra_hosts")
}
if len(c.Network) != 0 {
return fmt.Errorf("Insufficient privileges to override the network")
}
if c.OomKillDisable {
return fmt.Errorf("Insufficient privileges to disable oom_kill")
}
if len(c.Volumes) != 0 {
return fmt.Errorf("Insufficient privileges to use volumes")
}
if len(c.VolumesFrom) != 0 {
return fmt.Errorf("Insufficient privileges to use volumes_from")
}
return nil
}

yaml/transform/volume.go Normal file (+20 lines)
View file

@@ -0,0 +1,20 @@
package transform
import "github.com/drone/drone/yaml"
func ImageVolume(conf *yaml.Config, volumes []string) error {
if len(volumes) == 0 {
return nil
}
var containers []*yaml.Container
containers = append(containers, conf.Pipeline...)
containers = append(containers, conf.Services...)
for _, container := range containers {
container.Volumes = append(container.Volumes, volumes...)
}
return nil
}

View file

@@ -0,0 +1,32 @@
package transform
import (
"path/filepath"
"github.com/drone/drone/yaml"
)
// WorkspaceTransform transforms the Yaml to set a default workspace and to
// use it as the working directory for each pipeline step.
func WorkspaceTransform(c *yaml.Config, base, path string) error {
if c.Workspace == nil {
c.Workspace = &yaml.Workspace{}
}
if c.Workspace.Base == "" {
c.Workspace.Base = base
}
if c.Workspace.Path == "" {
c.Workspace.Path = path
}
if !filepath.IsAbs(c.Workspace.Path) {
c.Workspace.Path = filepath.Join(
c.Workspace.Base,
c.Workspace.Path,
)
}
for _, p := range c.Pipeline {
p.WorkingDir = c.Workspace.Path
}
return nil
}
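
Taken together, these transforms are meant to be applied to the parsed Yaml before the interpreter loads it. Below is a hypothetical ordering (not prescribed by this commit); the clone plugin image and workspace paths are placeholders, and several transforms (Secret, PluginDisable, the remaining Image* helpers) are left out for brevity.

package main

import (
    "github.com/drone/drone/yaml"
    "github.com/drone/drone/yaml/transform"
)

// prepare applies the transforms from this commit to a parsed configuration.
// Ordering and argument values are illustrative only.
func prepare(conf *yaml.Config, envs map[string]string) error {
    transform.Clone(conf, "plugins/git")                // ensure a clone step exists (image is a placeholder)
    transform.Environ(conf, envs)                       // inject runtime environment variables
    transform.CommandTransform(conf)                    // turn shell commands into ENTRYPOINT/CMD
    transform.ImageTag(conf)                            // default image tags to :latest
    transform.ImagePull(conf, true)                     // force-pull selected images
    transform.Identifier(conf)                          // assign unique container IDs
    transform.WorkspaceTransform(conf, "/drone", "src") // default workspace (paths are placeholders)
    transform.Pod(conf)                                 // share networking through the ambassador container
    return transform.Check(conf, false)                 // reject privileged options for untrusted builds
}

func main() {}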