moved drone-build to its own repository, drone/drone-exec

This commit is contained in:
Brad Rydzewski 2015-09-08 14:02:48 -07:00
parent 3bb52002d4
commit 008a16f7e7
7 changed files with 1 addition and 695 deletions

@@ -1,9 +0,0 @@
# Docker image for the Drone build runner
#
# CGO_ENABLED=0 go build -a -tags netgo
# docker build --rm=true -t drone/drone-build .
FROM gliderlabs/alpine:3.1
RUN apk-install ca-certificates
ADD drone-build /bin/
ENTRYPOINT ["/bin/drone-build"]
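
For context, a hypothetical invocation of the resulting image. The runner
connects to the host Docker daemon over unix:///var/run/docker.sock (see
main.go below), so the socket must be mounted in; the build context arrives
as JSON on stdin or after a -- argument. Payload contents are elided here.

# hypothetical usage; the flags and payload contract are defined in main.go
docker run -i --rm \
  -v /var/run/docker.sock:/var/run/docker.sock \
  drone/drone-build --clone --build -- '{"repo": {...}, "build": {...}}'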

@@ -1,25 +0,0 @@
# Docker image for the Drone build runner
#
# cd $GOPATH/src/github.com/drone/drone
# docker build --file=cmd/drone-build/Dockerfile.alpine --rm=true -t drone/drone-build .
FROM alpine:3.2
ENV GOROOT=/usr/lib/go \
GOPATH=/gopath \
GOBIN=/gopath/bin \
CGO_ENABLED=0 \
PATH=$PATH:$GOROOT/bin:$GOPATH/bin
WORKDIR /gopath/src/github.com/drone/drone
ADD . /gopath/src/github.com/drone/drone
RUN apk add -U go ca-certificates && \
cd cmd/drone-build && \
go build -a -tags netgo && \
apk del go && \
mv drone-build /bin/drone-build && \
rm -rf /gopath && \
rm -rf /var/cache/apk/*
ENTRYPOINT ["/bin/drone-build"]

@@ -1,226 +0,0 @@
package main
import (
"errors"
"os"
"strings"
log "github.com/drone/drone/Godeps/_workspace/src/github.com/Sirupsen/logrus"
"github.com/drone/drone/Godeps/_workspace/src/github.com/samalba/dockerclient"
"github.com/drone/drone/pkg/docker"
)
var (
ErrTimeout = errors.New("Timeout")
ErrLogging = errors.New("Logs not available")
)
var (
// options to fetch the stdout and stderr logs
logOpts = &dockerclient.LogOptions{
Stdout: true,
Stderr: true,
}
// options to fetch the stdout and stderr logs
// by tailing the output.
logOptsTail = &dockerclient.LogOptions{
Follow: true,
Stdout: true,
Stderr: true,
}
)
// client is a wrapper around the default Docker client
// that tracks all created containers and ensures some default
// configurations are in place.
type client struct {
dockerclient.Client
info *dockerclient.ContainerInfo
names []string // names of created containers
}
func newClient(docker dockerclient.Client) (*client, error) {
// creates an ambassador container
conf := &dockerclient.ContainerConfig{}
conf.HostConfig = dockerclient.HostConfig{}
conf.Entrypoint = []string{"/bin/sleep"}
conf.Cmd = []string{"86400"}
conf.Image = "gliderlabs/alpine:3.1"
conf.Volumes = map[string]struct{}{}
conf.Volumes["/drone"] = struct{}{}
info, err := daemon(docker, conf, false)
if err != nil {
return nil, err
}
return &client{Client: docker, info: info}, nil
}
// CreateContainer creates a container and internally
// caches its container id.
func (c *client) CreateContainer(conf *dockerclient.ContainerConfig, name string) (string, error) {
conf.Env = append(conf.Env, "affinity:container=="+c.info.Id)
id, err := c.Client.CreateContainer(conf, name)
if err == nil {
c.names = append(c.names, id)
}
return id, err
}
// StartContainer starts a container and links to an
// ambassador container sharing the build machine's volume.
func (c *client) StartContainer(id string, conf *dockerclient.HostConfig) error {
conf.VolumesFrom = append(conf.VolumesFrom, c.info.Id)
if len(conf.NetworkMode) == 0 {
conf.NetworkMode = "container:" + c.info.Id
}
return c.Client.StartContainer(id, conf)
}
// Destroy will terminate and destroy all containers that
// were created by this client.
func (c *client) Destroy() error {
for _, id := range c.names {
c.Client.KillContainer(id, "9")
c.Client.RemoveContainer(id, true, true)
}
c.Client.KillContainer(c.info.Id, "9")
return c.Client.RemoveContainer(c.info.Id, true, true)
}
func run(client dockerclient.Client, conf *dockerclient.ContainerConfig, pull bool) (*dockerclient.ContainerInfo, error) {
// force-pull the image if specified.
// TEMPORARY: while we are in beta we should always re-pull drone plugins
if pull { //|| strings.HasPrefix(conf.Image, "plugins/") {
client.PullImage(conf.Image, nil)
}
// attempts to create the container
id, err := client.CreateContainer(conf, "")
if err != nil {
// if that fails, pull the image and re-create
err = client.PullImage(conf.Image, nil)
if err != nil {
log.Errorf("Error pulling %s. %s\n", conf.Image, err)
return nil, err
}
id, err = client.CreateContainer(conf, "")
// make sure the container is removed in
// the event of a creation error.
if err != nil {
log.Errorf("Error starting %s. %s\n", conf.Image, err)
client.RemoveContainer(id, true, true)
return nil, err
}
}
// ensures the container is always stopped
// and ready to be removed.
defer func() {
client.StopContainer(id, 5)
client.KillContainer(id, "9")
}()
// fetches the container information.
info, err := client.InspectContainer(id)
if err != nil {
log.Errorf("Error inspecting %s. %s\n", conf.Image, err)
client.RemoveContainer(id, true, true)
return nil, err
}
// channels that receive errors and container info
// while the container runs asynchronously.
errc := make(chan error, 1)
infoc := make(chan *dockerclient.ContainerInfo, 1)
go func() {
// starts the container
err := client.StartContainer(id, &conf.HostConfig)
if err != nil {
log.Errorf("Error starting %s. %s\n", conf.Image, err)
errc <- err
return
}
// blocks and waits for the container to finish
// by streaming the logs (to stdout). Ideally
// we could use the `wait` function instead
rc, err := client.ContainerLogs(id, logOptsTail)
if err != nil {
log.Errorf("Error tailing %s. %s\n", conf.Image, err)
errc <- err
return
}
defer rc.Close()
docker.StdCopy(os.Stdout, os.Stdout, rc)
// fetches the container information
info, err := client.InspectContainer(id)
if err != nil {
log.Errorf("Error getting exit code for %s. %s\n", conf.Image, err)
errc <- err
return
}
infoc <- info
}()
select {
case info := <-infoc:
return info, nil
case err := <-errc:
return info, err
// TODO checkout net.Context and cancel
// case <-time.After(timeout):
// return info, ErrTimeout
}
}
func daemon(client dockerclient.Client, conf *dockerclient.ContainerConfig, pull bool) (*dockerclient.ContainerInfo, error) {
// force-pull the image
if pull {
client.PullImage(conf.Image, nil)
}
// TEMPORARY: always try to pull the new image for now
// since we'll be frequently updating the plugin images
// over the next few weeks
if strings.HasPrefix(conf.Image, "plugins/") {
client.PullImage(conf.Image, nil)
}
// attempts to create the container
id, err := client.CreateContainer(conf, "")
if err != nil {
// if that fails, pull the image and re-create
err = client.PullImage(conf.Image, nil)
if err != nil {
log.Errorf("Error pulling %s. %s\n", conf.Image, err)
return nil, err
}
id, err = client.CreateContainer(conf, "")
if err != nil {
log.Errorf("Error creating %s. %s\n", conf.Image, err)
client.RemoveContainer(id, true, true)
return nil, err
}
}
// fetches the container information
info, err := client.InspectContainer(id)
if err != nil {
log.Errorf("Error inspecting %s. %s\n", conf.Image, err)
client.RemoveContainer(id, true, true)
return nil, err
}
// starts the container
err = client.StartContainer(id, &conf.HostConfig)
if err != nil {
log.Errorf("Error starting daemon %s. %s\n", conf.Image, err)
}
return info, err
}
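
A minimal sketch of how the helpers above fit together, assuming it compiles
in the same package as newClient and run; the image name is illustrative.

// hypothetical wiring of newClient, run, and Destroy
func example() {
	// connect to the host Docker daemon through the mounted socket
	docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		log.Errorln(err)
		return
	}
	// newClient boots the ambassador container that all build
	// containers share volumes and networking with
	c, err := newClient(docker)
	if err != nil {
		log.Errorln(err)
		return
	}
	// Destroy kills and removes every container this client created
	defer c.Destroy()
	// run blocks until the container exits, streaming its logs to stdout
	info, err := run(c, &dockerclient.ContainerConfig{Image: "golang:1.4"}, false)
	if err != nil {
		log.Errorln(err)
		return
	}
	log.Infof("exit code %d", info.State.ExitCode)
}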

@@ -1,164 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"os"
"os/signal"
"strings"
"syscall"
"time"
log "github.com/drone/drone/Godeps/_workspace/src/github.com/Sirupsen/logrus"
"github.com/drone/drone/Godeps/_workspace/src/github.com/samalba/dockerclient"
common "github.com/drone/drone/pkg/types"
)
var (
clone = flag.Bool("clone", false, "")
build = flag.Bool("build", false, "")
publish = flag.Bool("publish", false, "")
deploy = flag.Bool("deploy", false, "")
notify = flag.Bool("notify", false, "")
debug = flag.Bool("debug", false, "")
)
func main() {
flag.Parse()
if *debug {
log.SetLevel(log.DebugLevel)
}
ctx, err := parseContext()
if err != nil {
log.Errorln("Error launching build container.", err)
os.Exit(1)
return
}
// creates the Docker client, connecting to the
// linked Docker daemon
docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
if err != nil {
log.Errorln("Error connecting to build server.", err)
os.Exit(1)
return
}
// creates a wrapper Docker client that uses an ambassador
// container to create a pod-like environment.
client, err := newClient(docker)
if err != nil {
log.Errorln("Error starting build server pod", err)
os.Exit(1)
return
}
ctx.client = client
defer client.Destroy()
// watch for SIGINT/SIGTERM (timeout or canceled build)
killc := make(chan os.Signal, 1)
signal.Notify(killc, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-killc
log.Println("Cancel request received, killing process")
client.Destroy() // possible race here. implement lock on the other end
os.Exit(130) // cancel is treated like ctrl+c
}()
go func() {
<-time.After(time.Duration(ctx.Repo.Timeout) * time.Minute)
log.Println("Timeout request received, killing process")
client.Destroy() // possible race here. implement lock on the other end
os.Exit(128) // timeout exits with code 128
}()
// performs some initial parsing and pre-processing steps
// prior to executing our build tasks.
createClone(ctx)
err = setup(ctx)
if err != nil {
log.Errorln("Error processing .drone.yml file.", err)
client.Destroy()
os.Exit(1)
return
}
var execs []execFunc
if *clone {
execs = append(execs, execClone)
}
if *build {
execs = append(execs, execSetup)
execs = append(execs, execCompose)
execs = append(execs, execBuild)
}
if *publish {
execs = append(execs, execPublish)
}
if *deploy {
execs = append(execs, execDeploy)
}
// Loop through and execute each step.
for i, exec_ := range execs {
code, err := exec_(ctx)
if err != nil {
fmt.Printf("00%d Error executing build\n", i+1)
fmt.Println(err)
code = 255
}
if code != 0 {
ctx.Job.ExitCode = code
break
}
}
// Optionally execute notification steps.
if *notify {
execNotify(ctx)
}
client.Destroy()
os.Exit(ctx.Job.ExitCode)
}
func createClone(c *Context) {
c.Clone = &common.Clone{
Netrc: c.Netrc,
Keypair: c.Keys,
Remote: c.Repo.Clone,
Origin: c.Repo.Clone,
}
c.Clone.Sha = c.Build.Commit.Sha
c.Clone.Ref = c.Build.Commit.Ref
c.Clone.Branch = c.Build.Commit.Branch
// TODO do we still need this? it should be set by the remote
if strings.HasPrefix(c.Clone.Branch, "refs/heads/") {
c.Clone.Branch = c.Clone.Branch[11:]
}
// TODO we should also pass the SourceSha, SourceBranch, etc
// to the clone object for merge requests from bitbucket, gitlab, et al
// if len(c.Commit.PullRequest) != 0 {
// }
}
func parseContext() (*Context, error) {
c := &Context{}
for i, arg := range os.Args {
if arg == "--" && len(os.Args) != i+1 {
buf := bytes.NewBufferString(os.Args[i+1])
err := json.NewDecoder(buf).Decode(c)
return c, err
}
}
err := json.NewDecoder(os.Stdin).Decode(c)
return c, err
}
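
As parseContext shows, the context JSON may be supplied two ways; the two
hypothetical invocations below are equivalent (payload contents elided).

# payload as the argument following --
drone-build --build -- '{"repo": {...}, "build": {...}}'
# payload on stdin
echo '{"repo": {...}, "build": {...}}' | drone-build --build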

@@ -1,167 +0,0 @@
package main
import (
"fmt"
"github.com/drone/drone/Godeps/_workspace/src/github.com/samalba/dockerclient"
common "github.com/drone/drone/pkg/types"
"github.com/drone/drone/pkg/yaml"
"github.com/drone/drone/pkg/yaml/inject"
)
type Context struct {
System *common.System `json:"system"`
Repo *common.Repo `json:"repo"`
Build *common.Build `json:"build"`
Job *common.Job `json:"job"`
Yaml []byte `json:"yaml"`
// TODO: refactor these
Clone *common.Clone `json:"clone"`
Keys *common.Keypair `json:"keys"`
Netrc *common.Netrc `json:"netrc"`
Conf *common.Config `json:"-"`
infos []*dockerclient.ContainerInfo
client dockerclient.Client
}
func setup(c *Context) error {
var err error
var opts = parser.Opts{
Network: false,
Privileged: false,
Volumes: false,
Caching: false,
Whitelist: c.System.Plugins,
}
// if the repository is trusted the build may specify
// custom volumes, networking, and run in privileged mode.
if c.Repo.Trusted {
opts.Network = true
opts.Privileged = true
opts.Volumes = true
opts.Caching = true
}
// if the repository is private, enable caching
if c.Repo.Private {
opts.Caching = true
}
// inject the matrix parameters into the yaml
injected := inject.Inject(string(c.Yaml), c.Job.Environment)
c.Conf, err = parser.ParseSingle(injected, &opts, c.Repo)
if err != nil {
return err
}
// and append the matrix parameters as environment
// variables for the build
for k, v := range c.Job.Environment {
env := k + "=" + v
c.Conf.Build.Environment = append(c.Conf.Build.Environment, env)
}
// and append drone, jenkins, travis and other
// environment variables that may be of use.
for k, v := range toEnv(c) {
env := k + "=" + v
c.Conf.Build.Environment = append(c.Conf.Build.Environment, env)
}
pathv, ok := c.Conf.Clone.Config["path"]
if ok {
path, ok := pathv.(string)
if ok {
c.Clone.Dir = path
return nil
}
}
return fmt.Errorf("Workspace path not found")
}
type execFunc func(c *Context) (int, error)
func execClone(c *Context) (int, error) {
conf := toContainerConfig(c.Conf.Clone)
conf.Cmd = toCommand(c, c.Conf.Clone)
info, err := run(c.client, conf, c.Conf.Clone.Pull)
if err != nil {
return 255, err
}
return info.State.ExitCode, nil
}
func execBuild(c *Context) (int, error) {
conf := toContainerConfig(c.Conf.Build)
conf.Entrypoint = []string{"/bin/sh", "-e"}
conf.Cmd = []string{"/drone/bin/build.sh"}
info, err := run(c.client, conf, c.Conf.Build.Pull)
if err != nil {
return 255, err
}
return info.State.ExitCode, nil
}
func execSetup(c *Context) (int, error) {
conf := toContainerConfig(c.Conf.Setup)
conf.Cmd = toCommand(c, c.Conf.Setup)
info, err := run(c.client, conf, c.Conf.Setup.Pull)
if err != nil {
return 255, err
}
return info.State.ExitCode, nil
}
func execDeploy(c *Context) (int, error) {
return runSteps(c, c.Conf.Deploy)
}
func execPublish(c *Context) (int, error) {
return runSteps(c, c.Conf.Publish)
}
func execNotify(c *Context) (int, error) {
return runSteps(c, c.Conf.Notify)
}
func execCompose(c *Context) (int, error) {
for _, step := range c.Conf.Compose {
conf := toContainerConfig(step)
_, err := daemon(c.client, conf, step.Pull)
if err != nil {
return 0, err
}
}
return 0, nil
}
func runSteps(c *Context, steps map[string]*common.Step) (int, error) {
for _, step := range steps {
// verify the step matches the branch
// and other specifications
if step.Condition != nil {
if !step.Condition.MatchOwner(c.Repo.Owner) ||
!step.Condition.MatchBranch(c.Clone.Branch) ||
!step.Condition.MatchMatrix(c.Job.Environment) {
continue
}
}
conf := toContainerConfig(step)
conf.Cmd = toCommand(c, step)
// append global environment variables
conf.Env = append(conf.Env, c.System.Globals...)
info, err := run(c.client, conf, step.Pull)
if err != nil {
return 255, err
} else if info.State.ExitCode != 0 {
return info.State.ExitCode, nil
}
}
return 0, nil
}
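
For orientation, a rough sketch of a .drone.yml that would exercise the exec
functions above. Section names mirror the Conf fields read in setup (build,
compose, publish, deploy, notify); the images, plugin names, and condition
syntax are illustrative only.

build:
  image: golang:1.4
  commands:
    - go build
    - go test
compose:
  database:
    image: postgres:9.4
notify:
  slack:
    channel: dev
    when:
      branch: master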

@@ -1,96 +0,0 @@
package main
import (
"encoding/json"
"strconv"
"strings"
"github.com/drone/drone/Godeps/_workspace/src/github.com/samalba/dockerclient"
"github.com/drone/drone/pkg/types"
)
// helper function that converts the build step to
// a containerConfig for use with the dockerclient
func toContainerConfig(step *types.Step) *dockerclient.ContainerConfig {
config := &dockerclient.ContainerConfig{
Image: step.Image,
Env: step.Environment,
Cmd: step.Command,
Entrypoint: step.Entrypoint,
WorkingDir: step.WorkingDir,
HostConfig: dockerclient.HostConfig{
Privileged: step.Privileged,
NetworkMode: step.NetworkMode,
},
}
if len(config.Entrypoint) == 0 {
config.Entrypoint = nil
}
config.Volumes = map[string]struct{}{}
for _, path := range step.Volumes {
if !strings.Contains(path, ":") {
continue
}
parts := strings.Split(path, ":")
config.Volumes[parts[1]] = struct{}{}
config.HostConfig.Binds = append(config.HostConfig.Binds, path)
}
return config
}
// helper function to inject drone-specific environment
// variables into the container.
func toEnv(c *Context) map[string]string {
return map[string]string{
"CI": "true",
"BUILD_DIR": c.Clone.Dir,
"BUILD_ID": strconv.Itoa(c.Build.Number),
"BUILD_NUMBER": strconv.Itoa(c.Build.Number),
"JOB_NAME": c.Repo.FullName,
"WORKSPACE": c.Clone.Dir,
"GIT_BRANCH": c.Clone.Branch,
"GIT_COMMIT": c.Clone.Sha,
"DRONE": "true",
"DRONE_REPO": c.Repo.FullName,
"DRONE_BUILD": strconv.Itoa(c.Build.Number),
"DRONE_BRANCH": c.Clone.Branch,
"DRONE_COMMIT": c.Clone.Sha,
"DRONE_DIR": c.Clone.Dir,
}
}
// helper function to encode the build step to
// a json string. Primarily used for plugins, which
// expect a json encoded string in stdin or arg[1].
func toCommand(c *Context, step *types.Step) []string {
p := payload{
c.Repo,
c.Build,
c.Job,
c.Clone,
step.Config,
}
return []string{p.Encode()}
}
// payload represents the payload of a plugin
// that is serialized and sent to the plugin in JSON
// format via stdin or arg[1].
type payload struct {
Repo *types.Repo `json:"repo"`
Build *types.Build `json:"build"`
Job *types.Job `json:"job"`
Clone *types.Clone `json:"clone"`
Config map[string]interface{} `json:"vargs"`
}
// Encode encodes the payload in JSON format.
func (p *payload) Encode() string {
out, _ := json.Marshal(p)
return string(out)
}
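
Concretely, the single argument built by toCommand is a JSON string shaped
like the payload struct above; a plugin receives something like the following,
with the inner objects elided here.

{
  "repo": {...},
  "build": {...},
  "job": {...},
  "clone": {...},
  "vargs": {...}
}

The vargs field carries the step's free-form Config map from the yaml.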

@@ -23,8 +23,6 @@ import (
_ "github.com/drone/drone/pkg/remote/builtin/github"
_ "github.com/drone/drone/pkg/remote/builtin/gitlab"
_ "github.com/drone/drone/pkg/store/builtin"
_ "net/http/pprof"
)
var (
@@ -115,12 +113,7 @@ func main() {
// launch the local queue runner if the system
// is not configured to run in agent mode
if len(settings.Agents.Secret) != 0 {
log.Infof("Run builds using remote build agents")
} else {
log.Infof("Run builds using the embedded build runner")
go run(&runner_, queue_)
}
go run(&runner_, queue_)
r := gin.Default()