fixes #1523 to ensure json-file driver

Brad Rydzewski 2017-06-22 15:06:28 -04:00
parent eed03a5803
commit ee793f13ca
10 changed files with 289 additions and 39 deletions

View file

@@ -46,7 +46,7 @@ pipeline:
     image: plugins/docker
     repo: drone/drone
     secrets: [ docker_username, docker_password ]
-    tag: [ 0.7, 0.7.1 ]
+    tag: [ 0.7, 0.7.2 ]
     when:
       event: tag

View file

@ -1,14 +1,13 @@
<!-- PLEASE READ BEFORE DELETING <!-- PLEASE READ BEFORE DELETING
Bugs or Issues? Please do not open a GitHub issue until you have Bugs or Issues? Please do not open a GitHub issue until you have
discussed and verified on the mailing list: discussed and verified in our chatroom:
http://discourse.drone.io/ https://gitter.im/drone/drone
Failing Builds? Please do not use GitHub issues for generic support Failing Builds? Please do not use GitHub issues for generic support
questions. Instead use the mailing list or Stack Overflow: questions. Instead please use Stack Overflow:
http://discourse.drone.io/
http://stackoverflow.com/questions/tagged/drone.io http://stackoverflow.com/questions/tagged/drone.io
--> -->

View file

@@ -148,6 +148,17 @@ func (s *RPC) Next(c context.Context, filter rpc.Filter) (*rpc.Pipeline, error)
 		return nil, nil
 	}
 	pipeline := new(rpc.Pipeline)
+
+	// check if the process was previously cancelled
+	// cancelled, _ := s.checkCancelled(pipeline)
+	// if cancelled {
+	// 	logrus.Debugf("ignore pid %v: cancelled by user", pipeline.ID)
+	// 	if derr := s.queue.Done(c, pipeline.ID); derr != nil {
+	// 		logrus.Errorf("error: done: cannot ack proc_id %v: %s", pipeline.ID, derr)
+	// 	}
+	// 	return nil, nil
+	// }
+
 	err = json.Unmarshal(task.Data, pipeline)
 	return pipeline, err
 }
@@ -442,3 +453,18 @@ func (s *RPC) Log(c context.Context, id string, line *rpc.Line) error {
 	s.logger.Write(c, id, entry)
 	return nil
 }
+
+func (s *RPC) checkCancelled(pipeline *rpc.Pipeline) (bool, error) {
+	pid, err := strconv.ParseInt(pipeline.ID, 10, 64)
+	if err != nil {
+		return false, err
+	}
+	proc, err := s.store.ProcLoad(pid)
+	if err != nil {
+		return false, err
+	}
+	if proc.State == model.StatusKilled {
+		return true, nil
+	}
+	return false, nil
+}
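The cancellation guard in Next lands commented out. Note that, as placed, it would run before the json.Unmarshal call that actually populates pipeline.ID, so checkCancelled would always fail to parse an empty ID. The fragment below is a sketch of the commented intent in working order, moved after unmarshaling; it is a reading of the comments, not part of the committed change:

	err = json.Unmarshal(task.Data, pipeline)
	if err != nil {
		return nil, err
	}
	// skip tasks the user already cancelled, and ack them so the
	// queue stops redelivering them
	if cancelled, _ := s.checkCancelled(pipeline); cancelled {
		logrus.Debugf("ignore pid %v: cancelled by user", pipeline.ID)
		if derr := s.queue.Done(c, pipeline.ID); derr != nil {
			logrus.Errorf("error: done: cannot ack proc_id %v: %s", pipeline.ID, derr)
		}
		return nil, nil
	}
	return pipeline, nil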

View file

@@ -44,9 +44,13 @@ func toHostConfig(proc *backend.Step) *container.HostConfig {
 			Memory:     proc.MemLimit,
 			MemorySwap: proc.MemSwapLimit,
 		},
+		LogConfig: container.LogConfig{
+			Type: "json-file",
+		},
 		Privileged: proc.Privileged,
 		ShmSize:    proc.ShmSize,
 	}
 	// if len(proc.VolumesFrom) != 0 {
 	// 	config.VolumesFrom = proc.VolumesFrom
 	// }
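Context for the fix: the agent reads step output back through the Docker logs API, and that API only works when the container's log driver keeps logs on disk (json-file or journald). If the daemon-wide default driver is, say, syslog, log collection breaks, which is what #1523 reported; pinning json-file per container makes the agent independent of the daemon default. A minimal illustration of the call that depends on it, using the docker/docker client as vendored at the time (the container name is a placeholder):

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Fails with "configured logging driver does not support reading"
	// when the container uses a driver like syslog; works once the
	// HostConfig forces json-file.
	rc, err := cli.ContainerLogs(context.Background(), "build_step_0", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	// The raw stream is multiplexed for non-TTY containers; stdcopy.StdCopy
	// would demux it, but plain io.Copy is enough for a sketch.
	io.Copy(os.Stdout, rc)
}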

View file

@@ -0,0 +1,105 @@
+package compiler
+
+import (
+	"path"
+	"strings"
+
+	"github.com/cncd/pipeline/pipeline/frontend/yaml"
+	libcompose "github.com/docker/libcompose/yaml"
+)
+
+// Cacher defines a compiler transform that can be used
+// to implement default caching for a repository.
+type Cacher interface {
+	Restore(repo, branch string, mounts []string) *yaml.Container
+	Rebuild(repo, branch string, mounts []string) *yaml.Container
+}
+
+type volumeCacher struct {
+	base string
+}
+
+func (c *volumeCacher) Restore(repo, branch string, mounts []string) *yaml.Container {
+	return &yaml.Container{
+		Name:  "restore_cache",
+		Image: "plugins/volume-cache:1.0.0",
+		Vargs: map[string]interface{}{
+			"mount":       mounts,
+			"path":        "/cache",
+			"restore":     true,
+			"file":        strings.Replace(branch, "/", "_", -1) + ".tar",
+			"fallback_to": "master.tar",
+		},
+		Volumes: libcompose.Volumes{
+			Volumes: []*libcompose.Volume{
+				{
+					Source:      path.Join(c.base, repo),
+					Destination: "/cache",
+					// TODO add access mode
+				},
+			},
+		},
+	}
+}
+
+func (c *volumeCacher) Rebuild(repo, branch string, mounts []string) *yaml.Container {
+	return &yaml.Container{
+		Name:  "rebuild_cache",
+		Image: "plugins/volume-cache:1.0.0",
+		Vargs: map[string]interface{}{
+			"mount":   mounts,
+			"path":    "/cache",
+			"rebuild": true,
+			"flush":   true,
+			"file":    strings.Replace(branch, "/", "_", -1) + ".tar",
+		},
+		Volumes: libcompose.Volumes{
+			Volumes: []*libcompose.Volume{
+				{
+					Source:      path.Join(c.base, repo),
+					Destination: "/cache",
+					// TODO add access mode
+				},
+			},
+		},
+	}
+}
+
+type s3Cacher struct {
+	bucket string
+	access string
+	secret string
+	region string
+}
+
+func (c *s3Cacher) Restore(repo, branch string, mounts []string) *yaml.Container {
+	return &yaml.Container{
+		Name:  "restore_cache",
+		Image: "plugins/s3-cache:latest",
+		Vargs: map[string]interface{}{
+			"mount":      mounts,
+			"access_key": c.access,
+			"secret_key": c.secret,
+			"bucket":     c.bucket,
+			"region":     c.region,
+			"restore":    true,
+		},
+	}
+}
+
+func (c *s3Cacher) Rebuild(repo, branch string, mounts []string) *yaml.Container {
+	return &yaml.Container{
+		Name:  "rebuild_cache",
+		Image: "plugins/s3-cache:latest",
+		Vargs: map[string]interface{}{
+			"mount":      mounts,
+			"access_key": c.access,
+			"secret_key": c.secret,
+			"bucket":     c.bucket,
+			"region":     c.region,
+			"rebuild":    true,
+			"flush":      true,
+		},
+	}
+}
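The Cacher contract is deliberately small: given a repository, branch, and list of mount paths, return a plugin container to inject into the pipeline. Any cache transport can be plugged in by satisfying the two methods. The sketch below does this for an SFTP-backed cache; the sftpCacher type and its wiring are illustrative, only volumeCacher and s3Cacher ship in this commit:

// sftpCacher is a hypothetical third Cacher implementation, shown to
// illustrate the interface.
type sftpCacher struct {
	server string
}

func (c *sftpCacher) Restore(repo, branch string, mounts []string) *yaml.Container {
	return &yaml.Container{
		Name:  "restore_cache",
		Image: "plugins/sftp-cache:latest",
		Vargs: map[string]interface{}{
			"server":  c.server,
			"path":    path.Join("/cache", repo),
			"mount":   mounts,
			"restore": true,
		},
	}
}

func (c *sftpCacher) Rebuild(repo, branch string, mounts []string) *yaml.Container {
	return &yaml.Container{
		Name:  "rebuild_cache",
		Image: "plugins/sftp-cache:latest",
		Vargs: map[string]interface{}{
			"server":  c.server,
			"path":    path.Join("/cache", repo),
			"mount":   mounts,
			"rebuild": true,
		},
	}
}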

View file

@@ -6,7 +6,6 @@ import (
 	"github.com/cncd/pipeline/pipeline/backend"
 	"github.com/cncd/pipeline/pipeline/frontend"
 	"github.com/cncd/pipeline/pipeline/frontend/yaml"
-	// libcompose "github.com/docker/libcompose/yaml"
 )

 // TODO(bradrydzewski) compiler should handle user-defined volumes from YAML
@@ -26,6 +25,15 @@ type Secret struct {
 	Match []string
 }

+type ResourceLimit struct {
+	MemSwapLimit int64
+	MemLimit     int64
+	ShmSize      int64
+	CPUQuota     int64
+	CPUShares    int64
+	CPUSet       string
+}
+
 // Compiler compiles the yaml
 type Compiler struct {
 	local      bool
@@ -39,6 +47,8 @@ type Compiler struct {
 	metadata   frontend.Metadata
 	registries []Registry
 	secrets    map[string]Secret
+	cacher     Cacher
+	reslimit   ResourceLimit
 }

 // New creates a new Compiler with options.
@@ -112,6 +122,8 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
 		}
 	}

+	c.setupCache(conf, config)
+
 	// add services steps
 	if len(conf.Services.Containers) != 0 {
 		stage := new(backend.Stage)
@@ -158,5 +170,41 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
 		stage.Steps = append(stage.Steps, step)
 	}

+	c.setupCacheRebuild(conf, config)
+
 	return config
 }
+
+func (c *Compiler) setupCache(conf *yaml.Config, ir *backend.Config) {
+	if c.local || len(conf.Cache) == 0 || c.cacher == nil {
+		return
+	}
+	container := c.cacher.Restore(c.metadata.Repo.Name, c.metadata.Curr.Commit.Branch, conf.Cache)
+	name := fmt.Sprintf("%s_restore_cache", c.prefix)
+	step := c.createProcess(name, container)
+
+	stage := new(backend.Stage)
+	stage.Name = name
+	stage.Alias = "restore_cache"
+	stage.Steps = append(stage.Steps, step)
+
+	ir.Stages = append(ir.Stages, stage)
+}
+
+func (c *Compiler) setupCacheRebuild(conf *yaml.Config, ir *backend.Config) {
+	if c.local || len(conf.Cache) == 0 || c.metadata.Curr.Event != "push" || c.cacher == nil {
+		return
+	}
+	container := c.cacher.Rebuild(c.metadata.Repo.Name, c.metadata.Curr.Commit.Branch, conf.Cache)
+	name := fmt.Sprintf("%s_rebuild_cache", c.prefix)
+	step := c.createProcess(name, container)
+
+	stage := new(backend.Stage)
+	stage.Name = name
+	stage.Alias = "rebuild_cache"
+	stage.Steps = append(stage.Steps, step)
+
+	ir.Stages = append(ir.Stages, stage)
+}
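Taken together, the two hooks bracket the build: setupCache appends a restore stage before the service and build stages, and setupCacheRebuild appends a rebuild stage at the very end, and only for push events, so pull requests cannot poison the cache. A sketch of the wiring, assuming a parsed conf whose yaml declares a cache: list and a populated metadata value (the host path is an arbitrary example; the options are defined in the next file):

func compileWithCache(conf *yaml.Config, meta frontend.Metadata) *backend.Config {
	c := compiler.New(
		compiler.WithMetadata(meta),
		compiler.WithVolumeCacher("/var/lib/drone/cache"), // example host path
	)
	// Resulting stage order, roughly:
	//   setup/clone -> restore_cache -> services -> build steps -> rebuild_cache
	return c.Compile(conf)
}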

View file

@@ -109,6 +109,31 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backend.Step {
 		}
 	}

+	memSwapLimit := int64(container.MemSwapLimit)
+	if c.reslimit.MemSwapLimit != 0 {
+		memSwapLimit = c.reslimit.MemSwapLimit
+	}
+	memLimit := int64(container.MemLimit)
+	if c.reslimit.MemLimit != 0 {
+		memLimit = c.reslimit.MemLimit
+	}
+	shmSize := int64(container.ShmSize)
+	if c.reslimit.ShmSize != 0 {
+		shmSize = c.reslimit.ShmSize
+	}
+	cpuQuota := int64(container.CPUQuota)
+	if c.reslimit.CPUQuota != 0 {
+		cpuQuota = c.reslimit.CPUQuota
+	}
+	cpuShares := int64(container.CPUShares)
+	if c.reslimit.CPUShares != 0 {
+		cpuShares = c.reslimit.CPUShares
+	}
+	cpuSet := container.CPUSet
+	if c.reslimit.CPUSet != "" {
+		cpuSet = c.reslimit.CPUSet
+	}
+
 	return &backend.Step{
 		Name:  name,
 		Alias: container.Name,
@@ -127,12 +152,12 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backend.Step {
 		Networks:     networks,
 		DNS:          container.DNS,
 		DNSSearch:    container.DNSSearch,
-		MemSwapLimit: int64(container.MemSwapLimit),
-		MemLimit:     int64(container.MemLimit),
-		ShmSize:      int64(container.ShmSize),
-		CPUQuota:     int64(container.CPUQuota),
-		CPUShares:    int64(container.CPUShares),
-		CPUSet:       container.CPUSet,
+		MemSwapLimit: memSwapLimit,
+		MemLimit:     memLimit,
+		ShmSize:      shmSize,
+		CPUQuota:     cpuQuota,
+		CPUShares:    cpuShares,
+		CPUSet:       cpuSet,
 		AuthConfig:   authConfig,
 		OnSuccess:    container.Constraints.Status.Match("success"),
 		OnFailure: (len(container.Constraints.Status.Include)+
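The override rule is uniform across all six fields: a nonzero (for CPUSet, non-empty) server-side limit beats whatever the yaml requested, and a zero value defers to the container. The commit inlines the rule per field; restated as a helper for clarity, this is not code from the commit:

// pickLimit restates the precedence used above: the server-wide limit,
// when set, wins over the container's own request.
func pickLimit(server, container int64) int64 {
	if server != 0 {
		return server
	}
	return container
}

// e.g. memLimit := pickLimit(c.reslimit.MemLimit, int64(container.MemLimit))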

View file

@@ -145,6 +145,34 @@ func WithEnviron(env map[string]string) Option {
 	}
 }

+// WithCacher configures the compiler with default cache settings.
+func WithCacher(cacher Cacher) Option {
+	return func(compiler *Compiler) {
+		compiler.cacher = cacher
+	}
+}
+
+// WithVolumeCacher configures the compiler with default local volume
+// caching enabled.
+func WithVolumeCacher(base string) Option {
+	return func(compiler *Compiler) {
+		compiler.cacher = &volumeCacher{base: base}
+	}
+}
+
+// WithS3Cacher configures the compiler with default Amazon S3
+// caching enabled.
+func WithS3Cacher(access, secret, region, bucket string) Option {
+	return func(compiler *Compiler) {
+		compiler.cacher = &s3Cacher{
+			access: access,
+			secret: secret,
+			bucket: bucket,
+			region: region,
+		}
+	}
+}
+
 // WithProxy configures the compiler with HTTP_PROXY, HTTPS_PROXY,
 // and NO_PROXY environment variables added by default to every
 // container in the pipeline.
@@ -169,6 +197,21 @@ func WithNetworks(networks ...string) Option {
 	}
 }

+// WithResourceLimit configures the compiler with default resource limits
+// that are applied to each container in the pipeline.
+func WithResourceLimit(swap, mem, shmsize, cpuQuota, cpuShares int64, cpuSet string) Option {
+	return func(compiler *Compiler) {
+		compiler.reslimit = ResourceLimit{
+			MemSwapLimit: swap,
+			MemLimit:     mem,
+			ShmSize:      shmsize,
+			CPUQuota:     cpuQuota,
+			CPUShares:    cpuShares,
+			CPUSet:       cpuSet,
+		}
+	}
+}
+
 // TODO(bradrydzewski) consider an alternate approach to
 // WithProxy where the proxy strings are passed directly
 // to the function as named parameters.
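These options compose with the existing ones in this file. A server enabling S3-backed caching plus a fleet-wide 2 GiB memory cap might wire the compiler up as follows; the credential and bucket values are placeholders:

c := compiler.New(
	compiler.WithS3Cacher("access-key", "secret-key", "us-east-1", "build-cache"),
	// swap, shmsize, and the CPU limits are left at zero / empty,
	// which defers those fields to the per-container yaml values.
	compiler.WithResourceLimit(0, 2147483648, 0, 0, 0, ""),
)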

vendor/vendor.json (vendored, 52 changes)
View file

@ -39,74 +39,74 @@
{ {
"checksumSHA1": "W3AuK8ocqHwlUajGmQLFvnRhTZE=", "checksumSHA1": "W3AuK8ocqHwlUajGmQLFvnRhTZE=",
"path": "github.com/cncd/pipeline/pipeline", "path": "github.com/cncd/pipeline/pipeline",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "rO+djTfB4LrT+FBbpotyUUobOtU=", "checksumSHA1": "rO+djTfB4LrT+FBbpotyUUobOtU=",
"path": "github.com/cncd/pipeline/pipeline/backend", "path": "github.com/cncd/pipeline/pipeline/backend",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "gLqopO27JUHpxbV+jxggCMzqROY=", "checksumSHA1": "DzP4c915B+gJTE5RCKQHzxwrUg4=",
"path": "github.com/cncd/pipeline/pipeline/backend/docker", "path": "github.com/cncd/pipeline/pipeline/backend/docker",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "8Hj/OZnYZyz5N2hqENCTTaGtkNQ=", "checksumSHA1": "8Hj/OZnYZyz5N2hqENCTTaGtkNQ=",
"path": "github.com/cncd/pipeline/pipeline/frontend", "path": "github.com/cncd/pipeline/pipeline/frontend",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "9KYIsY8WlWbrRAP7caEpWT70P9c=", "checksumSHA1": "9KYIsY8WlWbrRAP7caEpWT70P9c=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml", "path": "github.com/cncd/pipeline/pipeline/frontend/yaml",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "Pyldit0XriIzyFUmnvjPrghJLzw=", "checksumSHA1": "1saqrg1gk6F2N0x2rOCTnSfwBAY=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/compiler", "path": "github.com/cncd/pipeline/pipeline/frontend/yaml/compiler",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "Q0GkNUFamVYIA1Fd8r0A5M6Gx54=", "checksumSHA1": "Q0GkNUFamVYIA1Fd8r0A5M6Gx54=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/linter", "path": "github.com/cncd/pipeline/pipeline/frontend/yaml/linter",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "kx2sPUIMozPC/g6E4w48h3FfH3k=", "checksumSHA1": "kx2sPUIMozPC/g6E4w48h3FfH3k=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/matrix", "path": "github.com/cncd/pipeline/pipeline/frontend/yaml/matrix",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "L7Q5qJmPITNmvFEEaj5MPwCWFRk=", "checksumSHA1": "L7Q5qJmPITNmvFEEaj5MPwCWFRk=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/types", "path": "github.com/cncd/pipeline/pipeline/frontend/yaml/types",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "2/3f3oNmxXy5kcrRLCFa24Oc9O4=", "checksumSHA1": "2/3f3oNmxXy5kcrRLCFa24Oc9O4=",
"path": "github.com/cncd/pipeline/pipeline/interrupt", "path": "github.com/cncd/pipeline/pipeline/interrupt",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "uOjTfke7Qxosrivgz/nVTHeIP5g=", "checksumSHA1": "uOjTfke7Qxosrivgz/nVTHeIP5g=",
"path": "github.com/cncd/pipeline/pipeline/multipart", "path": "github.com/cncd/pipeline/pipeline/multipart",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "vWImaniGEUutEvLvNCzTpSRSArg=", "checksumSHA1": "vWImaniGEUutEvLvNCzTpSRSArg=",
"path": "github.com/cncd/pipeline/pipeline/rpc", "path": "github.com/cncd/pipeline/pipeline/rpc",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080", "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
"revisionTime": "2017-06-03T15:27:13Z" "revisionTime": "2017-06-21T22:24:30Z"
}, },
{ {
"checksumSHA1": "7Qj1DK0ceAXkYztW0l3+L6sn+V8=", "checksumSHA1": "7Qj1DK0ceAXkYztW0l3+L6sn+V8=",

View file

@@ -8,7 +8,7 @@ var (
 	// VersionMinor is for functionality in a backwards-compatible manner
 	VersionMinor int64 = 7
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch int64 = 1
+	VersionPatch int64 = 2
 	// VersionPre indicates prerelease
 	VersionPre string
 	// VersionDev indicates development branch. Releases will be empty string.