fixes #1523 to ensure json-file driver (#2083)

Brad Rydzewski 2017-06-22 15:11:05 -04:00 committed by GitHub
parent b833b16f32
commit 3ed811644f
10 changed files with 289 additions and 39 deletions

View file

@@ -46,7 +46,7 @@ pipeline:
image: plugins/docker
repo: drone/drone
secrets: [ docker_username, docker_password ]
- tag: [ 0.7, 0.7.1 ]
+ tag: [ 0.7, 0.7.2 ]
when:
event: tag

View file

@@ -1,14 +1,13 @@
<!-- PLEASE READ BEFORE DELETING
Bugs or Issues? Please do not open a GitHub issue until you have
- discussed and verified on the mailing list:
+ discussed and verified in our chatroom:
- http://discourse.drone.io/
+ https://gitter.im/drone/drone
Failing Builds? Please do not use GitHub issues for generic support
- questions. Instead use the mailing list or Stack Overflow:
+ questions. Instead please use Stack Overflow:
- http://discourse.drone.io/
http://stackoverflow.com/questions/tagged/drone.io
-->

View file

@@ -148,6 +148,17 @@ func (s *RPC) Next(c context.Context, filter rpc.Filter) (*rpc.Pipeline, error)
return nil, nil
}
pipeline := new(rpc.Pipeline)
// check if the process was previously cancelled
// cancelled, _ := s.checkCancelled(pipeline)
// if cancelled {
// logrus.Debugf("ignore pid %v: cancelled by user", pipeline.ID)
// if derr := s.queue.Done(c, pipeline.ID); derr != nil {
// logrus.Errorf("error: done: cannot ack proc_id %v: %s", pipeline.ID, derr)
// }
// return nil, nil
// }
err = json.Unmarshal(task.Data, pipeline)
return pipeline, err
}
@@ -442,3 +453,18 @@ func (s *RPC) Log(c context.Context, id string, line *rpc.Line) error {
s.logger.Write(c, id, entry)
return nil
}
func (s *RPC) checkCancelled(pipeline *rpc.Pipeline) (bool, error) {
pid, err := strconv.ParseInt(pipeline.ID, 10, 64)
if err != nil {
return false, err
}
proc, err := s.store.ProcLoad(pid)
if err != nil {
return false, err
}
if proc.State == model.StatusKilled {
return true, nil
}
return false, nil
}
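Note that the commented-out guard in Next above runs before json.Unmarshal, so it would test a freshly allocated, still-empty pipeline; if the check is ever enabled it has to come after the task payload is decoded so that pipeline.ID is populated. A minimal sketch of the enabled path (not part of this commit), under that assumption:

    pipeline := new(rpc.Pipeline)
    err = json.Unmarshal(task.Data, pipeline)
    if err != nil {
        return nil, err
    }
    // skip tasks the user cancelled while they sat in the queue
    if cancelled, _ := s.checkCancelled(pipeline); cancelled {
        logrus.Debugf("ignore pid %v: cancelled by user", pipeline.ID)
        if derr := s.queue.Done(c, pipeline.ID); derr != nil {
            logrus.Errorf("error: done: cannot ack proc_id %v: %s", pipeline.ID, derr)
        }
        return nil, nil
    }
    return pipeline, nil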

View file

@@ -44,9 +44,13 @@ func toHostConfig(proc *backend.Step) *container.HostConfig {
Memory: proc.MemLimit,
MemorySwap: proc.MemSwapLimit,
},
LogConfig: container.LogConfig{
Type: "json-file",
},
Privileged: proc.Privileged,
ShmSize: proc.ShmSize,
}
// if len(proc.VolumesFrom) != 0 {
// config.VolumesFrom = proc.VolumesFrom
// }
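Forcing LogConfig to the json-file driver is the substance of this fix: the Docker API can only stream a container's logs back from a driver that stores them readably, so log collection broke on daemons whose default driver was, say, syslog or none. A minimal, self-contained sketch of the read path this protects, assuming the standard Docker Go client; "my_container" is a placeholder:

    package main

    import (
        "context"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "github.com/docker/docker/pkg/stdcopy"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        // with json-file forced on the container, this call works
        // regardless of the daemon's default log driver
        rc, err := cli.ContainerLogs(context.Background(), "my_container", types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Follow:     true,
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        stdcopy.StdCopy(os.Stdout, os.Stderr, rc) // demultiplex stdout/stderr
    }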

View file

@@ -0,0 +1,105 @@
package compiler
import (
"path"
"strings"
"github.com/cncd/pipeline/pipeline/frontend/yaml"
libcompose "github.com/docker/libcompose/yaml"
)
// Cacher defines a compiler transform that can be used
// to implement default caching for a repository.
type Cacher interface {
Restore(repo, branch string, mounts []string) *yaml.Container
Rebuild(repo, branch string, mounts []string) *yaml.Container
}
type volumeCacher struct {
base string
}
func (c *volumeCacher) Restore(repo, branch string, mounts []string) *yaml.Container {
return &yaml.Container{
Name: "rebuild_cache",
Image: "plugins/volume-cache:1.0.0",
Vargs: map[string]interface{}{
"mount": mounts,
"path": "/cache",
"restore": true,
"file": strings.Replace(branch, "/", "_", -1) + ".tar",
"fallback_to": "master.tar",
},
Volumes: libcompose.Volumes{
Volumes: []*libcompose.Volume{
{
Source: path.Join(c.base, repo),
Destination: "/cache",
// TODO add access mode
},
},
},
}
}
func (c *volumeCacher) Rebuild(repo, branch string, mounts []string) *yaml.Container {
return &yaml.Container{
Name: "rebuild_cache",
Image: "plugins/volume-cache:1.0.0",
Vargs: map[string]interface{}{
"mount": mounts,
"path": "/cache",
"rebuild": true,
"flush": true,
"file": strings.Replace(branch, "/", "_", -1) + ".tar",
},
Volumes: libcompose.Volumes{
Volumes: []*libcompose.Volume{
{
Source: path.Join(c.base, repo),
Destination: "/cache",
// TODO add access mode
},
},
},
}
}
type s3Cacher struct {
bucket string
access string
secret string
region string
}
func (c *s3Cacher) Restore(repo, branch string, mounts []string) *yaml.Container {
return &yaml.Container{
Name: "rebuild_cache",
Image: "plugins/s3-cache:latest",
Vargs: map[string]interface{}{
"mount": mounts,
"access_key": c.access,
"secret_key": c.secret,
"bucket": c.bucket,
"region": c.region,
"rebuild": true,
},
}
}
func (c *s3Cacher) Rebuild(repo, branch string, mounts []string) *yaml.Container {
return &yaml.Container{
Name: "rebuild_cache",
Image: "plugins/s3-cache:latest",
Vargs: map[string]interface{}{
"mount": mounts,
"access_key": c.access,
"secret_key": c.secret,
"bucket": c.bucket,
"region": c.region,
"rebuild": true,
"flush": true,
},
}
}
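Either cacher is selected through a compiler option (see the options hunk further down); a usage sketch, where the base path, credentials, region, and bucket are placeholders:

    c := compiler.New(
        compiler.WithVolumeCacher("/var/lib/drone/cache"),
        // or, for agents that share no local disk:
        // compiler.WithS3Cacher("my-access-key", "my-secret-key", "us-east-1", "my-cache-bucket"),
    )

Note the volume cacher keys archives by branch (slashes replaced with underscores) and falls back to master.tar, so new branches warm-start from the mainline cache.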

View file

@@ -6,7 +6,6 @@ import (
"github.com/cncd/pipeline/pipeline/backend"
"github.com/cncd/pipeline/pipeline/frontend"
"github.com/cncd/pipeline/pipeline/frontend/yaml"
- // libcompose "github.com/docker/libcompose/yaml"
)
// TODO(bradrydzewski) compiler should handle user-defined volumes from YAML
@@ -26,6 +25,15 @@ type Secret struct {
Match []string
}
type ResourceLimit struct {
MemSwapLimit int64
MemLimit int64
ShmSize int64
CPUQuota int64
CPUShares int64
CPUSet string
}
// Compiler compiles the yaml
type Compiler struct {
local bool
@@ -39,6 +47,8 @@ type Compiler struct {
metadata frontend.Metadata
registries []Registry
secrets map[string]Secret
cacher Cacher
reslimit ResourceLimit
}
// New creates a new Compiler with options.
@@ -112,6 +122,8 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
}
}
c.setupCache(conf, config)
// add services steps
if len(conf.Services.Containers) != 0 {
stage := new(backend.Stage)
@@ -158,5 +170,41 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
stage.Steps = append(stage.Steps, step)
}
c.setupCacheRebuild(conf, config)
return config
}
func (c *Compiler) setupCache(conf *yaml.Config, ir *backend.Config) {
if c.local || len(conf.Cache) == 0 || c.cacher == nil {
return
}
container := c.cacher.Restore(c.metadata.Repo.Name, c.metadata.Curr.Commit.Branch, conf.Cache)
name := fmt.Sprintf("%s_restore_cache", c.prefix)
step := c.createProcess(name, container)
stage := new(backend.Stage)
stage.Name = name
stage.Alias = "restore_cache"
stage.Steps = append(stage.Steps, step)
ir.Stages = append(ir.Stages, stage)
}
func (c *Compiler) setupCacheRebuild(conf *yaml.Config, ir *backend.Config) {
if c.local || len(conf.Cache) == 0 || c.metadata.Curr.Event != "push" || c.cacher == nil {
return
}
container := c.cacher.Rebuild(c.metadata.Repo.Name, c.metadata.Curr.Commit.Branch, conf.Cache)
name := fmt.Sprintf("%s_rebuild_cache", c.prefix)
step := c.createProcess(name, container)
stage := new(backend.Stage)
stage.Name = name
stage.Alias = "rebuild_cache"
stage.Steps = append(stage.Steps, step)
ir.Stages = append(ir.Stages, stage)
}
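Net effect on the intermediate representation: setupCache appends the restore stage before the service and build stages, and setupCacheRebuild appends the rebuild stage after them, the latter only on push events so pull requests can read the cache but never repopulate it. A rough inspection sketch, given a Compiler c configured with a cacher and a parsed conf; the "pipeline_0" prefix is hypothetical:

    ir := c.Compile(conf)
    for _, stage := range ir.Stages {
        fmt.Println(stage.Name, stage.Alias)
    }
    // plausible output with caching enabled:
    //   pipeline_0_restore_cache restore_cache
    //   ... service and build stages ...
    //   pipeline_0_rebuild_cache rebuild_cache   (push events only)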

View file

@@ -109,6 +109,31 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backen
}
}
memSwapLimit := int64(container.MemSwapLimit)
if c.reslimit.MemSwapLimit != 0 {
memSwapLimit = c.reslimit.MemSwapLimit
}
memLimit := int64(container.MemLimit)
if c.reslimit.MemLimit != 0 {
memLimit = c.reslimit.MemLimit
}
shmSize := int64(container.ShmSize)
if c.reslimit.ShmSize != 0 {
shmSize = c.reslimit.ShmSize
}
cpuQuota := int64(container.CPUQuota)
if c.reslimit.CPUQuota != 0 {
cpuQuota = c.reslimit.CPUQuota
}
cpuShares := int64(container.CPUShares)
if c.reslimit.CPUShares != 0 {
cpuShares = c.reslimit.CPUShares
}
cpuSet := container.CPUSet
if c.reslimit.CPUSet != "" {
cpuSet = c.reslimit.CPUSet
}
return &backend.Step{
Name: name,
Alias: container.Name,
@@ -127,12 +152,12 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backen
Networks: networks,
DNS: container.DNS,
DNSSearch: container.DNSSearch,
- MemSwapLimit: int64(container.MemSwapLimit),
- MemLimit: int64(container.MemLimit),
- ShmSize: int64(container.ShmSize),
- CPUQuota: int64(container.CPUQuota),
- CPUShares: int64(container.CPUShares),
- CPUSet: container.CPUSet,
+ MemSwapLimit: memSwapLimit,
+ MemLimit: memLimit,
+ ShmSize: shmSize,
+ CPUQuota: cpuQuota,
+ CPUShares: cpuShares,
+ CPUSet: cpuSet,
AuthConfig: authConfig,
OnSuccess: container.Constraints.Status.Match("success"),
OnFailure: (len(container.Constraints.Status.Include)+
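The override rule above is uniform: a non-zero (non-empty, for CPUSet) server-level value in c.reslimit replaces the per-container YAML value, and the zero value falls through. A worked example with hypothetical numbers:

    // container.MemLimit  = 268435456   (256 MiB, from the repo's .drone.yml)
    // c.reslimit.MemLimit = 536870912   (512 MiB, server-wide default)
    // => step.MemLimit    = 536870912   (server value wins)

    // container.CPUSet  = "0-3"         (from the repo's .drone.yml)
    // c.reslimit.CPUSet = ""            (unset on the server)
    // => step.CPUSet    = "0-3"         (YAML value falls through)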

View file

@@ -145,6 +145,34 @@ func WithEnviron(env map[string]string) Option {
}
}
// WithCacher configures the compiler with default cache settings.
func WithCacher(cacher Cacher) Option {
return func(compiler *Compiler) {
compiler.cacher = cacher
}
}
// WithVolumeCacher configures the compiler with default local volume
// caching enabled.
func WithVolumeCacher(base string) Option {
return func(compiler *Compiler) {
compiler.cacher = &volumeCacher{base: base}
}
}
// WithS3Cacher configures the compiler with default Amazon S3
// caching enabled.
func WithS3Cacher(access, secret, region, bucket string) Option {
return func(compiler *Compiler) {
compiler.cacher = &s3Cacher{
access: access,
secret: secret,
bucket: bucket,
region: region,
}
}
}
// WithProxy configures the compiler with HTTP_PROXY, HTTPS_PROXY,
// and NO_PROXY environment variables added by default to every
// container in the pipeline.
@@ -169,6 +197,21 @@ func WithNetworks(networks ...string) Option {
}
}
// WithResourceLimit configures the compiler with default resource limits that
// are applied to each container in the pipeline.
func WithResourceLimit(swap, mem, shmsize, cpuQuota, cpuShares int64, cpuSet string) Option {
return func(compiler *Compiler) {
compiler.reslimit = ResourceLimit{
MemSwapLimit: swap,
MemLimit: mem,
ShmSize: shmsize,
CPUQuota: cpuQuota,
CPUShares: cpuShares,
CPUSet: cpuSet,
}
}
}
// TODO(bradrydzewski) consider an alternate approach to
// WithProxy where the proxy strings are passed directly
// to the function as named parameters.
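A server would typically apply these limits once at startup, sourcing the values from its own configuration; a usage sketch with hypothetical values, where zeros leave the per-repository YAML settings in effect:

    c := compiler.New(
        compiler.WithResourceLimit(
            1073741824, // swap: 1 GiB
            536870912,  // mem: 512 MiB
            67108864,   // shmsize: 64 MiB
            0,          // cpuQuota: keep YAML value
            0,          // cpuShares: keep YAML value
            "",         // cpuSet: keep YAML value
        ),
    )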

vendor/vendor.json
View file

@@ -39,74 +39,74 @@
{
"checksumSHA1": "W3AuK8ocqHwlUajGmQLFvnRhTZE=",
"path": "github.com/cncd/pipeline/pipeline",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "rO+djTfB4LrT+FBbpotyUUobOtU=",
"path": "github.com/cncd/pipeline/pipeline/backend",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
- "checksumSHA1": "gLqopO27JUHpxbV+jxggCMzqROY=",
+ "checksumSHA1": "DzP4c915B+gJTE5RCKQHzxwrUg4=",
"path": "github.com/cncd/pipeline/pipeline/backend/docker",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "8Hj/OZnYZyz5N2hqENCTTaGtkNQ=",
"path": "github.com/cncd/pipeline/pipeline/frontend",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "9KYIsY8WlWbrRAP7caEpWT70P9c=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
- "checksumSHA1": "Pyldit0XriIzyFUmnvjPrghJLzw=",
+ "checksumSHA1": "1saqrg1gk6F2N0x2rOCTnSfwBAY=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/compiler",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "Q0GkNUFamVYIA1Fd8r0A5M6Gx54=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/linter",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "kx2sPUIMozPC/g6E4w48h3FfH3k=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/matrix",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "L7Q5qJmPITNmvFEEaj5MPwCWFRk=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/types",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "2/3f3oNmxXy5kcrRLCFa24Oc9O4=",
"path": "github.com/cncd/pipeline/pipeline/interrupt",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "uOjTfke7Qxosrivgz/nVTHeIP5g=",
"path": "github.com/cncd/pipeline/pipeline/multipart",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "vWImaniGEUutEvLvNCzTpSRSArg=",
"path": "github.com/cncd/pipeline/pipeline/rpc",
- "revision": "29b52c5e09e461941f262919903e2cc5a49de080",
- "revisionTime": "2017-06-03T15:27:13Z"
+ "revision": "0a4a480b4a4ceca5f6bf3ed56d29b5ad3ca1c53a",
+ "revisionTime": "2017-06-21T22:24:30Z"
},
{
"checksumSHA1": "7Qj1DK0ceAXkYztW0l3+L6sn+V8=",

View file

@@ -8,7 +8,7 @@ var (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor int64 = 7
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch int64 = 1
+ VersionPatch int64 = 2
// VersionPre indicates prerelease
VersionPre string
// VersionDev indicates development branch. Releases will be empty string.