Added support for step errors when executing the backend (#817)

When a backend step fails during execution, the run is marked as errored, but the error of the failing step itself is missing.

Added:
1. A log entry for the backend error (without the trace).
2. Marking the step as errored with exit code 126 (could not execute); a simplified sketch of this mapping follows below.

Co-authored-by: Zav Shotan <zshotan@bloomberg.net>
Co-authored-by: Anton Bracke <anton@ju60.de>
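
For orientation, the new behaviour boils down to the sketch below. stepStateFromError is a hypothetical helper name used only for illustration; in the diff the same logic lives inline in the new traceStep method of the pipeline runtime.

package main

import (
	"errors"
	"fmt"

	backend "github.com/woodpecker-ci/woodpecker/pipeline/backend/types"
)

// stepStateFromError condenses what the new traceStep fallback does when a
// step could not be executed at all: record the error and mark the step as
// exited with code 126 so the UI can show the failure instead of empty logs.
// (Hypothetical helper, not part of the diff.)
func stepStateFromError(err error) *backend.State {
	state := new(backend.State)
	if err != nil {
		state.Error = err    // new Error field introduced by this change
		state.Exited = true
		state.ExitCode = 126 // command invoked cannot be executed
	}
	return state
}

func main() {
	state := stepStateFromError(errors.New("could not start container"))
	fmt.Println(state.ExitCode, state.Error) // 126 could not start container
}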
Zav Shotan authored 2022-05-11 07:40:44 -04:00, committed by GitHub
parent bdb007e064
commit acbcc53872
5 changed files with 111 additions and 78 deletions


@@ -31,6 +31,7 @@ import (
 	backend "github.com/woodpecker-ci/woodpecker/pipeline/backend/types"
 	"github.com/woodpecker-ci/woodpecker/pipeline/multipart"
 	"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
+	"github.com/woodpecker-ci/woodpecker/shared/utils"
 )

 // TODO: Implement log streaming.
@@ -98,6 +99,13 @@ func (r *Runner) Run(ctx context.Context) error {
 	ctx, cancel := context.WithTimeout(ctxmeta, timeout)
 	defer cancel()

+	// Add sigterm support for internal context.
+	// Required when the pipeline is terminated by external signals
+	// like kubernetes.
+	ctx = utils.WithContextSigtermCallback(ctx, func() {
+		logger.Error().Msg("Received sigterm termination signal")
+	})
+
 	canceled := abool.New()
 	go func() {
 		logger.Debug().Msg("listen for cancel signal")
@@ -241,6 +249,7 @@ func (r *Runner) Run(ctx context.Context) error {
 	proclogger := logger.With().
 		Str("image", state.Pipeline.Step.Image).
 		Str("stage", state.Pipeline.Step.Alias).
+		Err(state.Process.Error).
 		Int("exit_code", state.Process.ExitCode).
 		Bool("exited", state.Process.Exited).
 		Logger()
@@ -252,6 +261,10 @@ func (r *Runner) Run(ctx context.Context) error {
 		Started:  time.Now().Unix(), // TODO do not do this
 		Finished: time.Now().Unix(),
 	}
+	if state.Process.Error != nil {
+		procState.Error = state.Process.Error.Error()
+	}
+
 	defer func() {
 		proclogger.Debug().Msg("update step status")


@@ -8,4 +8,6 @@ type State struct {
 	Exited bool `json:"exited"`
 	// Container is oom killed, true or false
 	OOMKilled bool `json:"oom_killed"`
+	// Container error
+	Error error
 }
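
Worth noting: the new Error field is a plain Go error with no json tag, so the message itself would not survive encoding/json marshalling (a concrete error such as the value returned by errors.New has no exported fields and marshals to {}). That is consistent with the agent change above, which copies state.Process.Error.Error() into the string-typed procState.Error before reporting. A small, illustrative demonstration; the local struct only mirrors the fields shown in this hunk so the snippet is self-contained:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// state mirrors the fields of backend.State shown above, redefined locally
// purely for illustration.
type state struct {
	Exited    bool `json:"exited"`
	OOMKilled bool `json:"oom_killed"`
	// Container error
	Error error
}

func main() {
	s := state{Exited: true, Error: errors.New("could not start container")}
	b, _ := json.Marshal(s)
	// The error message is lost in the JSON output:
	// {"exited":true,"oom_killed":false,"Error":{}}
	fmt.Println(string(b))
}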


@@ -54,11 +54,11 @@ func New(spec *backend.Config, opts ...Option) *Runtime {
 	return r
 }

-// Run starts the runtime and waits for it to complete.
+// Starts the execution of the pipeline and waits for it to complete
 func (r *Runtime) Run() error {
 	defer func() {
 		if err := r.engine.Destroy(r.ctx, r.spec); err != nil {
-			log.Error().Err(err).Msg("could not destroy engine")
+			log.Error().Err(err).Msg("could not destroy pipeline")
 		}
 	}()

@@ -81,18 +81,66 @@ func (r *Runtime) Run() error {
 	return r.err
 }

-//
-//
-//
-func (r *Runtime) execAll(procs []*backend.Step) <-chan error {
+// Updates the current status of a step
+func (r *Runtime) traceStep(processState *backend.State, err error, step *backend.Step) error {
+	if r.tracer == nil {
+		// no tracer nothing to trace :)
+		return nil
+	}
+
+	if processState == nil {
+		processState = new(backend.State)
+		if err != nil {
+			processState.Error = err
+			processState.Exited = true
+			processState.OOMKilled = false
+			processState.ExitCode = 126 // command invoked cannot be executed.
+		}
+	}
+
+	state := new(State)
+	state.Pipeline.Time = r.started
+	state.Pipeline.Step = step
+	state.Process = processState // empty
+	state.Pipeline.Error = r.err
+
+	return r.tracer.Trace(state)
+}
+
+// Executes a set of parallel steps
+func (r *Runtime) execAll(steps []*backend.Step) <-chan error {
 	var g errgroup.Group
 	done := make(chan error)
-	for _, proc := range procs {
-		proc := proc
+	for _, step := range steps {
+		// required since otherwise the loop variable
+		// will be captured by the function. This will
+		// recreate the step "variable"
+		step := step
 		g.Go(func() error {
-			return r.exec(proc)
+			// Case the pipeline was already complete.
+			switch {
+			case r.err != nil && !step.OnFailure:
+				return nil
+			case r.err == nil && !step.OnSuccess:
+				return nil
+			}
+
+			// Trace started.
+			err := r.traceStep(nil, nil, step)
+			if err != nil {
+				return err
+			}
+
+			processState, err := r.exec(step)
+
+			// Return the error after tracing it.
+			traceErr := r.traceStep(processState, err, step)
+			if traceErr != nil {
+				return traceErr
+			}
+			return err
 		})
 	}
@@ -103,86 +151,54 @@ func (r *Runtime) execAll(procs []*backend.Step) <-chan error {
 	return done
 }

-//
-//
-//
-func (r *Runtime) exec(proc *backend.Step) error {
-	switch {
-	case r.err != nil && !proc.OnFailure:
-		return nil
-	case r.err == nil && !proc.OnSuccess:
-		return nil
-	}
-
-	if r.tracer != nil {
-		state := new(State)
-		state.Pipeline.Time = r.started
-		state.Pipeline.Error = r.err
-		state.Pipeline.Step = proc
-		state.Process = new(backend.State) // empty
-		if err := r.tracer.Trace(state); err == ErrSkip {
-			return nil
-		} else if err != nil {
-			return err
-		}
-	}
-
+// Executes the step and returns the state and error.
+func (r *Runtime) exec(step *backend.Step) (*backend.State, error) {
 	// TODO: using DRONE_ will be deprecated with 0.15.0. remove fallback with following release
-	for key, value := range proc.Environment {
+	for key, value := range step.Environment {
 		if strings.HasPrefix(key, "CI_") {
-			proc.Environment[strings.Replace(key, "CI_", "DRONE_", 1)] = value
+			step.Environment[strings.Replace(key, "CI_", "DRONE_", 1)] = value
 		}
 	}

-	if err := r.engine.Exec(r.ctx, proc); err != nil {
-		return err
+	if err := r.engine.Exec(r.ctx, step); err != nil {
+		return nil, err
 	}

 	if r.logger != nil {
-		rc, err := r.engine.Tail(r.ctx, proc)
+		rc, err := r.engine.Tail(r.ctx, step)
 		if err != nil {
-			return err
+			return nil, err
 		}

 		go func() {
-			if err := r.logger.Log(proc, multipart.New(rc)); err != nil {
+			if err := r.logger.Log(step, multipart.New(rc)); err != nil {
 				log.Error().Err(err).Msg("process logging failed")
 			}
 			_ = rc.Close()
 		}()
 	}

-	if proc.Detached {
-		return nil
+	// nothing else to do, this is a detached process.
+	if step.Detached {
+		return nil, nil
 	}

-	wait, err := r.engine.Wait(r.ctx, proc)
+	waitState, err := r.engine.Wait(r.ctx, step)
 	if err != nil {
-		return err
+		return nil, err
 	}

-	if r.tracer != nil {
-		state := new(State)
-		state.Pipeline.Time = r.started
-		state.Pipeline.Error = r.err
-		state.Pipeline.Step = proc
-		state.Process = wait
-		if err := r.tracer.Trace(state); err != nil {
-			return err
+	if waitState.OOMKilled {
+		return waitState, &OomError{
+			Name: step.Name,
+			Code: waitState.ExitCode,
+		}
+	} else if waitState.ExitCode != 0 {
+		return waitState, &ExitError{
+			Name: step.Name,
+			Code: waitState.ExitCode,
 		}
 	}

-	if wait.OOMKilled {
-		return &OomError{
-			Name: proc.Name,
-			Code: wait.ExitCode,
-		}
-	} else if wait.ExitCode != 0 {
-		return &ExitError{
-			Name: proc.Name,
-			Code: wait.ExitCode,
-		}
-	}
-
-	return nil
+	return waitState, nil
 }
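
Since exec now returns the wait state together with a typed error, a caller that receives the error can still tell the failure modes apart. A hedged sketch, assuming OomError and ExitError are exported from the same pipeline package as Runtime (the diff shows their Name and Code fields but not their location):

package main

import (
	"errors"
	"fmt"

	"github.com/woodpecker-ci/woodpecker/pipeline"
)

// classify is illustrative only; it shows how the two error types returned by
// exec alongside the wait state can be distinguished with errors.As.
func classify(err error) string {
	var oom *pipeline.OomError
	var exit *pipeline.ExitError
	switch {
	case err == nil:
		return "step succeeded"
	case errors.As(err, &oom):
		return fmt.Sprintf("step %s was OOM-killed (exit code %d)", oom.Name, oom.Code)
	case errors.As(err, &exit):
		return fmt.Sprintf("step %s exited with code %d", exit.Name, exit.Code)
	default:
		// any other backend error is the case this change reports as exit code 126
		return "backend error: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(&pipeline.ExitError{Name: "build", Code: 1}))
}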


@@ -8,15 +8,17 @@
     <Icon name="close" class="ml-auto" />
   </div>

-  <div v-for="logLine in logLines" :key="logLine.pos" class="flex items-center">
-    <div class="text-gray-500 text-sm w-4">{{ (logLine.pos || 0) + 1 }}</div>
-    <!-- eslint-disable-next-line vue/no-v-html -->
-    <div class="mx-4 text-gray-200 dark:text-gray-400" v-html="logLine.out" />
-    <div class="ml-auto text-gray-500 text-sm">{{ logLine.time || 0 }}s</div>
-  </div>
-  <div v-if="proc?.end_time !== undefined" class="text-gray-500 text-sm mt-4 ml-8">
-    exit code {{ proc.exit_code }}
-  </div>
+  <template v-if="!proc?.error">
+    <div v-for="logLine in logLines" :key="logLine.pos" class="flex items-center">
+      <div class="text-gray-500 text-sm w-4">{{ (logLine.pos || 0) + 1 }}</div>
+      <!-- eslint-disable-next-line vue/no-v-html -->
+      <div class="mx-4 text-gray-200 dark:text-gray-400" v-html="logLine.out" />
+      <div class="ml-auto text-gray-500 text-sm">{{ logLine.time || 0 }}s</div>
+    </div>
+    <div v-if="proc?.end_time !== undefined" class="text-gray-500 text-sm mt-4 ml-8">
+      exit code {{ proc.exit_code }}
+    </div>
+  </template>

   <div class="text-gray-300 mx-auto">
     <span v-if="proc?.error" class="text-red-500">{{ proc.error }}</span>


@@ -35,11 +35,11 @@ export default () => {
       return;
     }

-    if (isProcFinished(_proc)) {
+    if (_proc.error) {
+      logs.value = undefined;
+    } else if (isProcFinished(_proc)) {
       logs.value = await apiClient.getLogs(owner, repo, build, _proc.pid);
-    }
-
-    if (isProcRunning(_proc)) {
+    } else if (isProcRunning(_proc)) {
       // load stream of parent process (which receives all child processes logs)
       stream = apiClient.streamLogs(owner, repo, build, _proc.ppid, onLogsUpdate);
     }