mirror of https://github.com/woodpecker-ci/woodpecker.git
synced 2025-01-03 14:18:42 +00:00
Added support for step errors when executing backend (#817)
When a backend step fails during execution, the run is marked as errored but the error of the failing step itself is missing. Added:
1. A log entry for the backend error (without trace).
2. Marking the step as errored with exit code 126 (command could not be executed).

Co-authored-by: Zav Shotan <zshotan@bloomberg.net>
Co-authored-by: Anton Bracke <anton@ju60.de>
This commit is contained in:
parent bdb007e064
commit acbcc53872

5 changed files with 111 additions and 78 deletions
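The gist of the change, as a minimal self-contained Go sketch (the State struct and fallbackState helper below are illustrative stand-ins for the fallback handling this commit adds to traceStep, not the project's actual code):

package main

import (
	"errors"
	"fmt"
)

// State mirrors the relevant fields of woodpecker's backend.State, for illustration only.
type State struct {
	ExitCode  int
	Exited    bool
	OOMKilled bool
	Error     error
}

// fallbackState sketches the behaviour this commit adds: if the backend produced
// no state (the step could not be executed at all), synthesize one that carries
// the error and exit code 126 ("command invoked cannot execute").
func fallbackState(state *State, err error) *State {
	if state == nil {
		state = new(State)
		if err != nil {
			state.Error = err
			state.Exited = true
			state.OOMKilled = false
			state.ExitCode = 126
		}
	}
	return state
}

func main() {
	// Simulate a backend exec failure that returned no state.
	s := fallbackState(nil, errors.New("could not start container"))
	fmt.Printf("exited=%v exit_code=%d error=%v\n", s.Exited, s.ExitCode, s.Error)
}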
@@ -31,6 +31,7 @@ import (
 	backend "github.com/woodpecker-ci/woodpecker/pipeline/backend/types"
 	"github.com/woodpecker-ci/woodpecker/pipeline/multipart"
 	"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
+	"github.com/woodpecker-ci/woodpecker/shared/utils"
 )

 // TODO: Implement log streaming.

@@ -98,6 +99,13 @@ func (r *Runner) Run(ctx context.Context) error {
 	ctx, cancel := context.WithTimeout(ctxmeta, timeout)
 	defer cancel()
+
+	// Add sigterm support for internal context.
+	// Required when the pipeline is terminated by external signals
+	// like kubernetes.
+	ctx = utils.WithContextSigtermCallback(ctx, func() {
+		logger.Error().Msg("Received sigterm termination signal")
+	})

 	canceled := abool.New()
 	go func() {
 		logger.Debug().Msg("listen for cancel signal")

@@ -241,6 +249,7 @@ func (r *Runner) Run(ctx context.Context) error {
 		proclogger := logger.With().
 			Str("image", state.Pipeline.Step.Image).
 			Str("stage", state.Pipeline.Step.Alias).
+			Err(state.Process.Error).
 			Int("exit_code", state.Process.ExitCode).
 			Bool("exited", state.Process.Exited).
 			Logger()

@@ -252,6 +261,10 @@ func (r *Runner) Run(ctx context.Context) error {
 			Started:  time.Now().Unix(), // TODO do not do this
 			Finished: time.Now().Unix(),
 		}
+		if state.Process.Error != nil {
+			procState.Error = state.Process.Error.Error()
+		}

 		defer func() {
 			proclogger.Debug().Msg("update step status")

@@ -8,4 +8,6 @@ type State struct {
 	Exited bool `json:"exited"`
 	// Container is oom killed, true or false
 	OOMKilled bool `json:"oom_killed"`
+	// Container error
+	Error error
 }

@@ -54,11 +54,11 @@ func New(spec *backend.Config, opts ...Option) *Runtime {
 	return r
 }

-// Run starts the runtime and waits for it to complete.
+// Starts the execution of the pipeline and waits for it to complete
 func (r *Runtime) Run() error {
 	defer func() {
 		if err := r.engine.Destroy(r.ctx, r.spec); err != nil {
-			log.Error().Err(err).Msg("could not destroy engine")
+			log.Error().Err(err).Msg("could not destroy pipeline")
 		}
 	}()

@@ -81,18 +81,66 @@ func (r *Runtime) Run() error {
 	return r.err
 }

-//
-//
-//
-func (r *Runtime) execAll(procs []*backend.Step) <-chan error {
+// Updates the current status of a step
+func (r *Runtime) traceStep(processState *backend.State, err error, step *backend.Step) error {
+	if r.tracer == nil {
+		// no tracer nothing to trace :)
+		return nil
+	}
+
+	if processState == nil {
+		processState = new(backend.State)
+		if err != nil {
+			processState.Error = err
+			processState.Exited = true
+			processState.OOMKilled = false
+			processState.ExitCode = 126 // command invoked cannot be executed.
+		}
+	}
+
+	state := new(State)
+	state.Pipeline.Time = r.started
+	state.Pipeline.Step = step
+	state.Process = processState // empty
+	state.Pipeline.Error = r.err
+
+	return r.tracer.Trace(state)
+}
+
+// Executes a set of parallel steps
+func (r *Runtime) execAll(steps []*backend.Step) <-chan error {
 	var g errgroup.Group
 	done := make(chan error)

-	for _, proc := range procs {
-		proc := proc
+	for _, step := range steps {
+		// required since otherwise the loop variable
+		// will be captured by the function. This will
+		// recreate the step "variable"
+		step := step
 		g.Go(func() error {
-			return r.exec(proc)
+			// Case the pipeline was already complete.
+			switch {
+			case r.err != nil && !step.OnFailure:
+				return nil
+			case r.err == nil && !step.OnSuccess:
+				return nil
+			}
+
+			// Trace started.
+			err := r.traceStep(nil, nil, step)
+			if err != nil {
+				return err
+			}
+
+			processState, err := r.exec(step)
+
+			// Return the error after tracing it.
+			traceErr := r.traceStep(processState, err, step)
+			if traceErr != nil {
+				return traceErr
+			}
+
+			return err
 		})
 	}

@@ -103,86 +151,54 @@ func (r *Runtime) execAll(procs []*backend.Step) <-chan error {
 	return done
 }

-//
-//
-//
-func (r *Runtime) exec(proc *backend.Step) error {
-	switch {
-	case r.err != nil && !proc.OnFailure:
-		return nil
-	case r.err == nil && !proc.OnSuccess:
-		return nil
-	}
-
-	if r.tracer != nil {
-		state := new(State)
-		state.Pipeline.Time = r.started
-		state.Pipeline.Error = r.err
-		state.Pipeline.Step = proc
-		state.Process = new(backend.State) // empty
-		if err := r.tracer.Trace(state); err == ErrSkip {
-			return nil
-		} else if err != nil {
-			return err
-		}
-	}
-
+// Executes the step and returns the state and error.
+func (r *Runtime) exec(step *backend.Step) (*backend.State, error) {
 	// TODO: using DRONE_ will be deprecated with 0.15.0. remove fallback with following release
-	for key, value := range proc.Environment {
+	for key, value := range step.Environment {
 		if strings.HasPrefix(key, "CI_") {
-			proc.Environment[strings.Replace(key, "CI_", "DRONE_", 1)] = value
+			step.Environment[strings.Replace(key, "CI_", "DRONE_", 1)] = value
 		}
 	}

-	if err := r.engine.Exec(r.ctx, proc); err != nil {
-		return err
+	if err := r.engine.Exec(r.ctx, step); err != nil {
+		return nil, err
 	}

 	if r.logger != nil {
-		rc, err := r.engine.Tail(r.ctx, proc)
+		rc, err := r.engine.Tail(r.ctx, step)
 		if err != nil {
-			return err
+			return nil, err
 		}

 		go func() {
-			if err := r.logger.Log(proc, multipart.New(rc)); err != nil {
+			if err := r.logger.Log(step, multipart.New(rc)); err != nil {
 				log.Error().Err(err).Msg("process logging failed")
 			}
 			_ = rc.Close()
 		}()
 	}

-	if proc.Detached {
-		return nil
+	// nothing else to do, this is a detached process.
+	if step.Detached {
+		return nil, nil
 	}

-	wait, err := r.engine.Wait(r.ctx, proc)
+	waitState, err := r.engine.Wait(r.ctx, step)
 	if err != nil {
-		return err
+		return nil, err
 	}

-	if r.tracer != nil {
-		state := new(State)
-		state.Pipeline.Time = r.started
-		state.Pipeline.Error = r.err
-		state.Pipeline.Step = proc
-		state.Process = wait
-		if err := r.tracer.Trace(state); err != nil {
-			return err
-		}
-	}
-
-	if wait.OOMKilled {
-		return &OomError{
-			Name: proc.Name,
-			Code: wait.ExitCode,
-		}
-	} else if wait.ExitCode != 0 {
-		return &ExitError{
-			Name: proc.Name,
-			Code: wait.ExitCode,
-		}
-	}
-	return nil
+	if waitState.OOMKilled {
+		return waitState, &OomError{
+			Name: step.Name,
+			Code: waitState.ExitCode,
+		}
+	} else if waitState.ExitCode != 0 {
+		return waitState, &ExitError{
+			Name: step.Name,
+			Code: waitState.ExitCode,
+		}
+	}
+
+	return waitState, nil
 }
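For context, a sketch of how a caller might consume the reworked exec contract, which now reports failures as typed errors alongside the backend state; the ExitError and OomError definitions below are simplified stand-ins for the pipeline package's own error types, not its actual code:

package main

import (
	"errors"
	"fmt"
)

// ExitError and OomError are simplified stand-ins for the error types returned
// by exec in the diff above; the real definitions live in the pipeline package.
type ExitError struct {
	Name string
	Code int
}

func (e *ExitError) Error() string { return fmt.Sprintf("step %s exited with code %d", e.Name, e.Code) }

type OomError struct {
	Name string
	Code int
}

func (e *OomError) Error() string { return fmt.Sprintf("step %s was oom-killed", e.Name) }

// runStep stands in for the reworked exec: a non-zero exit code or an OOM kill
// is reported as a typed error that callers can inspect.
func runStep(name string, exitCode int, oomKilled bool) error {
	if oomKilled {
		return &OomError{Name: name, Code: exitCode}
	}
	if exitCode != 0 {
		return &ExitError{Name: name, Code: exitCode}
	}
	return nil
}

func main() {
	err := runStep("build", 2, false)

	var oomErr *OomError
	var exitErr *ExitError
	switch {
	case err == nil:
		fmt.Println("step succeeded")
	case errors.As(err, &oomErr):
		fmt.Println("step was oom-killed:", oomErr.Name)
	case errors.As(err, &exitErr):
		fmt.Printf("step %q failed with exit code %d\n", exitErr.Name, exitErr.Code)
	}
}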
@@ -8,15 +8,17 @@
     <Icon name="close" class="ml-auto" />
   </div>

-  <div v-for="logLine in logLines" :key="logLine.pos" class="flex items-center">
-    <div class="text-gray-500 text-sm w-4">{{ (logLine.pos || 0) + 1 }}</div>
-    <!-- eslint-disable-next-line vue/no-v-html -->
-    <div class="mx-4 text-gray-200 dark:text-gray-400" v-html="logLine.out" />
-    <div class="ml-auto text-gray-500 text-sm">{{ logLine.time || 0 }}s</div>
-  </div>
-  <div v-if="proc?.end_time !== undefined" class="text-gray-500 text-sm mt-4 ml-8">
-    exit code {{ proc.exit_code }}
-  </div>
+  <template v-if="!proc?.error">
+    <div v-for="logLine in logLines" :key="logLine.pos" class="flex items-center">
+      <div class="text-gray-500 text-sm w-4">{{ (logLine.pos || 0) + 1 }}</div>
+      <!-- eslint-disable-next-line vue/no-v-html -->
+      <div class="mx-4 text-gray-200 dark:text-gray-400" v-html="logLine.out" />
+      <div class="ml-auto text-gray-500 text-sm">{{ logLine.time || 0 }}s</div>
+    </div>
+    <div v-if="proc?.end_time !== undefined" class="text-gray-500 text-sm mt-4 ml-8">
+      exit code {{ proc.exit_code }}
+    </div>
+  </template>

   <div class="text-gray-300 mx-auto">
     <span v-if="proc?.error" class="text-red-500">{{ proc.error }}</span>

@@ -35,11 +35,11 @@ export default () => {
       return;
     }

-    if (isProcFinished(_proc)) {
+    if (_proc.error) {
+      logs.value = undefined;
+    } else if (isProcFinished(_proc)) {
       logs.value = await apiClient.getLogs(owner, repo, build, _proc.pid);
-    }
-
-    if (isProcRunning(_proc)) {
+    } else if (isProcRunning(_proc)) {
       // load stream of parent process (which receives all child processes logs)
      stream = apiClient.streamLogs(owner, repo, build, _proc.ppid, onLogsUpdate);
     }