Merge branch 'main' into make-windows-container-work-again

This commit is contained in:
6543 2024-11-09 21:01:56 +01:00 committed by GitHub
commit 125e777bc5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
54 changed files with 701 additions and 505 deletions

View file

@ -245,7 +245,6 @@
"**/testdata/**",
"docs/versioned_docs/",
"package.json",
"91-migrations.md",
// generated
"go.sum",
"flake.lock",

View file

@ -2,7 +2,9 @@ when:
- event: tag
- event: pull_request
branch: ${CI_REPO_DEFAULT_BRANCH}
path: Makefile
path:
- Makefile
- .woodpecker/binaries.yaml
variables:
- &golang_image 'docker.io/golang:1.23'

View file

@ -1,6 +1,6 @@
steps:
- name: release-helper
image: woodpeckerci/plugin-ready-release-go:2.0.0
image: docker.io/woodpeckerci/plugin-ready-release-go:2.1.1
settings:
release_branch: ${CI_COMMIT_BRANCH}
forge_type: github
@ -13,5 +13,3 @@ when:
branch:
- ${CI_REPO_DEFAULT_BRANCH}
- release/*
- event: manual
evaluate: 'TASK == "release-helper"'

View file

@ -13,12 +13,14 @@ steps:
depends_on: []
image: *trivy_plugin
settings:
server: server
skip-dirs: web/,docs/
docs:
depends_on: []
image: *trivy_plugin
settings:
server: server
skip-dirs: node_modules/,plugins/woodpecker-plugins/node_modules/
dir: docs/
@ -26,5 +28,18 @@ steps:
depends_on: []
image: *trivy_plugin
settings:
server: server
skip-dirs: node_modules/
dir: web/
services:
server:
image: *trivy_plugin
# settings:
# service: true
# db-repository: docker.io/aquasec/trivy-db:2
environment:
PLUGIN_SERVICE: 'true'
PLUGIN_DB_REPOSITORY: 'docker.io/aquasec/trivy-db:2'
ports:
- 10000

View file

@ -27,14 +27,34 @@ import (
var flags = append([]cli.Flag{
&cli.BoolFlag{
Sources: cli.EnvVars("WOODPECKER_LOG_XORM"),
Name: "log-xorm",
Usage: "enable xorm logging",
Sources: cli.EnvVars("WOODPECKER_DATABASE_LOG", "WOODPECKER_LOG_XORM"),
Name: "db-log",
Aliases: []string{"log-xorm"}, // TODO: remove in v4.0.0
Usage: "enable logging in database engine (currently xorm)",
},
&cli.BoolFlag{
Sources: cli.EnvVars("WOODPECKER_LOG_XORM_SQL"),
Name: "log-xorm-sql",
Usage: "enable xorm sql command logging",
Sources: cli.EnvVars("WOODPECKER_DATABASE_LOG_SQL", "WOODPECKER_LOG_XORM_SQL"),
Name: "db-log-sql",
Aliases: []string{"log-xorm-sql"}, // TODO: remove in v4.0.0
Usage: "enable logging of sql commands",
},
&cli.IntFlag{
Sources: cli.EnvVars("WOODPECKER_DATABASE_MAX_CONNECTIONS"),
Name: "db-max-open-connections",
Usage: "max connections xorm is allowed create",
Value: 100,
},
&cli.IntFlag{
Sources: cli.EnvVars("WOODPECKER_DATABASE_IDLE_CONNECTIONS"),
Name: "db-max-idle-connections",
Usage: "amount of connections xorm will hold open",
Value: 2,
},
&cli.DurationFlag{
Sources: cli.EnvVars("WOODPECKER_DATABASE_CONNECTION_TIMEOUT"),
Name: "db-max-connection-timeout",
Usage: "time an active connection is allowed to stay open",
Value: 3 * time.Second,
},
&cli.StringFlag{
Sources: cli.EnvVars("WOODPECKER_HOST"),
@ -205,7 +225,8 @@ var flags = append([]cli.Flag{
},
&cli.StringFlag{
Sources: cli.EnvVars("WOODPECKER_DATABASE_DRIVER"),
Name: "driver",
Name: "db-driver",
Aliases: []string{"driver"}, // TODO: remove in v4.0.0
Usage: "database driver",
Value: "sqlite3",
},
@ -213,9 +234,10 @@ var flags = append([]cli.Flag{
Sources: cli.NewValueSourceChain(
cli.File(os.Getenv("WOODPECKER_DATABASE_DATASOURCE_FILE")),
cli.EnvVar("WOODPECKER_DATABASE_DATASOURCE")),
Name: "datasource",
Usage: "database driver configuration string",
Value: datasourceDefaultValue(),
Name: "db-datasource",
Aliases: []string{"datasource"}, // TODO: remove in v4.0.0
Usage: "database driver configuration string",
Value: datasourceDefaultValue(),
},
&cli.StringFlag{
Sources: cli.NewValueSourceChain(

View file

@ -51,11 +51,14 @@ const (
)
func setupStore(ctx context.Context, c *cli.Command) (store.Store, error) {
datasource := c.String("datasource")
driver := c.String("driver")
datasource := c.String("db-datasource")
driver := c.String("db-driver")
xorm := store.XORM{
Log: c.Bool("log-xorm"),
ShowSQL: c.Bool("log-xorm-sql"),
Log: c.Bool("db-log"),
ShowSQL: c.Bool("db-log-sql"),
MaxOpenConns: int(c.Int("db-max-open-connections")),
MaxIdleConns: int(c.Int("db-max-idle-connections")),
ConnMaxLifetime: c.Duration("db-max-connection-timeout"),
}
if driver == "sqlite3" {
@ -103,8 +106,11 @@ func checkSqliteFileExist(path string) error {
return err
}
func setupQueue(ctx context.Context, s store.Store) queue.Queue {
return queue.WithTaskStore(ctx, queue.New(ctx), s)
func setupQueue(ctx context.Context, s store.Store) (queue.Queue, error) {
return queue.New(ctx, queue.Config{
Backend: queue.TypeMemory,
Store: s,
})
}
func setupMembershipService(_ context.Context, _store store.Store) cache.MembershipService {
@ -143,18 +149,19 @@ func setupJWTSecret(_store store.Store) (string, error) {
return jwtSecret, nil
}
func setupEvilGlobals(ctx context.Context, c *cli.Command, s store.Store) error {
func setupEvilGlobals(ctx context.Context, c *cli.Command, s store.Store) (err error) {
// services
server.Config.Services.Queue = setupQueue(ctx, s)
server.Config.Services.Logs = logging.New()
server.Config.Services.Pubsub = pubsub.New()
server.Config.Services.Membership = setupMembershipService(ctx, s)
serviceManager, err := services.NewManager(c, s, setup.Forge)
server.Config.Services.Queue, err = setupQueue(ctx, s)
if err != nil {
return fmt.Errorf("could not setup queue: %w", err)
}
server.Config.Services.Manager, err = services.NewManager(c, s, setup.Forge)
if err != nil {
return fmt.Errorf("could not setup service manager: %w", err)
}
server.Config.Services.Manager = serviceManager
server.Config.Services.LogStore, err = setupLogStore(c, s)
if err != nil {
return fmt.Errorf("could not setup log store: %w", err)

View file

@ -53,7 +53,7 @@ Security is pretty important to us and we want to make sure that no one can stea
## Migration notes
There have been a few more breaking changes. [Read more about what you need to do when upgrading!](../docs/migrations#200)
There have been a few more breaking changes. [Read more about what you need to do when upgrading!](/migrations#200)
## New features

View file

@ -14,6 +14,8 @@ tags: [community, image, podman]
I run Woodpecker CI with podman backend instead of docker and just figured out how to build images with buildah. Since I couldn't find this anywhere documented, I thought I might as well just share it here.
<!-- truncate -->
It's actually pretty straight forward. Here's what my repository structure looks like:
```bash

View file

@ -15,6 +15,8 @@ tags: [community, debug]
Sometimes you want to debug a pipeline.
Therefore I recently discovered: <https://github.com/ekzhang/sshx>
<!-- truncate -->
A simple step like should allow you to debug:
```yaml

View file

@ -14,6 +14,8 @@ tags: [community, image, podman, sigstore, signature]
This example shows how to build a container image with podman while verifying the base image and signing the resulting image.
<!-- truncate -->
The image being pulled uses a keyless signature, while the image being built will be signed by a pre-generated private key.
## Prerequisites

View file

@ -87,4 +87,4 @@ be removed in the next major release:
- Use `WOODPECKER_EXPERT_FORGE_OAUTH_HOST` instead of `WOODPECKER_DEV_GITEA_OAUTH_URL` or `WOODPECKER_DEV_OAUTH_HOST`
- Deprecated `WOODPECKER_WEBHOOK_HOST` in favor of `WOODPECKER_EXPERT_WEBHOOK_HOST`
For a full list of deprecations that will be dropped in the `next` major release `3.0.0` (no eta yet), please check the [migrations](/docs/migrations#next) section.
For a full list of deprecations that will be dropped in the `next` major release `3.0.0` (no eta yet), please check the [migrations](/migrations#next) section.

View file

@ -289,7 +289,7 @@ The available events are:
- `pull_request_closed`: triggered when a pull request is closed or merged.
- `tag`: triggered when a tag is pushed.
- `release`: triggered when a release, pre-release or draft is created. (You can apply further filters using [evaluate](#evaluate) with [environment variables](./50-environment.md#built-in-environment-variables).)
- `deployment` (only available for GitHub): triggered when a deployment is created in the repository.
- `deployment`: triggered when a deployment is created in the repository. (This event can be triggered from Woodpecker directly. GitHub also supports webhook triggers.)
- `cron`: triggered when a cron job is executed.
- `manual`: triggered when a user manually triggers a pipeline.

View file

@ -87,4 +87,4 @@ All configuration options can be found via [NixOS Search](https://search.nixos.o
## Tips and tricks
There are some resources on how to utilize Woodpecker more effectively with NixOS on the [Awesome Woodpecker](../../92-awesome.md) page, like using the runners nix-store in the pipeline.
There are some resources on how to utilize Woodpecker more effectively with NixOS on the [Awesome Woodpecker](/awesome) page, like using the runners nix-store in the pipeline.

View file

@ -159,17 +159,35 @@ Configures the logging level. Possible values are `trace`, `debug`, `info`, `war
Output destination for logs.
'stdout' and 'stderr' can be used as special keywords.
### `WOODPECKER_LOG_XORM`
### `WOODPECKER_DATABASE_LOG`
> Default: `false`
Enable XORM logs.
Enable logging in database engine (currently xorm).
### `WOODPECKER_LOG_XORM_SQL`
### `WOODPECKER_DATABASE_LOG_SQL`
> Default: `false`
Enable XORM SQL command logs.
Enable logging of sql commands.
### `WOODPECKER_DATABASE_MAX_CONNECTIONS`
> Default: `100`
Max database connections xorm is allowed to create.
### `WOODPECKER_DATABASE_IDLE_CONNECTIONS`
> Default: `2`
Amount of database connections xorm will hold open.
### `WOODPECKER_DATABASE_CONNECTION_TIMEOUT`
> Default: `3 Seconds`
Time an active database connection is allowed to stay open.
### `WOODPECKER_DEBUG_PRETTY`

View file

@ -8,6 +8,8 @@
| Event: Tag | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Event: Pull-Request | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Event: Release | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
| Event: Deploy | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
| Event: Deploy¹ | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
| [Multiple workflows](../../20-usage/25-workflows.md) | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| [when.path filter](../../20-usage/20-workflow-syntax.md#path) | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
¹ The deployment event can be triggered for all forges from Woodpecker directly. However, only GitHub can trigger them using webhooks.

View file

@ -50,13 +50,13 @@ const config: Config = {
position: 'left',
items: [
{
to: '/docs/next/migrations', // Always point to newest migration guide
activeBaseRegex: 'docs/(next/)?migrations',
to: '/migrations', // Always point to newest migration guide
activeBaseRegex: 'migrations',
label: 'Migrations',
},
{
to: '/docs/next/awesome', // Always point to newest awesome list
activeBaseRegex: 'docs/(next/)?awesome',
to: '/awesome', // Always point to newest awesome list
activeBaseRegex: 'awesome',
label: 'Awesome',
},
{

18
docs/src/pages/about.md Normal file
View file

@ -0,0 +1,18 @@
# About
Woodpecker was originally forked from Drone 0.8 as the Drone CI license was changed after the 0.8 release from Apache 2.0 to a proprietary license. Woodpecker is based on this latest freely available version.
## History
Woodpecker was originally forked by [@laszlocph](https://github.com/laszlocph) in 2019.
A few important time points:
- [`2fbaa56`](https://github.com/woodpecker-ci/woodpecker/commit/2fbaa56eee0f4be7a3ca4be03dbd00c1bf5d1274) is the first commit of the fork, made on Apr 3, 2019.
- The first release [v0.8.91](https://github.com/woodpecker-ci/woodpecker/releases/tag/v0.8.91) was published on Apr 6, 2019.
- On Aug 27, 2019, the project was renamed to "Woodpecker" ([`630c383`](https://github.com/woodpecker-ci/woodpecker/commit/630c383181b10c4ec375e500c812c4b76b3c52b8)).
- The first release under the name "Woodpecker" was published on Sep 9, 2019 ([v0.8.104](https://github.com/woodpecker-ci/woodpecker/releases/tag/v0.8.104)).
## Differences to Drone
Woodpecker is community-focused software that will stay free and open source forever, while Drone is managed by [Harness](https://harness.io/) and published under the [Polyform Small Business](https://polyformproject.org/licenses/small-business/1.0.0/) license.

View file

@ -1,50 +1,80 @@
# Migrations
Some versions need some changes to the server configuration or the pipeline configuration files.
Some versions need some changes to the server configuration or the pipeline configuration files. If you are a user, check the `User migrations` section of a version. As an admin of a Woodpecker server or agent, check the `Admin migrations` section.
## `next`
- Deprecate `WOODPECKER_FILTER_LABELS` use `WOODPECKER_AGENT_LABELS`
:::info
This will be the next version of Woodpecker.
:::
## User migrations
- Removed built-in environment variables:
- `CI_COMMIT_URL` use `CI_PIPELINE_FORGE_URL`
- `CI_STEP_FINISHED` as empty during execution
- `CI_PIPELINE_FINISHED` as empty during execution
- `CI_PIPELINE_STATUS` was always `success`
- `CI_STEP_STATUS` was always `success`
- Set `/woodpecker` as defautl workdir for the **woodpecker-cli** container
- Move docker resource limit settings from server into agent configuration
- Rename server environment variable `WOODPECKER_ESCALATE` to `WOODPECKER_PLUGINS_PRIVILEGED`
- All default privileged plugins (like `woodpeckerci/plugin-docker-buildx`) were removed. Please carefully [re-add those plugins](./30-administration/10-server-config.md#woodpecker_plugins_privileged) you trust and rely on.
- `WOODPECKER_DEFAULT_CLONE_IMAGE` got depricated use `WOODPECKER_DEFAULT_CLONE_PLUGIN`
- Check trusted-clone- and privileged-plugins by image name and tag (if tag is set)
- Set `/woodpecker` as default workdir for the **woodpecker-cli** container
- Secret filters for plugins now check against tag if specified
- Removed `WOODPECKER_DEV_OAUTH_HOST` and `WOODPECKER_DEV_GITEA_OAUTH_URL` use `WOODPECKER_EXPERT_FORGE_OAUTH_HOST`
- Compatibility mode of deprecated `pipeline:`, `platform:` and `branches:` pipeline config options are now removed and pipeline will now fail if still in use.
- Removed `steps.[name].group` in favor of `steps.[name].depends_on` (see [workflow syntax](./20-usage/20-workflow-syntax.md#depends_on) to learn how to set dependencies)
- Removed `WOODPECKER_ROOT_PATH` and `WOODPECKER_ROOT_URL` config variables. Use `WOODPECKER_HOST` with a path instead
- Removed `steps.[name].group` in favor of `steps.[name].depends_on` (see [workflow syntax](/docs/usage/workflow-syntax#depends_on) to learn how to set dependencies)
- Pipelines without a config file will now be skipped instead of failing
- Removed implicitly defined `regcred` image pull secret name. Set it explicitly via `WOODPECKER_BACKEND_K8S_PULL_SECRET_NAMES`
- Removed `includes` and `excludes` support from **event** filter
- Removed uppercasing all secret env vars, instead, the value of the `secrets` property is used. [Read more](./20-usage/40-secrets.md#usage)
- Removed upper-casing all secret env vars, instead, the value of the `secrets` property is used. [Read more](/docs/usage/secrets#usage)
- Removed alternative names for secrets, use `environment` with `from_secret`
- Removed slice definition for env vars
- Removed `environment` filter, use `when.evaluate`
- Removed `WOODPECKER_WEBHOOK_HOST` in favor of `WOODPECKER_EXPERT_WEBHOOK_HOST`
- Migrated to rfc9421 for webhook signatures
- Renamed `start_time`, `end_time`, `created_at`, `started_at`, `finished_at` and `reviewed_at` JSON fields to `started`, `finished`, `created`, `started`, `finished`, `reviewed`
- JSON field `trusted` on repo model was changed from boolean to object
- Update all webhooks by pressing the "Repair all" button in the admin settings as the webhook token claims have changed
- Crons now use standard Linux syntax without seconds
- Replaced `configs` object by `netrc` in external configuration APIs
- Removed old API routes: `registry/` -> `registries`, `/authorize/token`
- Replaced `registry` command with `repo registry` in cli
- Disallow upgrades from 1.x, upgrade to 2.x first
- Deprecated `secrets`, use `environment` with `from_secret`
## Admin migrations
- Deprecate `WOODPECKER_LOG_XORM` and `WOODPECKER_LOG_XORM_SQL` use `WOODPECKER_DATABASE_LOG` and `WOODPECKER_DATABASE_LOG_SQL`
- Deprecate `WOODPECKER_FILTER_LABELS` use `WOODPECKER_AGENT_LABELS`
- Move docker resource limit settings from server into agent configuration
- Rename server environment variable `WOODPECKER_ESCALATE` to `WOODPECKER_PLUGINS_PRIVILEGED`
- All default privileged plugins (like `woodpeckerci/plugin-docker-buildx`) were removed. Please carefully [re-add those plugins](/docs/next/administration/server-config#woodpecker_plugins_privileged) you trust and rely on.
- `WOODPECKER_DEFAULT_CLONE_IMAGE` got deprecated use `WOODPECKER_DEFAULT_CLONE_PLUGIN`
- Check trusted-clone- and privileged-plugins by image name and tag (if tag is set)
- Removed `WOODPECKER_DEV_OAUTH_HOST` and `WOODPECKER_DEV_GITEA_OAUTH_URL` use `WOODPECKER_EXPERT_FORGE_OAUTH_HOST`
- Removed `WOODPECKER_ROOT_PATH` and `WOODPECKER_ROOT_URL` config variables. Use `WOODPECKER_HOST` with a path instead
- Removed implicitly defined `regcred` image pull secret name. Set it explicitly via `WOODPECKER_BACKEND_K8S_PULL_SECRET_NAMES`
- Removed slice definition for env vars
- Migrated to rfc9421 for webhook signatures
- Replaced `configs` object by `netrc` in external configuration APIs
- Disallow upgrades from 1.x, upgrade to 2.x first
## 2.7.2
To secure your instance, set `WOODPECKER_PLUGINS_PRIVILEGED` to only allow specific versions of the `woodpeckerci/plugin-docker-buildx` plugin, use version 5.0.0 or above. This prevents older, potentially unstable versions from being privileged.
For example, to allow only version 5.0.0, use:
```bash
WOODPECKER_PLUGINS_PRIVILEGED=woodpeckerci/plugin-docker-buildx:5.0.0
```
To allow multiple versions, you can separate them with commas:
```bash
WOODPECKER_PLUGINS_PRIVILEGED=woodpeckerci/plugin-docker-buildx:5.0.0,woodpeckerci/plugin-docker-buildx:5.1.0
```
This setup ensures only specified, stable plugin versions are given privileged access.
Read more about it in [#4213](https://github.com/woodpecker-ci/woodpecker/pull/4213)
## 2.0.0
- Dropped deprecated `CI_BUILD_*`, `CI_PREV_BUILD_*`, `CI_JOB_*`, `*_LINK`, `CI_SYSTEM_ARCH`, `CI_REPO_REMOTE` built-in environment variables
- Deprecated `platform:` filter in favor of `labels:`, [read more](./20-usage/20-workflow-syntax.md#filter-by-platform)
- Deprecated `platform:` filter in favor of `labels:`, [read more](/docs/usage/workflow-syntax#filter-by-platform)
- Secrets `event` property was renamed to `events` and `image` to `images` as both are lists. The new property `events` / `images` has to be used in the api. The old properties `event` and `image` were removed.
- The secrets `plugin_only` option was removed. Secrets with images are now always only available for plugins using listed by the `images` property. Existing secrets with a list of `images` will now only be available to the listed images if they are used as a plugin.
- Removed `build` alias for `pipeline` command in CLI
@ -56,8 +86,8 @@ Some versions need some changes to the server configuration or the pipeline conf
## 1.0.0
- The signature used to verify extension calls (like those used for the [config-extension](./30-administration/40-advanced/100-external-configuration-api.md)) done by the Woodpecker server switched from using a shared-secret HMac to an ed25519 key-pair. Read more about it at the [config-extensions](./30-administration/40-advanced/100-external-configuration-api.md) documentation.
- Refactored support for old agent filter labels and expressions. Learn how to use the new [filter](./20-usage/20-workflow-syntax.md#labels)
- The signature used to verify extension calls (like those used for the [config-extension](/docs/administration/advanced/external-configuration-api)) done by the Woodpecker server switched from using a shared-secret HMac to an ed25519 key-pair. Read more about it at the [config-extensions](/docs/administration/advanced/external-configuration-api) documentation.
- Refactored support for old agent filter labels and expressions. Learn how to use the new [filter](/docs/usage/workflow-syntax#labels)
- Renamed step environment variable `CI_SYSTEM_ARCH` to `CI_SYSTEM_PLATFORM`. Same applies for the cli exec variable.
- Renamed environment variables `CI_BUILD_*` and `CI_PREV_BUILD_*` to `CI_PIPELINE_*` and `CI_PREV_PIPELINE_*`, old ones are still available but deprecated
- Renamed environment variables `CI_JOB_*` to `CI_STEP_*`, old ones are still available but deprecated
@ -66,7 +96,7 @@ Some versions need some changes to the server configuration or the pipeline conf
- Renamed API endpoints for pipelines (`<owner>/<repo>/builds/<buildId>` -> `<owner>/<repo>/pipelines/<pipelineId>`), old ones are still available but deprecated
- Updated Prometheus gauge `build_*` to `pipeline_*`
- Updated Prometheus gauge `*_job_*` to `*_step_*`
- Renamed config env `WOODPECKER_MAX_PROCS` to `WOODPECKER_MAX_WORKFLOWS` (still available as fallback)
- Renamed config env `WOODPECKER_MAX_PROCS` to `WOODPECKER_MAX_WORKFLOWS` (still available as fallback) <!-- cspell:ignore PROCS -->
- The pipelines are now also read from `.yaml` files, the new default order is `.woodpecker/*.yml` and `.woodpecker/*.yaml` (without any prioritization) -> `.woodpecker.yml` -> `.woodpecker.yaml`
- Dropped support for [Coding](https://coding.net/), [Gogs](https://gogs.io) and Bitbucket Server (Stash).
- `/api/queue/resume` & `/api/queue/pause` endpoint methods were changed from `GET` to `POST`
@ -95,7 +125,7 @@ Some versions need some changes to the server configuration or the pipeline conf
Only projects created after updating will have an empty value by default. Existing projects will stick to the current pipeline path which is `.drone.yml` in most cases.
Read more about it at the [Project Settings](./20-usage/75-project-settings.md#pipeline-path)
Read more about it at the [Project Settings](/docs/usage/project-settings#pipeline-path)
- From version `0.15.0` ongoing there will be three types of docker images: `latest`, `next` and `x.x.x` with an alpine variant for each type like `latest-alpine`.
If you used `latest` before to try pre-release features you should switch to `next` after this release.
@ -130,7 +160,7 @@ Some versions need some changes to the server configuration or the pipeline conf
- CI_SOURCE_BRANCH => use CI_COMMIT_SOURCE_BRANCH
- CI_TARGET_BRANCH => use CI_COMMIT_TARGET_BRANCH
For all available variables and their descriptions have a look at [built-in-environment-variables](./20-usage/50-environment.md#built-in-environment-variables).
For all available variables and their descriptions have a look at [built-in-environment-variables](/docs/usage/environment#built-in-environment-variables).
- Prometheus metrics have been changed from `drone_*` to `woodpecker_*`

View file

@ -19,7 +19,7 @@ the actual release will be about a week later.
### Deprecations & migrations
All deprecations and migrations for Woodpecker users and instance admins are documented in the [migration guide](/docs/next/migrations).
All deprecations and migrations for Woodpecker users and instance admins are documented in the [migration guide](/migrations).
## Next version (current state of the `main` branch)
@ -33,7 +33,11 @@ Here you can find documentation for previous versions of Woodpecker.
| | | |
| ------- | ---------- | ------------------------------------------------------------------------------------- |
| 2.6.0 | 2024-07-18 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.6.0/docs/docs/) |
| 2.7.2 | 2024-11-03 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.7.2/docs/docs/) |
| 2.7.1 | 2024-09-07 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.7.1/docs/docs/) |
| 2.7.0 | 2024-07-18 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.7.0/docs/docs/) |
| 2.6.1 | 2024-07-19 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.6.1/docs/docs/) |
| 2.6.0 | 2024-06-13 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.6.0/docs/docs/) |
| 2.5.0 | 2024-06-01 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.5.0/docs/docs/) |
| 2.4.1 | 2024-03-20 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.4.1/docs/docs/) |
| 2.4.0 | 2024-03-19 | [Documentation](https://github.com/woodpecker-ci/woodpecker/tree/v2.4.0/docs/docs/) |

View file

@ -18,15 +18,15 @@ import (
"encoding/base64"
)
func GenerateContainerConf(commands []string, goos string) (env map[string]string, entry []string) {
func GenerateContainerConf(commands []string, goos, workDir string) (env map[string]string, entry []string) {
env = make(map[string]string)
if goos == "windows" {
env["CI_SCRIPT"] = base64.StdEncoding.EncodeToString([]byte(generateScriptWindows(commands)))
env["CI_SCRIPT"] = base64.StdEncoding.EncodeToString([]byte(generateScriptWindows(commands, workDir)))
env["SHELL"] = "powershell.exe"
// cspell:disable-next-line
entry = []string{"powershell", "-noprofile", "-noninteractive", "-command", "[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Env:CI_SCRIPT)) | iex"}
} else {
env["CI_SCRIPT"] = base64.StdEncoding.EncodeToString([]byte(generateScriptPosix(commands)))
env["CI_SCRIPT"] = base64.StdEncoding.EncodeToString([]byte(generateScriptPosix(commands, workDir)))
env["SHELL"] = "/bin/sh"
entry = []string{"/bin/sh", "-c", "echo $CI_SCRIPT | base64 -d | /bin/sh -e"}
}

View file

@ -17,16 +17,22 @@ package common
import (
"bytes"
"fmt"
"text/template"
"al.essio.dev/pkg/shellescape"
)
// generateScriptPosix is a helper function that generates a step script
// for a linux container using the given.
func generateScriptPosix(commands []string) string {
func generateScriptPosix(commands []string, workDir string) string {
var buf bytes.Buffer
buf.WriteString(setupScript)
if err := setupScriptTmpl.Execute(&buf, map[string]string{
"WorkDir": workDir,
}); err != nil {
// should never happen but well we have an error to trace
return fmt.Sprintf("echo 'failed to generate posix script from commands: %s'; exit 1", err.Error())
}
for _, command := range commands {
buf.WriteString(fmt.Sprintf(
@ -39,9 +45,9 @@ func generateScriptPosix(commands []string) string {
return buf.String()
}
// setupScript is a helper script this is added to the step script to ensure
// setupScriptProto is a helper script this is added to the step script to ensure
// a minimum set of environment variables are set correctly.
const setupScript = `
const setupScriptProto = `
if [ -n "$CI_NETRC_MACHINE" ]; then
cat <<EOF > $HOME/.netrc
machine $CI_NETRC_MACHINE
@ -53,10 +59,12 @@ fi
unset CI_NETRC_USERNAME
unset CI_NETRC_PASSWORD
unset CI_SCRIPT
mkdir -p "$CI_WORKSPACE"
cd "$CI_WORKSPACE"
mkdir -p "{{.WorkDir}}"
cd "{{.WorkDir}}"
`
var setupScriptTmpl, _ = template.New("").Parse(setupScriptProto)
// traceScript is a helper script that is added to the step script
// to trace a command.
const traceScript = `

View file

@ -16,6 +16,7 @@ package common
import (
"testing"
"text/template"
"github.com/stretchr/testify/assert"
)
@ -39,8 +40,8 @@ fi
unset CI_NETRC_USERNAME
unset CI_NETRC_PASSWORD
unset CI_SCRIPT
mkdir -p "$CI_WORKSPACE"
cd "$CI_WORKSPACE"
mkdir -p "/woodpecker/some"
cd "/woodpecker/some"
echo + 'echo ${PATH}'
echo ${PATH}
@ -54,7 +55,13 @@ go test
},
}
for _, test := range testdata {
script := generateScriptPosix(test.from)
script := generateScriptPosix(test.from, "/woodpecker/some")
assert.EqualValues(t, test.want, script, "Want encoded script for %s", test.from)
}
}
func TestSetupScriptProtoParse(t *testing.T) {
// just ensure that we have a working `setupScriptTmpl` at runtime
_, err := template.New("").Parse(setupScriptProto)
assert.NoError(t, err)
}

View file

@ -21,16 +21,16 @@ import (
)
const (
windowsScriptBase64 = "CiRFcnJvckFjdGlvblByZWZlcmVuY2UgPSAnU3RvcCc7CmlmIChbRW52aXJvbm1lbnRdOjpHZXRFbnZpcm9ubWVudFZhcmlhYmxlKCdDSV9XT1JLU1BBQ0UnKSkgeyBpZiAoLW5vdCAoVGVzdC1QYXRoICIkZW52OkNJX1dPUktTUEFDRSIpKSB7IE5ldy1JdGVtIC1QYXRoICIkZW52OkNJX1dPUktTUEFDRSIgLUl0ZW1UeXBlIERpcmVjdG9yeSAtRm9yY2UgfX07CmlmICgtbm90IFtFbnZpcm9ubWVudF06OkdldEVudmlyb25tZW50VmFyaWFibGUoJ0hPTUUnKSkgeyBbRW52aXJvbm1lbnRdOjpTZXRFbnZpcm9ubWVudFZhcmlhYmxlKCdIT01FJywgJ2M6XHJvb3QnKSB9OwppZiAoLW5vdCAoVGVzdC1QYXRoICIkZW52OkhPTUUiKSkgeyBOZXctSXRlbSAtUGF0aCAiJGVudjpIT01FIiAtSXRlbVR5cGUgRGlyZWN0b3J5IC1Gb3JjZSB9OwppZiAoJEVudjpDSV9ORVRSQ19NQUNISU5FKSB7CiRuZXRyYz1bc3RyaW5nXTo6Rm9ybWF0KCJ7MH1cX25ldHJjIiwkRW52OkhPTUUpOwoibWFjaGluZSAkRW52OkNJX05FVFJDX01BQ0hJTkUiID4+ICRuZXRyYzsKImxvZ2luICRFbnY6Q0lfTkVUUkNfVVNFUk5BTUUiID4+ICRuZXRyYzsKInBhc3N3b3JkICRFbnY6Q0lfTkVUUkNfUEFTU1dPUkQiID4+ICRuZXRyYzsKfTsKW0Vudmlyb25tZW50XTo6U2V0RW52aXJvbm1lbnRWYXJpYWJsZSgiQ0lfTkVUUkNfUEFTU1dPUkQiLCRudWxsKTsKW0Vudmlyb25tZW50XTo6U2V0RW52aXJvbm1lbnRWYXJpYWJsZSgiQ0lfU0NSSVBUIiwkbnVsbCk7CmlmIChbRW52aXJvbm1lbnRdOjpHZXRFbnZpcm9ubWVudFZhcmlhYmxlKCdDSV9XT1JLU1BBQ0UnKSkgeyBjZCAiJGVudjpDSV9XT1JLU1BBQ0UiIH07CgpXcml0ZS1PdXRwdXQgKCcrICJlY2hvIGhlbGxvIHdvcmxkIicpOwomIGVjaG8gaGVsbG8gd29ybGQ7IGlmICgkTEFTVEVYSVRDT0RFIC1uZSAwKSB7ZXhpdCAkTEFTVEVYSVRDT0RFfQoK"
posixScriptBase64 = "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiJENJX1dPUktTUEFDRSIKY2QgIiRDSV9XT1JLU1BBQ0UiCgplY2hvICsgJ2VjaG8gaGVsbG8gd29ybGQnCmVjaG8gaGVsbG8gd29ybGQK"
windowsScriptBase64 = "CiRFcnJvckFjdGlvblByZWZlcmVuY2UgPSAnU3RvcCc7CmlmICgtbm90IChUZXN0LVBhdGggIi93b29kcGVja2VyL3NvbWUiKSkgeyBOZXctSXRlbSAtUGF0aCAiL3dvb2RwZWNrZXIvc29tZSIgLUl0ZW1UeXBlIERpcmVjdG9yeSAtRm9yY2UgfTsKaWYgKC1ub3QgW0Vudmlyb25tZW50XTo6R2V0RW52aXJvbm1lbnRWYXJpYWJsZSgnSE9NRScpKSB7IFtFbnZpcm9ubWVudF06OlNldEVudmlyb25tZW50VmFyaWFibGUoJ0hPTUUnLCAnYzpccm9vdCcpIH07CmlmICgtbm90IChUZXN0LVBhdGggIiRlbnY6SE9NRSIpKSB7IE5ldy1JdGVtIC1QYXRoICIkZW52OkhPTUUiIC1JdGVtVHlwZSBEaXJlY3RvcnkgLUZvcmNlIH07CmlmICgkRW52OkNJX05FVFJDX01BQ0hJTkUpIHsKJG5ldHJjPVtzdHJpbmddOjpGb3JtYXQoInswfVxfbmV0cmMiLCRFbnY6SE9NRSk7CiJtYWNoaW5lICRFbnY6Q0lfTkVUUkNfTUFDSElORSIgPj4gJG5ldHJjOwoibG9naW4gJEVudjpDSV9ORVRSQ19VU0VSTkFNRSIgPj4gJG5ldHJjOwoicGFzc3dvcmQgJEVudjpDSV9ORVRSQ19QQVNTV09SRCIgPj4gJG5ldHJjOwp9OwpbRW52aXJvbm1lbnRdOjpTZXRFbnZpcm9ubWVudFZhcmlhYmxlKCJDSV9ORVRSQ19QQVNTV09SRCIsJG51bGwpOwpbRW52aXJvbm1lbnRdOjpTZXRFbnZpcm9ubWVudFZhcmlhYmxlKCJDSV9TQ1JJUFQiLCRudWxsKTsKY2QgIi93b29kcGVja2VyL3NvbWUiOwoKV3JpdGUtT3V0cHV0ICgnKyAiZWNobyBoZWxsbyB3b3JsZCInKTsKJiBlY2hvIGhlbGxvIHdvcmxkOyBpZiAoJExBU1RFWElUQ09ERSAtbmUgMCkge2V4aXQgJExBU1RFWElUQ09ERX0K"
posixScriptBase64 = "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiL3dvb2RwZWNrZXIvc29tZSIKY2QgIi93b29kcGVja2VyL3NvbWUiCgplY2hvICsgJ2VjaG8gaGVsbG8gd29ybGQnCmVjaG8gaGVsbG8gd29ybGQK"
)
func TestGenerateContainerConf(t *testing.T) {
gotEnv, gotEntry := GenerateContainerConf([]string{"echo hello world"}, "windows")
gotEnv, gotEntry := GenerateContainerConf([]string{"echo hello world"}, "windows", "/woodpecker/some")
assert.Equal(t, windowsScriptBase64, gotEnv["CI_SCRIPT"])
assert.Equal(t, "powershell.exe", gotEnv["SHELL"])
assert.Equal(t, []string{"powershell", "-noprofile", "-noninteractive", "-command", "[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($Env:CI_SCRIPT)) | iex"}, gotEntry)
gotEnv, gotEntry = GenerateContainerConf([]string{"echo hello world"}, "linux")
gotEnv, gotEntry = GenerateContainerConf([]string{"echo hello world"}, "linux", "/woodpecker/some")
assert.Equal(t, posixScriptBase64, gotEnv["CI_SCRIPT"])
assert.Equal(t, "/bin/sh", gotEnv["SHELL"])
assert.Equal(t, []string{"/bin/sh", "-c", "echo $CI_SCRIPT | base64 -d | /bin/sh -e"}, gotEntry)

View file

@ -18,10 +18,19 @@ import (
"bytes"
"fmt"
"strings"
"text/template"
)
func generateScriptWindows(commands []string) string {
func generateScriptWindows(commands []string, workDir string) string {
var buf bytes.Buffer
if err := setupScriptWinTmpl.Execute(&buf, map[string]string{
"WorkDir": workDir,
}); err != nil {
// should never happen but well we have an error to trance
return fmt.Sprintf("echo 'failed to generate posix script from commands: %s'; exit 1", err.Error())
}
for _, command := range commands {
escaped := fmt.Sprintf("%q", command)
escaped = strings.ReplaceAll(escaped, "$", `\$`)
@ -31,16 +40,13 @@ func generateScriptWindows(commands []string) string {
command,
))
}
script := fmt.Sprintf(
setupScriptWin,
buf.String(),
)
return script
return buf.String()
}
const setupScriptWin = `
const setupScriptWinProto = `
$ErrorActionPreference = 'Stop';
if ([Environment]::GetEnvironmentVariable('CI_WORKSPACE')) { if (-not (Test-Path "$env:CI_WORKSPACE")) { New-Item -Path "$env:CI_WORKSPACE" -ItemType Directory -Force }};
if (-not (Test-Path "{{.WorkDir}}")) { New-Item -Path "{{.WorkDir}}" -ItemType Directory -Force };
if (-not [Environment]::GetEnvironmentVariable('HOME')) { [Environment]::SetEnvironmentVariable('HOME', 'c:\root') };
if (-not (Test-Path "$env:HOME")) { New-Item -Path "$env:HOME" -ItemType Directory -Force };
if ($Env:CI_NETRC_MACHINE) {
@ -51,10 +57,11 @@ $netrc=[string]::Format("{0}\_netrc",$Env:HOME);
};
[Environment]::SetEnvironmentVariable("CI_NETRC_PASSWORD",$null);
[Environment]::SetEnvironmentVariable("CI_SCRIPT",$null);
if ([Environment]::GetEnvironmentVariable('CI_WORKSPACE')) { cd "$env:CI_WORKSPACE" };
%s
cd "{{.WorkDir}}";
`
var setupScriptWinTmpl, _ = template.New("").Parse(setupScriptWinProto)
// traceScript is a helper script that is added to the step script
// to trace a command.
const traceScriptWin = `

View file

@ -16,6 +16,7 @@ package common
import (
"testing"
"text/template"
"github.com/stretchr/testify/assert"
)
@ -29,7 +30,7 @@ func TestGenerateScriptWin(t *testing.T) {
from: []string{"echo %PATH%", "go build", "go test"},
want: `
$ErrorActionPreference = 'Stop';
if ([Environment]::GetEnvironmentVariable('CI_WORKSPACE')) { if (-not (Test-Path "$env:CI_WORKSPACE")) { New-Item -Path "$env:CI_WORKSPACE" -ItemType Directory -Force }};
if (-not (Test-Path "/woodpecker/some")) { New-Item -Path "/woodpecker/some" -ItemType Directory -Force };
if (-not [Environment]::GetEnvironmentVariable('HOME')) { [Environment]::SetEnvironmentVariable('HOME', 'c:\root') };
if (-not (Test-Path "$env:HOME")) { New-Item -Path "$env:HOME" -ItemType Directory -Force };
if ($Env:CI_NETRC_MACHINE) {
@ -40,7 +41,7 @@ $netrc=[string]::Format("{0}\_netrc",$Env:HOME);
};
[Environment]::SetEnvironmentVariable("CI_NETRC_PASSWORD",$null);
[Environment]::SetEnvironmentVariable("CI_SCRIPT",$null);
if ([Environment]::GetEnvironmentVariable('CI_WORKSPACE')) { cd "$env:CI_WORKSPACE" };
cd "/woodpecker/some";
Write-Output ('+ "echo %PATH%"');
& echo %PATH%; if ($LASTEXITCODE -ne 0) {exit $LASTEXITCODE}
@ -50,12 +51,17 @@ Write-Output ('+ "go build"');
Write-Output ('+ "go test"');
& go test; if ($LASTEXITCODE -ne 0) {exit $LASTEXITCODE}
`,
},
}
for _, test := range testdata {
script := generateScriptWindows(test.from)
script := generateScriptWindows(test.from, "/woodpecker/some")
assert.EqualValues(t, test.want, script, "Want encoded script for %s", test.from)
}
}
func TestSetupScriptWinProtoParse(t *testing.T) {
// just ensure that we have a working `setupScriptWinTmpl` on runntime
_, err := template.New("").Parse(setupScriptWinProto)
assert.NoError(t, err)
}

View file

@ -47,11 +47,14 @@ func (e *docker) toConfig(step *types.Step) *container.Config {
maps.Copy(configEnv, step.Environment)
if len(step.Commands) > 0 {
env, entry := common.GenerateContainerConf(step.Commands, e.info.OSType)
env, entry := common.GenerateContainerConf(step.Commands, e.info.OSType, step.WorkingDir)
for k, v := range env {
configEnv[k] = v
}
config.Entrypoint = entry
// step.WorkingDir will be respected by the generated script
config.WorkingDir = step.WorkspaceBase
}
if len(step.Entrypoint) > 0 {
config.Entrypoint = step.Entrypoint

View file

@ -185,7 +185,7 @@ func TestToConfigSmall(t *testing.T) {
"wp_uuid": "09238932",
},
Env: []string{
"CI_SCRIPT=CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiJENJX1dPUktTUEFDRSIKY2QgIiRDSV9XT1JLU1BBQ0UiCgplY2hvICsgJ2dvIHRlc3QnCmdvIHRlc3QK",
"CI_SCRIPT=CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiIgpjZCAiIgoKZWNobyArICdnbyB0ZXN0JwpnbyB0ZXN0Cg==",
"SHELL=/bin/sh",
},
}, conf)
@ -207,36 +207,37 @@ func TestToConfigFull(t *testing.T) {
}
conf := engine.toConfig(&backend.Step{
Name: "test",
UUID: "09238932",
Type: backend.StepTypeCommands,
Image: "golang:1.2.3",
Pull: true,
Detached: true,
Privileged: true,
WorkingDir: "/src/abc",
Environment: map[string]string{"TAGS": "sqlite"},
Commands: []string{"go test", "go vet ./..."},
ExtraHosts: []backend.HostAlias{{Name: "t", IP: "1.2.3.4"}},
Volumes: []string{"/cache:/cache"},
Tmpfs: []string{"/tmp"},
Devices: []string{"/dev/sdc"},
Networks: []backend.Conn{{Name: "extra-net", Aliases: []string{"extra.net"}}},
DNS: []string{"9.9.9.9", "8.8.8.8"},
DNSSearch: nil,
OnFailure: true,
OnSuccess: true,
Failure: "fail",
AuthConfig: backend.Auth{Username: "user", Password: "123456"},
NetworkMode: "bridge",
Ports: []backend.Port{{Number: 21}, {Number: 22}},
Name: "test",
UUID: "09238932",
Type: backend.StepTypeCommands,
Image: "golang:1.2.3",
Pull: true,
Detached: true,
Privileged: true,
WorkingDir: "/src/abc",
WorkspaceBase: "/src",
Environment: map[string]string{"TAGS": "sqlite"},
Commands: []string{"go test", "go vet ./..."},
ExtraHosts: []backend.HostAlias{{Name: "t", IP: "1.2.3.4"}},
Volumes: []string{"/cache:/cache"},
Tmpfs: []string{"/tmp"},
Devices: []string{"/dev/sdc"},
Networks: []backend.Conn{{Name: "extra-net", Aliases: []string{"extra.net"}}},
DNS: []string{"9.9.9.9", "8.8.8.8"},
DNSSearch: nil,
OnFailure: true,
OnSuccess: true,
Failure: "fail",
AuthConfig: backend.Auth{Username: "user", Password: "123456"},
NetworkMode: "bridge",
Ports: []backend.Port{{Number: 21}, {Number: 22}},
})
assert.NotNil(t, conf)
sort.Strings(conf.Env)
assert.EqualValues(t, &container.Config{
Image: "golang:1.2.3",
WorkingDir: "/src/abc",
WorkingDir: "/src",
AttachStdout: true,
AttachStderr: true,
Entrypoint: []string{"/bin/sh", "-c", "echo $CI_SCRIPT | base64 -d | /bin/sh -e"},
@ -245,7 +246,7 @@ func TestToConfigFull(t *testing.T) {
"wp_uuid": "09238932",
},
Env: []string{
"CI_SCRIPT=CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiJENJX1dPUktTUEFDRSIKY2QgIiRDSV9XT1JLU1BBQ0UiCgplY2hvICsgJ2dvIHRlc3QnCmdvIHRlc3QKCmVjaG8gKyAnZ28gdmV0IC4vLi4uJwpnbyB2ZXQgLi8uLi4K",
"CI_SCRIPT=CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiL3NyYy9hYmMiCmNkICIvc3JjL2FiYyIKCmVjaG8gKyAnZ28gdGVzdCcKZ28gdGVzdAoKZWNobyArICdnbyB2ZXQgLi8uLi4nCmdvIHZldCAuLy4uLgo=",
"SHELL=/bin/sh",
"TAGS=sqlite",
},

View file

@ -381,7 +381,6 @@ func (e *kube) TailStep(ctx context.Context, step *types.Step, taskUUID string)
go func() {
defer logs.Close()
defer wc.Close()
defer rc.Close()
_, err = io.Copy(wc, logs)
if err != nil {

View file

@ -183,7 +183,7 @@ func podContainer(step *types.Step, podName, goos string, options BackendOptions
container := v1.Container{
Name: podName,
Image: step.Image,
WorkingDir: step.WorkspaceBase,
WorkingDir: step.WorkingDir,
Ports: containerPorts(step.Ports),
SecurityContext: containerSecurityContext(options.SecurityContext, step.Privileged),
}
@ -193,9 +193,12 @@ func podContainer(step *types.Step, podName, goos string, options BackendOptions
}
if len(step.Commands) > 0 {
scriptEnv, command := common.GenerateContainerConf(step.Commands, goos)
scriptEnv, command := common.GenerateContainerConf(step.Commands, goos, step.WorkingDir)
container.Command = command
maps.Copy(step.Environment, scriptEnv)
// step.WorkingDir will be respected by the generated script
container.WorkingDir = step.WorkspaceBase
}
if len(step.Entrypoint) > 0 {
container.Command = step.Entrypoint

View file

@ -105,7 +105,7 @@ func TestTinyPod(t *testing.T) {
},
{
"name": "CI_SCRIPT",
"value": "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiJENJX1dPUktTUEFDRSIKY2QgIiRDSV9XT1JLU1BBQ0UiCgplY2hvICsgJ2dyYWRsZSBidWlsZCcKZ3JhZGxlIGJ1aWxkCg=="
"value": "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiL3dvb2RwZWNrZXIvc3JjIgpjZCAiL3dvb2RwZWNrZXIvc3JjIgoKZWNobyArICdncmFkbGUgYnVpbGQnCmdyYWRsZSBidWlsZAo="
}
],
"resources": {},
@ -198,7 +198,7 @@ func TestFullPod(t *testing.T) {
},
{
"name": "CI_SCRIPT",
"value": "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiJENJX1dPUktTUEFDRSIKY2QgIiRDSV9XT1JLU1BBQ0UiCgplY2hvICsgJ2dvIGdldCcKZ28gZ2V0CgplY2hvICsgJ2dvIHRlc3QnCmdvIHRlc3QK"
"value": "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IENJX1NDUklQVApta2RpciAtcCAiL3dvb2RwZWNrZXIvc3JjIgpjZCAiL3dvb2RwZWNrZXIvc3JjIgoKZWNobyArICdnbyBnZXQnCmdvIGdldAoKZWNobyArICdnbyB0ZXN0JwpnbyB0ZXN0Cg=="
},
{
"name": "SHELL",

View file

@ -1,3 +1,4 @@
export default {
commentOnReleasedPullRequests: false,
skipLabels: ['skip-release', 'skip-changelog', 'regression', 'backport-done'],
};

View file

@ -362,7 +362,6 @@ func GetRepoPermissions(c *gin.Context) {
func GetRepoBranches(c *gin.Context) {
_store := store.FromContext(c)
repo := session.Repo(c)
user := session.User(c)
_forge, err := server.Config.Services.Manager.ForgeFromRepo(repo)
if err != nil {
log.Error().Err(err).Msg("Cannot get forge from repo")
@ -370,9 +369,15 @@ func GetRepoBranches(c *gin.Context) {
return
}
forge.Refresh(c, _forge, _store, user)
repoUser, err := _store.GetUser(repo.UserID)
if err != nil {
handleDBError(c, err)
return
}
branches, err := _forge.Branches(c, user, repo, session.Pagination(c))
forge.Refresh(c, _forge, _store, repoUser)
branches, err := _forge.Branches(c, repoUser, repo, session.Pagination(c))
if err != nil {
log.Error().Err(err).Msg("failed to load branches")
c.String(http.StatusInternalServerError, "failed to load branches: %s", err)
@ -396,7 +401,6 @@ func GetRepoBranches(c *gin.Context) {
func GetRepoPullRequests(c *gin.Context) {
_store := store.FromContext(c)
repo := session.Repo(c)
user := session.User(c)
_forge, err := server.Config.Services.Manager.ForgeFromRepo(repo)
if err != nil {
log.Error().Err(err).Msg("Cannot get forge from repo")
@ -404,9 +408,15 @@ func GetRepoPullRequests(c *gin.Context) {
return
}
forge.Refresh(c, _forge, _store, user)
repoUser, err := _store.GetUser(repo.UserID)
if err != nil {
handleDBError(c, err)
return
}
prs, err := _forge.PullRequests(c, user, repo, session.Pagination(c))
forge.Refresh(c, _forge, _store, repoUser)
prs, err := _forge.PullRequests(c, repoUser, repo, session.Pagination(c))
if err != nil {
_ = c.AbortWithError(http.StatusInternalServerError, err)
return

View file

@ -16,6 +16,7 @@
package gitea
import (
"fmt"
"io"
"net/http"
"strings"
@ -111,6 +112,11 @@ func parsePullRequestHook(payload io.Reader) (*model.Repo, *model.Pipeline, erro
return nil, nil, err
}
if pr.PullRequest == nil {
// this should never have happened but it did - so we check
return nil, nil, fmt.Errorf("parsed pull_request webhook does not contain pull_request info")
}
// Don't trigger pipelines for non-code changes ...
if pr.Action != actionOpen && pr.Action != actionSync && pr.Action != actionClose {
log.Debug().Msgf("pull_request action is '%s' and no open or sync", pr.Action)

View file

@ -42,8 +42,8 @@ type Pipeline struct {
Message string `json:"message" xorm:"TEXT 'message'"`
Timestamp int64 `json:"timestamp" xorm:"'timestamp'"`
Sender string `json:"sender" xorm:"sender"` // uses reported user for webhooks and name of cron for cron pipelines
Avatar string `json:"author_avatar" xorm:"avatar"`
Email string `json:"author_email" xorm:"email"`
Avatar string `json:"author_avatar" xorm:"varchar(500) avatar"`
Email string `json:"author_email" xorm:"varchar(500) email"`
ForgeURL string `json:"forge_url" xorm:"forge_url"`
Reviewer string `json:"reviewed_by" xorm:"reviewer"`
Reviewed int64 `json:"reviewed" xorm:"reviewed"`

View file

@ -59,8 +59,8 @@ const processTimeInterval = 100 * time.Millisecond
var ErrWorkerKicked = fmt.Errorf("worker was kicked")
// New returns a new fifo queue.
func New(ctx context.Context) Queue {
// NewMemoryQueue returns a new fifo queue.
func NewMemoryQueue(ctx context.Context) Queue {
q := &fifo{
ctx: ctx,
workers: map[*worker]struct{}{},

View file

@ -32,7 +32,7 @@ func TestFifo(t *testing.T) {
want := &model.Task{ID: "1"}
ctx := context.Background()
q := New(ctx)
q := NewMemoryQueue(ctx)
assert.NoError(t, q.Push(ctx, want))
info := q.Info(ctx)
assert.Len(t, info.Pending, 1, "expect task in pending queue")
@ -55,7 +55,7 @@ func TestFifoExpire(t *testing.T) {
want := &model.Task{ID: "1"}
ctx, cancel := context.WithCancelCause(context.Background())
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
q.extension = 0
assert.NoError(t, q.Push(ctx, want))
info := q.Info(ctx)
@ -78,7 +78,7 @@ func TestFifoWait(t *testing.T) {
want := &model.Task{ID: "1"}
ctx := context.Background()
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.Push(ctx, want))
got, err := q.Poll(ctx, 1, filterFnTrue)
@ -101,7 +101,7 @@ func TestFifoEvict(t *testing.T) {
t1 := &model.Task{ID: "1"}
ctx := context.Background()
q := New(ctx)
q := NewMemoryQueue(ctx)
assert.NoError(t, q.Push(ctx, t1))
info := q.Info(ctx)
assert.Len(t, info.Pending, 1, "expect task in pending queue")
@ -125,7 +125,7 @@ func TestFifoDependencies(t *testing.T) {
DepStatus: make(map[string]model.StatusValue),
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.PushAtOnce(ctx, []*model.Task{task2, task1}))
got, err := q.Poll(ctx, 1, filterFnTrue)
@ -158,7 +158,7 @@ func TestFifoErrors(t *testing.T) {
RunOn: []string{"success", "failure"},
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.PushAtOnce(ctx, []*model.Task{task2, task3, task1}))
got, err := q.Poll(ctx, 1, filterFnTrue)
@ -194,7 +194,7 @@ func TestFifoErrors2(t *testing.T) {
DepStatus: make(map[string]model.StatusValue),
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.PushAtOnce(ctx, []*model.Task{task2, task3, task1}))
for i := 0; i < 2; i++ {
@ -234,7 +234,7 @@ func TestFifoErrorsMultiThread(t *testing.T) {
DepStatus: make(map[string]model.StatusValue),
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.PushAtOnce(ctx, []*model.Task{task2, task3, task1}))
obtainedWorkCh := make(chan *model.Task)
@ -314,7 +314,7 @@ func TestFifoTransitiveErrors(t *testing.T) {
DepStatus: make(map[string]model.StatusValue),
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.PushAtOnce(ctx, []*model.Task{task2, task3, task1}))
got, err := q.Poll(ctx, 1, filterFnTrue)
@ -353,7 +353,7 @@ func TestFifoCancel(t *testing.T) {
RunOn: []string{"success", "failure"},
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.PushAtOnce(ctx, []*model.Task{task2, task3, task1}))
_, _ = q.Poll(ctx, 1, filterFnTrue)
@ -371,7 +371,7 @@ func TestFifoPause(t *testing.T) {
ID: "1",
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
var wg sync.WaitGroup
wg.Add(1)
go func() {
@ -402,7 +402,7 @@ func TestFifoPauseResume(t *testing.T) {
ID: "1",
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
q.Pause()
assert.NoError(t, q.Push(ctx, task1))
q.Resume()
@ -429,7 +429,7 @@ func TestWaitingVsPending(t *testing.T) {
RunOn: []string{"success", "failure"},
}
q, _ := New(ctx).(*fifo)
q, _ := NewMemoryQueue(ctx).(*fifo)
assert.NoError(t, q.PushAtOnce(ctx, []*model.Task{task2, task3, task1}))
got, _ := q.Poll(ctx, 1, filterFnTrue)
@ -519,7 +519,7 @@ func TestShouldRun(t *testing.T) {
func TestFifoWithScoring(t *testing.T) {
ctx := context.Background()
q := New(ctx)
q := NewMemoryQueue(ctx)
// Create tasks with different labels
tasks := []*model.Task{

View file

@ -17,9 +17,11 @@ package queue
import (
"context"
"errors"
"fmt"
"strings"
"go.woodpecker-ci.org/woodpecker/v2/server/model"
"go.woodpecker-ci.org/woodpecker/v2/server/store"
)
var (
@ -115,3 +117,33 @@ type Queue interface {
// KickAgentWorkers kicks all workers for a given agent.
KickAgentWorkers(agentID int64)
}
// Config holds the configuration for the queue.
type Config struct {
Backend Type
Store store.Store
}
// Queue type.
type Type string
const (
TypeMemory Type = "memory"
)
// New creates a new queue based on the provided configuration.
func New(ctx context.Context, config Config) (Queue, error) {
var q Queue
switch config.Backend {
case TypeMemory:
q = NewMemoryQueue(ctx)
if config.Store != nil {
q = WithTaskStore(ctx, q, config.Store)
}
default:
return nil, fmt.Errorf("unsupported queue backend: %s", config.Backend)
}
return q, nil
}

View file

@ -14,9 +14,14 @@
package store
import "time"
type XORM struct {
Log bool
ShowSQL bool
Log bool
ShowSQL bool
MaxIdleConns int
MaxOpenConns int
ConnMaxLifetime time.Duration
}
// Opts are options for a new database connection.

View file

@ -45,6 +45,9 @@ func NewEngine(opts *store.Opts) (store.Store, error) {
logger := newXORMLogger(level)
engine.SetLogger(logger)
engine.ShowSQL(opts.XORM.ShowSQL)
engine.SetMaxOpenConns(opts.XORM.MaxOpenConns)
engine.SetMaxIdleConns(opts.XORM.MaxIdleConns)
engine.SetConnMaxLifetime(opts.XORM.ConnMaxLifetime)
return &storage{
engine: engine,

View file

@ -5,18 +5,14 @@ import (
"xorm.io/xorm"
)
type v000Migrations struct {
Name string `xorm:"UNIQUE"`
}
func (m *v000Migrations) TableName() string {
return "migrations"
}
var legacyToXormigrate = xormigrate.Migration{
ID: "legacy-to-xormigrate",
MigrateSession: func(sess *xorm.Session) error {
var mig []*v000Migrations
type migrations struct {
Name string `xorm:"UNIQUE"`
}
var mig []*migrations
if err := sess.Find(&mig); err != nil {
return err
}

View file

@ -19,30 +19,39 @@ import (
"src.techknowlogick.com/xormigrate"
"xorm.io/xorm"
"go.woodpecker-ci.org/woodpecker/v2/server/model"
)
var addOrgID = xormigrate.Migration{
ID: "add-org-id",
MigrateSession: func(sess *xorm.Session) error {
if err := sess.Sync(new(userV009)); err != nil {
type users struct {
ID int64 `xorm:"pk autoincr 'user_id'"`
Login string `xorm:"UNIQUE 'user_login'"`
OrgID int64 `xorm:"user_org_id"`
}
type orgs struct {
ID int64 `xorm:"pk autoincr 'id'"`
Name string `xorm:"UNIQUE 'name'"`
IsUser bool `xorm:"is_user"`
}
if err := sess.Sync(new(users), new(orgs)); err != nil {
return fmt.Errorf("sync new models failed: %w", err)
}
// get all users
var users []*userV009
if err := sess.Find(&users); err != nil {
var us []*users
if err := sess.Find(&us); err != nil {
return fmt.Errorf("find all repos failed: %w", err)
}
for _, user := range users {
org := &model.Org{}
for _, user := range us {
org := &orgs{}
has, err := sess.Where("name = ?", user.Login).Get(org)
if err != nil {
return fmt.Errorf("getting org failed: %w", err)
} else if !has {
org = &model.Org{
org = &orgs{
Name: user.Login,
IsUser: true,
}

View file

@ -19,23 +19,19 @@ import (
"xorm.io/xorm"
)
type oldSecret004 struct {
ID int64 `json:"id" xorm:"pk autoincr 'secret_id'"`
PluginsOnly bool `json:"plugins_only" xorm:"secret_plugins_only"`
SkipVerify bool `json:"-" xorm:"secret_skip_verify"`
Conceal bool `json:"-" xorm:"secret_conceal"`
Images []string `json:"images" xorm:"json 'secret_images'"`
}
func (oldSecret004) TableName() string {
return "secrets"
}
var removePluginOnlyOptionFromSecretsTable = xormigrate.Migration{
ID: "remove-plugin-only-option-from-secrets-table",
MigrateSession: func(sess *xorm.Session) (err error) {
type secrets struct {
ID int64 `json:"id" xorm:"pk autoincr 'secret_id'"`
PluginsOnly bool `json:"plugins_only" xorm:"secret_plugins_only"`
SkipVerify bool `json:"-" xorm:"secret_skip_verify"`
Conceal bool `json:"-" xorm:"secret_conceal"`
Images []string `json:"images" xorm:"json 'secret_images'"`
}
// make sure plugin_only column exists
if err := sess.Sync(new(oldSecret004)); err != nil {
if err := sess.Sync(new(secrets)); err != nil {
return err
}

View file

@ -17,41 +17,35 @@ package migration
import (
"src.techknowlogick.com/xormigrate"
"xorm.io/xorm"
errorTypes "go.woodpecker-ci.org/woodpecker/v2/pipeline/errors/types"
)
// perPage005 set the size of the slice to read per page.
var perPage005 = 100
type pipeline005 struct {
ID int64 `json:"id" xorm:"pk autoincr 'pipeline_id'"`
Error string `json:"error" xorm:"LONGTEXT 'pipeline_error'"` // old error format
Errors []*errorTypes.PipelineError `json:"errors" xorm:"json 'pipeline_errors'"` // new error format
}
func (pipeline005) TableName() string {
return "pipelines"
}
type PipelineError005 struct {
Type string `json:"type"`
Message string `json:"message"`
IsWarning bool `json:"is_warning"`
Data any `json:"data"`
}
var convertToNewPipelineErrorFormat = xormigrate.Migration{
ID: "convert-to-new-pipeline-error-format",
Long: true,
MigrateSession: func(sess *xorm.Session) (err error) {
type pipelineError struct {
Type string `json:"type"`
Message string `json:"message"`
IsWarning bool `json:"is_warning"`
Data any `json:"data"`
}
type pipelines struct {
ID int64 `json:"id" xorm:"pk autoincr 'pipeline_id'"`
Error string `json:"error" xorm:"LONGTEXT 'pipeline_error'"` // old error format
Errors []*pipelineError `json:"errors" xorm:"json 'pipeline_errors'"` // new error format
}
// make sure pipeline_error column exists
if err := sess.Sync(new(pipeline005)); err != nil {
if err := sess.Sync(new(pipelines)); err != nil {
return err
}
page := 0
oldPipelines := make([]*pipeline005, 0, perPage005)
oldPipelines := make([]*pipelines, 0, perPage005)
for {
oldPipelines = oldPipelines[:0]
@ -62,9 +56,9 @@ var convertToNewPipelineErrorFormat = xormigrate.Migration{
}
for _, oldPipeline := range oldPipelines {
var newPipeline pipeline005
var newPipeline pipelines
newPipeline.ID = oldPipeline.ID
newPipeline.Errors = []*errorTypes.PipelineError{{
newPipeline.Errors = []*pipelineError{{
Type: "generic",
Message: oldPipeline.Error,
}}

View file

@ -19,32 +19,24 @@ import (
"xorm.io/xorm"
)
type oldRegistry007 struct {
ID int64 `json:"id" xorm:"pk autoincr 'registry_id'"`
Token string `json:"token" xorm:"TEXT 'registry_token'"`
Email string `json:"email" xorm:"varchar(500) 'registry_email'"`
}
func (oldRegistry007) TableName() string {
return "registry"
}
type oldPipeline007 struct {
ID int64 `json:"id" xorm:"pk autoincr 'pipeline_id'"`
ConfigID int64 `json:"-" xorm:"pipeline_config_id"`
Enqueued int64 `json:"enqueued_at" xorm:"pipeline_enqueued"`
CloneURL string `json:"clone_url" xorm:"pipeline_clone_url"`
}
// TableName return database table name for xorm.
func (oldPipeline007) TableName() string {
return "pipelines"
}
var cleanRegistryPipeline = xormigrate.Migration{
ID: "clean-registry-pipeline",
MigrateSession: func(sess *xorm.Session) (err error) {
if err := sess.Sync(new(oldRegistry007), new(oldPipeline007)); err != nil {
type registry struct {
ID int64 `json:"id" xorm:"pk autoincr 'registry_id'"`
Token string `json:"token" xorm:"TEXT 'registry_token'"`
Email string `json:"email" xorm:"varchar(500) 'registry_email'"`
}
type pipelines struct {
ID int64 `json:"id" xorm:"pk autoincr 'pipeline_id'"`
ConfigID int64 `json:"-" xorm:"pipeline_config_id"`
Enqueued int64 `json:"enqueued_at" xorm:"pipeline_enqueued"`
CloneURL string `json:"clone_url" xorm:"pipeline_clone_url"`
}
// ensure columns to drop exist
if err := sess.Sync(new(registry), new(pipelines)); err != nil {
return err
}

View file

@ -77,7 +77,7 @@ func (repoV008) TableName() string {
return "repos"
}
type forgeV008 struct {
type forge struct {
ID int64 `xorm:"pk autoincr 'id'"`
Type model.ForgeType `xorm:"VARCHAR(250) 'type'"`
URL string `xorm:"VARCHAR(500) 'url'"`
@ -88,14 +88,14 @@ type forgeV008 struct {
AdditionalOptions map[string]any `xorm:"json 'additional_options'"`
}
func (forgeV008) TableName() string {
func (forge) TableName() string {
return "forge"
}
var setForgeID = xormigrate.Migration{
ID: "set-forge-id",
MigrateSession: func(sess *xorm.Session) (err error) {
if err := sess.Sync(new(userV008), new(repoV008), new(forgeV008), new(model.Org)); err != nil {
if err := sess.Sync(new(userV008), new(repoV008), new(forge), new(model.Org)); err != nil {
return fmt.Errorf("sync new models failed: %w", err)
}

View file

@ -19,230 +19,182 @@ import (
"src.techknowlogick.com/xormigrate"
"xorm.io/xorm"
"go.woodpecker-ci.org/woodpecker/v2/pipeline/errors/types"
"go.woodpecker-ci.org/woodpecker/v2/server/model"
)
type configV009 struct {
ID int64 `xorm:"pk autoincr 'config_id'"`
RepoID int64 `xorm:"UNIQUE(s) 'config_repo_id'"`
Hash string `xorm:"UNIQUE(s) 'config_hash'"`
Name string `xorm:"UNIQUE(s) 'config_name'"`
Data []byte `xorm:"LONGBLOB 'config_data'"`
}
func (configV009) TableName() string {
return "config"
}
type cronV009 struct {
ID int64 `xorm:"pk autoincr 'i_d'"`
Name string `xorm:"name UNIQUE(s) INDEX"`
RepoID int64 `xorm:"repo_id UNIQUE(s) INDEX"`
CreatorID int64 `xorm:"creator_id INDEX"`
NextExec int64 `xorm:"next_exec"`
Schedule string `xorm:"schedule NOT NULL"`
Created int64 `xorm:"created NOT NULL DEFAULT 0"`
Branch string `xorm:"branch"`
}
func (cronV009) TableName() string {
return "crons"
}
type permV009 struct {
UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL 'perm_user_id'"`
RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL 'perm_repo_id'"`
Pull bool `xorm:"perm_pull"`
Push bool `xorm:"perm_push"`
Admin bool `xorm:"perm_admin"`
Synced int64 `xorm:"perm_synced"`
}
func (permV009) TableName() string {
return "perms"
}
type pipelineV009 struct {
ID int64 `xorm:"pk autoincr 'pipeline_id'"`
RepoID int64 `xorm:"UNIQUE(s) INDEX 'pipeline_repo_id'"`
Number int64 `xorm:"UNIQUE(s) 'pipeline_number'"`
Author string `xorm:"INDEX 'pipeline_author'"`
Parent int64 `xorm:"pipeline_parent"`
Event model.WebhookEvent `xorm:"pipeline_event"`
Status model.StatusValue `xorm:"INDEX 'pipeline_status'"`
Errors []*types.PipelineError `xorm:"json 'pipeline_errors'"`
Created int64 `xorm:"pipeline_created"`
Started int64 `xorm:"pipeline_started"`
Finished int64 `xorm:"pipeline_finished"`
Deploy string `xorm:"pipeline_deploy"`
DeployTask string `xorm:"pipeline_deploy_task"`
Commit string `xorm:"pipeline_commit"`
Branch string `xorm:"pipeline_branch"`
Ref string `xorm:"pipeline_ref"`
Refspec string `xorm:"pipeline_refspec"`
Title string `xorm:"pipeline_title"`
Message string `xorm:"TEXT 'pipeline_message'"`
Timestamp int64 `xorm:"pipeline_timestamp"`
Sender string `xorm:"pipeline_sender"` // uses reported user for webhooks and name of cron for cron pipelines
Avatar string `xorm:"pipeline_avatar"`
Email string `xorm:"pipeline_email"`
ForgeURL string `xorm:"pipeline_forge_url"`
Reviewer string `xorm:"pipeline_reviewer"`
Reviewed int64 `xorm:"pipeline_reviewed"`
}
func (pipelineV009) TableName() string {
return "pipelines"
}
type redirectionV009 struct {
ID int64 `xorm:"pk autoincr 'redirection_id'"`
}
func (r redirectionV009) TableName() string {
return "redirections"
}
type registryV009 struct {
ID int64 `xorm:"pk autoincr 'registry_id'"`
RepoID int64 `xorm:"UNIQUE(s) INDEX 'registry_repo_id'"`
Address string `xorm:"UNIQUE(s) INDEX 'registry_addr'"`
Username string `xorm:"varchar(2000) 'registry_username'"`
Password string `xorm:"TEXT 'registry_password'"`
}
func (registryV009) TableName() string {
return "registry"
}
type repoV009 struct {
ID int64 `xorm:"pk autoincr 'repo_id'"`
UserID int64 `xorm:"repo_user_id"`
OrgID int64 `xorm:"repo_org_id"`
Owner string `xorm:"UNIQUE(name) 'repo_owner'"`
Name string `xorm:"UNIQUE(name) 'repo_name'"`
FullName string `xorm:"UNIQUE 'repo_full_name'"`
Avatar string `xorm:"varchar(500) 'repo_avatar'"`
ForgeURL string `xorm:"varchar(1000) 'repo_forge_url'"`
Clone string `xorm:"varchar(1000) 'repo_clone'"`
CloneSSH string `xorm:"varchar(1000) 'repo_clone_ssh'"`
Branch string `xorm:"varchar(500) 'repo_branch'"`
SCMKind model.SCMKind `xorm:"varchar(50) 'repo_scm'"`
PREnabled bool `xorm:"DEFAULT TRUE 'repo_pr_enabled'"`
Timeout int64 `xorm:"repo_timeout"`
Visibility model.RepoVisibility `xorm:"varchar(10) 'repo_visibility'"`
IsSCMPrivate bool `xorm:"repo_private"`
IsTrusted bool `xorm:"repo_trusted"`
IsGated bool `xorm:"repo_gated"`
IsActive bool `xorm:"repo_active"`
AllowPull bool `xorm:"repo_allow_pr"`
AllowDeploy bool `xorm:"repo_allow_deploy"`
Config string `xorm:"varchar(500) 'repo_config_path'"`
Hash string `xorm:"varchar(500) 'repo_hash'"`
}
func (repoV009) TableName() string {
return "repos"
}
// secretV009 is a fixed snapshot of the secrets table schema as used by the
// migration code; a secret belongs to either an org or a repo (the unused
// owner column defaults to 0).
type secretV009 struct {
	ID     int64                `xorm:"pk autoincr 'secret_id'"`
	OrgID  int64                `xorm:"NOT NULL DEFAULT 0 UNIQUE(s) INDEX 'secret_org_id'"`
	RepoID int64                `xorm:"NOT NULL DEFAULT 0 UNIQUE(s) INDEX 'secret_repo_id'"`
	Name   string               `xorm:"NOT NULL UNIQUE(s) INDEX 'secret_name'"` // unique together with OrgID and RepoID
	Value  string               `xorm:"TEXT 'secret_value'"`
	Images []string             `xorm:"json 'secret_images'"` // images allowed to use this secret, stored as JSON
	Events []model.WebhookEvent `xorm:"json 'secret_events'"` // webhook events the secret is exposed to, stored as JSON
}

// TableName returns the database table name xorm maps secretV009 to.
func (secretV009) TableName() string {
	return "secrets"
}
// stepV009 is a fixed snapshot of the pipeline-step table schema as used by
// the migration code; it must not be changed to follow the live model.
type stepV009 struct {
	ID         int64             `xorm:"pk autoincr 'step_id'"`
	UUID       string            `xorm:"INDEX 'step_uuid'"`
	PipelineID int64             `xorm:"UNIQUE(s) INDEX 'step_pipeline_id'"` // unique together with PID
	PID        int               `xorm:"UNIQUE(s) 'step_pid'"`
	PPID       int               `xorm:"step_ppid"` // parent step PID
	Name       string            `xorm:"step_name"`
	State      model.StatusValue `xorm:"step_state"`
	Error      string            `xorm:"TEXT 'step_error'"`
	Failure    string            `xorm:"step_failure"`
	ExitCode   int               `xorm:"step_exit_code"`
	Started    int64             `xorm:"step_started"` // unix timestamps
	Stopped    int64             `xorm:"step_stopped"`
	Type       model.StepType    `xorm:"step_type"`
}

// TableName returns the database table name xorm maps stepV009 to.
func (stepV009) TableName() string {
	return "steps"
}
// taskV009 is a fixed snapshot of the queue-task table schema as used by the
// migration code. Note the string primary key and the JSON-encoded maps and
// slices for labels, dependencies, and dependency status.
type taskV009 struct {
	ID           string                       `xorm:"PK UNIQUE 'task_id'"`
	Data         []byte                       `xorm:"LONGBLOB 'task_data'"` // serialized task payload
	Labels       map[string]string            `xorm:"json 'task_labels'"`
	Dependencies []string                     `xorm:"json 'task_dependencies'"`
	RunOn        []string                     `xorm:"json 'task_run_on'"`
	DepStatus    map[string]model.StatusValue `xorm:"json 'task_dep_status'"`
}

// TableName returns the database table name xorm maps taskV009 to.
func (taskV009) TableName() string {
	return "tasks"
}
// userV009 is a fixed snapshot of the user table schema as used by the
// migration code; it must not be changed to follow the live model.
type userV009 struct {
	ID     int64  `xorm:"pk autoincr 'user_id'"`
	Login  string `xorm:"UNIQUE 'user_login'"`
	Token  string `xorm:"TEXT 'user_token'"` // forge OAuth token
	Secret string `xorm:"TEXT 'user_secret'"`
	Expiry int64  `xorm:"user_expiry"`
	// NOTE(review): the next two tags carry a leading space (" varchar(500)");
	// xorm appears to tolerate it, and it must stay byte-identical here to
	// mirror the historical schema — do not normalize.
	Email  string `xorm:" varchar(500) 'user_email'"`
	Avatar string `xorm:" varchar(500) 'user_avatar'"`
	Admin  bool   `xorm:"user_admin"`
	Hash   string `xorm:"UNIQUE varchar(500) 'user_hash'"`
	OrgID  int64  `xorm:"user_org_id"`
}

// TableName returns the database table name xorm maps userV009 to.
func (userV009) TableName() string {
	return "users"
}
// workflowV009 is a fixed snapshot of the workflow table schema as used by
// the migration code; it must not be changed to follow the live model.
type workflowV009 struct {
	ID         int64             `xorm:"pk autoincr 'workflow_id'"`
	PipelineID int64             `xorm:"UNIQUE(s) INDEX 'workflow_pipeline_id'"` // unique together with PID
	PID        int               `xorm:"UNIQUE(s) 'workflow_pid'"`
	Name       string            `xorm:"workflow_name"`
	State      model.StatusValue `xorm:"workflow_state"`
	Error      string            `xorm:"TEXT 'workflow_error'"`
	Started    int64             `xorm:"workflow_started"` // unix timestamps
	Stopped    int64             `xorm:"workflow_stopped"`
	AgentID    int64             `xorm:"workflow_agent_id"`
	Platform   string            `xorm:"workflow_platform"`
	Environ    map[string]string `xorm:"json 'workflow_environ'"` // environment variables, stored as JSON
	AxisID     int               `xorm:"workflow_axis_id"`        // matrix axis index
}

// TableName returns the database table name xorm maps workflowV009 to.
func (workflowV009) TableName() string {
	return "workflows"
}
// serverConfigV009 is a fixed snapshot of the server key/value configuration
// table schema as used by the migration code.
type serverConfigV009 struct {
	Key   string `xorm:"pk 'key'"`
	Value string `xorm:"value"`
}

// TableName returns the database table name xorm maps serverConfigV009 to.
func (serverConfigV009) TableName() string {
	return "server_config"
}
var unifyColumnsTables = xormigrate.Migration{
ID: "unify-columns-tables",
MigrateSession: func(sess *xorm.Session) (err error) {
if err := sess.Sync(new(configV009), new(cronV009), new(permV009), new(pipelineV009), new(redirectionV009), new(registryV009), new(repoV009), new(secretV009), new(stepV009), new(taskV009), new(userV009), new(workflowV009), new(serverConfigV009)); err != nil {
type config struct {
ID int64 `xorm:"pk autoincr 'config_id'"`
RepoID int64 `xorm:"UNIQUE(s) 'config_repo_id'"`
Hash string `xorm:"UNIQUE(s) 'config_hash'"`
Name string `xorm:"UNIQUE(s) 'config_name'"`
Data []byte `xorm:"LONGBLOB 'config_data'"`
}
type crons struct {
ID int64 `xorm:"pk autoincr 'i_d'"`
Name string `xorm:"name UNIQUE(s) INDEX"`
RepoID int64 `xorm:"repo_id UNIQUE(s) INDEX"`
CreatorID int64 `xorm:"creator_id INDEX"`
NextExec int64 `xorm:"next_exec"`
Schedule string `xorm:"schedule NOT NULL"`
Created int64 `xorm:"created NOT NULL DEFAULT 0"`
Branch string `xorm:"branch"`
}
type perms struct {
UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL 'perm_user_id'"`
RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL 'perm_repo_id'"`
Pull bool `xorm:"perm_pull"`
Push bool `xorm:"perm_push"`
Admin bool `xorm:"perm_admin"`
Synced int64 `xorm:"perm_synced"`
}
type pipelineError struct {
Type string `json:"type"`
Message string `json:"message"`
IsWarning bool `json:"is_warning"`
Data any `json:"data"`
}
type pipelines struct {
ID int64 `xorm:"pk autoincr 'pipeline_id'"`
RepoID int64 `xorm:"UNIQUE(s) INDEX 'pipeline_repo_id'"`
Number int64 `xorm:"UNIQUE(s) 'pipeline_number'"`
Author string `xorm:"INDEX 'pipeline_author'"`
Parent int64 `xorm:"pipeline_parent"`
Event string `xorm:"pipeline_event"`
Status string `xorm:"INDEX 'pipeline_status'"`
Errors []*pipelineError `xorm:"json 'pipeline_errors'"`
Created int64 `xorm:"pipeline_created"`
Started int64 `xorm:"pipeline_started"`
Finished int64 `xorm:"pipeline_finished"`
Deploy string `xorm:"pipeline_deploy"`
DeployTask string `xorm:"pipeline_deploy_task"`
Commit string `xorm:"pipeline_commit"`
Branch string `xorm:"pipeline_branch"`
Ref string `xorm:"pipeline_ref"`
Refspec string `xorm:"pipeline_refspec"`
Title string `xorm:"pipeline_title"`
Message string `xorm:"TEXT 'pipeline_message'"`
Timestamp int64 `xorm:"pipeline_timestamp"`
Sender string `xorm:"pipeline_sender"` // uses reported user for webhooks and name of cron for cron pipelines
Avatar string `xorm:"pipeline_avatar"`
Email string `xorm:"pipeline_email"`
ForgeURL string `xorm:"pipeline_forge_url"`
Reviewer string `xorm:"pipeline_reviewer"`
Reviewed int64 `xorm:"pipeline_reviewed"`
}
type redirections struct {
ID int64 `xorm:"pk autoincr 'redirection_id'"`
}
type registry struct {
ID int64 `xorm:"pk autoincr 'registry_id'"`
RepoID int64 `xorm:"UNIQUE(s) INDEX 'registry_repo_id'"`
Address string `xorm:"UNIQUE(s) INDEX 'registry_addr'"`
Username string `xorm:"varchar(2000) 'registry_username'"`
Password string `xorm:"TEXT 'registry_password'"`
}
type repos struct {
ID int64 `xorm:"pk autoincr 'repo_id'"`
UserID int64 `xorm:"repo_user_id"`
OrgID int64 `xorm:"repo_org_id"`
Owner string `xorm:"UNIQUE(name) 'repo_owner'"`
Name string `xorm:"UNIQUE(name) 'repo_name'"`
FullName string `xorm:"UNIQUE 'repo_full_name'"`
Avatar string `xorm:"varchar(500) 'repo_avatar'"`
ForgeURL string `xorm:"varchar(1000) 'repo_forge_url'"`
Clone string `xorm:"varchar(1000) 'repo_clone'"`
CloneSSH string `xorm:"varchar(1000) 'repo_clone_ssh'"`
Branch string `xorm:"varchar(500) 'repo_branch'"`
SCMKind string `xorm:"varchar(50) 'repo_scm'"`
PREnabled bool `xorm:"DEFAULT TRUE 'repo_pr_enabled'"`
Timeout int64 `xorm:"repo_timeout"`
Visibility string `xorm:"varchar(10) 'repo_visibility'"`
IsSCMPrivate bool `xorm:"repo_private"`
IsTrusted bool `xorm:"repo_trusted"`
IsGated bool `xorm:"repo_gated"`
IsActive bool `xorm:"repo_active"`
AllowPull bool `xorm:"repo_allow_pr"`
AllowDeploy bool `xorm:"repo_allow_deploy"`
Config string `xorm:"varchar(500) 'repo_config_path'"`
Hash string `xorm:"varchar(500) 'repo_hash'"`
}
type secrets struct {
ID int64 `xorm:"pk autoincr 'secret_id'"`
OrgID int64 `xorm:"NOT NULL DEFAULT 0 UNIQUE(s) INDEX 'secret_org_id'"`
RepoID int64 `xorm:"NOT NULL DEFAULT 0 UNIQUE(s) INDEX 'secret_repo_id'"`
Name string `xorm:"NOT NULL UNIQUE(s) INDEX 'secret_name'"`
Value string `xorm:"TEXT 'secret_value'"`
Images []string `xorm:"json 'secret_images'"`
Events []string `xorm:"json 'secret_events'"`
}
type steps struct {
ID int64 `xorm:"pk autoincr 'step_id'"`
UUID string `xorm:"INDEX 'step_uuid'"`
PipelineID int64 `xorm:"UNIQUE(s) INDEX 'step_pipeline_id'"`
PID int `xorm:"UNIQUE(s) 'step_pid'"`
PPID int `xorm:"step_ppid"`
Name string `xorm:"step_name"`
State string `xorm:"step_state"`
Error string `xorm:"TEXT 'step_error'"`
Failure string `xorm:"step_failure"`
ExitCode int `xorm:"step_exit_code"`
Started int64 `xorm:"step_started"`
Stopped int64 `xorm:"step_stopped"`
Type string `xorm:"step_type"`
}
type tasks struct {
ID string `xorm:"PK UNIQUE 'task_id'"`
Data []byte `xorm:"LONGBLOB 'task_data'"`
Labels map[string]string `xorm:"json 'task_labels'"`
Dependencies []string `xorm:"json 'task_dependencies'"`
RunOn []string `xorm:"json 'task_run_on'"`
DepStatus map[string]string `xorm:"json 'task_dep_status'"`
}
type users struct {
ID int64 `xorm:"pk autoincr 'user_id'"`
Login string `xorm:"UNIQUE 'user_login'"`
Token string `xorm:"TEXT 'user_token'"`
Secret string `xorm:"TEXT 'user_secret'"`
Expiry int64 `xorm:"user_expiry"`
Email string `xorm:" varchar(500) 'user_email'"`
Avatar string `xorm:" varchar(500) 'user_avatar'"`
Admin bool `xorm:"user_admin"`
Hash string `xorm:"UNIQUE varchar(500) 'user_hash'"`
OrgID int64 `xorm:"user_org_id"`
}
type workflows struct {
ID int64 `xorm:"pk autoincr 'workflow_id'"`
PipelineID int64 `xorm:"UNIQUE(s) INDEX 'workflow_pipeline_id'"`
PID int `xorm:"UNIQUE(s) 'workflow_pid'"`
Name string `xorm:"workflow_name"`
State string `xorm:"workflow_state"`
Error string `xorm:"TEXT 'workflow_error'"`
Started int64 `xorm:"workflow_started"`
Stopped int64 `xorm:"workflow_stopped"`
AgentID int64 `xorm:"workflow_agent_id"`
Platform string `xorm:"workflow_platform"`
Environ map[string]string `xorm:"json 'workflow_environ'"`
AxisID int `xorm:"workflow_axis_id"`
}
type serverConfig struct {
Key string `xorm:"pk 'key'"`
Value string `xorm:"value"`
}
if err := sess.Sync(new(config), new(crons), new(perms), new(pipelines), new(redirections), new(registry), new(repos), new(secrets), new(steps), new(tasks), new(users), new(workflows), new(serverConfig)); err != nil {
return fmt.Errorf("sync models failed: %w", err)
}

View file

@ -21,26 +21,17 @@ import (
"xorm.io/xorm"
)
// stepV012 is a minimal migration snapshot of the steps table declaring only
// the Finished field, mapped onto the legacy 'stopped' column.
type stepV012 struct {
	Finished int64 `xorm:"stopped"`
}

// TableName returns the database table name xorm maps stepV012 to.
func (stepV012) TableName() string {
	return "steps"
}
// workflowV012 is a minimal migration snapshot of the workflows table
// declaring only the Finished field, mapped onto the legacy 'stopped' column.
type workflowV012 struct {
	Finished int64 `xorm:"stopped"`
}

// TableName returns the database table name xorm maps workflowV012 to.
func (workflowV012) TableName() string {
	return "workflows"
}
var renameStartEndTime = xormigrate.Migration{
ID: "rename-start-end-time",
MigrateSession: func(sess *xorm.Session) (err error) {
if err := sess.Sync(new(stepV012), new(workflowV012)); err != nil {
type steps struct {
Finished int64 `xorm:"stopped"`
}
type workflows struct {
Finished int64 `xorm:"stopped"`
}
if err := sess.Sync(new(steps), new(workflows)); err != nil {
return fmt.Errorf("sync models failed: %w", err)
}

View file

@ -23,25 +23,21 @@ import (
"go.woodpecker-ci.org/woodpecker/v2/server/model"
)
// agentV015 is a minimal migration snapshot of the agents table declaring
// only the columns touched by the "add-org-agents" migration.
type agentV015 struct {
	ID      int64 `xorm:"pk autoincr 'id'"`
	OwnerID int64 `xorm:"INDEX 'owner_id'"`
	OrgID   int64 `xorm:"INDEX 'org_id'"` // org owning the agent; set to a sentinel for global agents by the migration
}

// TableName returns the database table name xorm maps agentV015 to.
func (agentV015) TableName() string {
	return "agents"
}
var addOrgAgents = xormigrate.Migration{
ID: "add-org-agents",
MigrateSession: func(sess *xorm.Session) (err error) {
if err := sess.Sync(new(agentV015)); err != nil {
type agents struct {
ID int64 `xorm:"pk autoincr 'id'"`
OwnerID int64 `xorm:"INDEX 'owner_id'"`
OrgID int64 `xorm:"INDEX 'org_id'"`
}
if err := sess.Sync(new(agents)); err != nil {
return fmt.Errorf("sync models failed: %w", err)
}
// Update all existing agents to be global agents
_, err = sess.Cols("org_id").Update(&model.Agent{
_, err = sess.Cols("org_id").Update(&agents{
OrgID: model.IDNotSet,
})
return err

View file

@ -21,19 +21,15 @@ import (
"xorm.io/xorm"
)
// agentV016 is a minimal migration snapshot of the agents table declaring
// only the columns touched by the "add-custom-labels-to-agent" migration.
type agentV016 struct {
	ID           int64             `xorm:"pk autoincr 'id'"`
	CustomLabels map[string]string `xorm:"JSON 'custom_labels'"` // user-defined agent labels, stored as JSON
}

// TableName returns the database table name xorm maps agentV016 to.
func (agentV016) TableName() string {
	return "agents"
}
var addCustomLabelsToAgent = xormigrate.Migration{
ID: "add-custom-labels-to-agent",
MigrateSession: func(sess *xorm.Session) (err error) {
if err := sess.Sync(new(agentV016)); err != nil {
type agents struct {
ID int64 `xorm:"pk autoincr 'id'"`
CustomLabels map[string]string `xorm:"JSON 'custom_labels'"`
}
if err := sess.Sync(new(agents)); err != nil {
return fmt.Errorf("sync models failed: %w", err)
}
return nil

View file

@ -23,24 +23,20 @@ import (
"go.woodpecker-ci.org/woodpecker/v2/server/model"
)
// repoV035 is a minimal migration snapshot of the repos table for the
// "split-trusted" migration, which expands the legacy boolean 'trusted'
// column into the structured configuration stored as JSON in 'trusted_conf'.
type repoV035 struct {
	ID        int64                      `xorm:"pk autoincr 'id'"`
	IsTrusted bool                       `xorm:"'trusted'"` // legacy flag being migrated away from
	Trusted   model.TrustedConfiguration `xorm:"json 'trusted_conf'"`
}

// TableName returns the database table name xorm maps repoV035 to.
func (repoV035) TableName() string {
	return "repos"
}
var splitTrusted = xormigrate.Migration{
ID: "split-trusted",
MigrateSession: func(sess *xorm.Session) error {
if err := sess.Sync(new(repoV035)); err != nil {
type repos struct {
ID int64 `xorm:"pk autoincr 'id'"`
IsTrusted bool `xorm:"'trusted'"`
Trusted model.TrustedConfiguration `xorm:"json 'trusted_conf'"`
}
if err := sess.Sync(new(repos)); err != nil {
return fmt.Errorf("sync new models failed: %w", err)
}
if _, err := sess.Where("trusted = ?", false).Cols("trusted_conf").Update(&repoV035{
if _, err := sess.Where("trusted = ?", false).Cols("trusted_conf").Update(&repos{
Trusted: model.TrustedConfiguration{
Network: false,
Security: false,
@ -50,7 +46,7 @@ var splitTrusted = xormigrate.Migration{
return err
}
if _, err := sess.Where("trusted = ?", true).Cols("trusted_conf").Update(&repoV035{
if _, err := sess.Where("trusted = ?", true).Cols("trusted_conf").Update(&repos{
Trusted: model.TrustedConfiguration{
Network: true,
Security: true,

View file

@ -0,0 +1,59 @@
// Copyright 2024 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package migration
import (
"fmt"
"src.techknowlogick.com/xormigrate"
"xorm.io/xorm"
"xorm.io/xorm/schemas"
)
// correctPotentialCorruptOrgsUsersRelation repairs user→org links by
// re-pointing each user's org_id at the org whose name equals the user's
// login on the same forge. The UPDATE-with-JOIN syntax differs per database,
// so the statement is issued as raw SQL per dialect.
var correctPotentialCorruptOrgsUsersRelation = xormigrate.Migration{
	ID: "correct-potential-corrupt-orgs-users-relation",
	MigrateSession: func(sess *xorm.Session) error {
		// Minimal schema snapshots declaring only the columns this
		// migration reads or writes.
		type users struct {
			ID      int64  `xorm:"pk autoincr 'id'"`
			ForgeID int64  `xorm:"forge_id"`
			Login   string `xorm:"UNIQUE 'login'"`
			OrgID   int64  `xorm:"org_id"`
		}
		type orgs struct {
			ID      int64  `xorm:"pk autoincr 'id'"`
			ForgeID int64  `xorm:"forge_id"`
			Name    string `xorm:"UNIQUE 'name'"`
		}
		if err := sess.Sync(new(users), new(orgs)); err != nil {
			return fmt.Errorf("sync new models failed: %w", err)
		}

		dialect := sess.Engine().Dialect().URI().DBType

		var err error
		switch dialect {
		case schemas.MYSQL:
			_, err = sess.Exec(`UPDATE users u JOIN orgs o ON o.name = u.login AND o.forge_id = u.forge_id SET u.org_id = o.id;`)
		case schemas.POSTGRES:
			_, err = sess.Exec(`UPDATE users u SET org_id = o.id FROM orgs o WHERE o.name = u.login AND o.forge_id = u.forge_id;`)
		case schemas.SQLITE:
			// The outer filter must match on name AND forge_id, mirroring
			// the JOIN conditions of the other dialects. Filtering on the
			// login alone would overwrite org_id with NULL for a user whose
			// login matches an org belonging to a *different* forge, because
			// the correlated scalar subquery then yields no row.
			_, err = sess.Exec(`UPDATE users SET org_id = ( SELECT orgs.id FROM orgs WHERE orgs.name = users.login AND orgs.forge_id = users.forge_id ) WHERE EXISTS ( SELECT 1 FROM orgs WHERE orgs.name = users.login AND orgs.forge_id = users.forge_id );`)
		default:
			err = fmt.Errorf("dialect '%s' not supported", dialect)
		}
		return err
	},
}

View file

@ -46,6 +46,7 @@ var migrationTasks = []*xormigrate.Migration{
&addOrgAgents,
&addCustomLabelsToAgent,
&splitTrusted,
&correctPotentialCorruptOrgsUsersRelation,
}
var allBeans = []any{

View file

@ -19,13 +19,18 @@
}}</a>
</div>
<div class="flex ml-auto -m-1.5 items-center space-x-2">
<div v-if="user?.admin" class="relative">
<IconButton class="navbar-icon" :title="$t('settings')" :to="{ name: 'admin-settings' }" icon="settings" />
<IconButton
v-if="user?.admin"
class="navbar-icon relative"
:title="$t('settings')"
:to="{ name: 'admin-settings' }"
>
<Icon name="settings" />
<div
v-if="version?.needsUpdate"
class="absolute top-2 right-2 bg-int-wp-state-error-100 rounded-full w-3 h-3"
/>
</div>
</IconButton>
<ActivePipelines v-if="user" class="navbar-icon" />
<IconButton v-if="user" :to="{ name: 'user' }" :title="$t('user.settings.settings')" class="navbar-icon !p-1.5">
@ -41,6 +46,7 @@ import { useRoute } from 'vue-router';
import WoodpeckerLogo from '~/assets/logo.svg?component';
import Button from '~/components/atomic/Button.vue';
import Icon from '~/components/atomic/Icon.vue';
import IconButton from '~/components/atomic/IconButton.vue';
import useAuthentication from '~/compositions/useAuthentication';
import useConfig from '~/compositions/useConfig';