From 79e4dd5380b31380dde614ca0be501d76c2433e0 Mon Sep 17 00:00:00 2001
From: Henrik Huitti
Date: Tue, 22 Jul 2025 17:22:26 +0300
Subject: [PATCH] feat(k8s): Kubernetes namespace per organization (#5309)

---
 .../11-backends/20-kubernetes.md              |   9 ++
 pipeline/backend/kubernetes/flags.go          |   8 +-
 pipeline/backend/kubernetes/kubernetes.go     |  33 ++++-
 pipeline/backend/kubernetes/namespace.go      |  41 ++++++
 pipeline/backend/kubernetes/namespace_test.go | 120 ++++++++++++++++++
 pipeline/backend/kubernetes/pod.go            |   6 +-
 pipeline/backend/kubernetes/secrets.go        |   6 +-
 pipeline/backend/kubernetes/service.go        |   6 +-
 pipeline/backend/kubernetes/volume.go         |  14 +-
 pipeline/backend/kubernetes/volume_test.go    |  13 +-
 pipeline/backend/types/step.go                |   1 +
 server/pipeline/stepbuilder/stepBuilder.go    |   1 +
 12 files changed, 229 insertions(+), 29 deletions(-)
 create mode 100644 pipeline/backend/kubernetes/namespace.go
 create mode 100644 pipeline/backend/kubernetes/namespace_test.go

diff --git a/docs/docs/30-administration/10-configuration/11-backends/20-kubernetes.md b/docs/docs/30-administration/10-configuration/11-backends/20-kubernetes.md
index 63b0bc60c..f7afbc406 100644
--- a/docs/docs/30-administration/10-configuration/11-backends/20-kubernetes.md
+++ b/docs/docs/30-administration/10-configuration/11-backends/20-kubernetes.md
@@ -308,6 +308,15 @@ The namespace to create worker Pods in.
 
 ---
 
+### BACKEND_K8S_NAMESPACE_PER_ORGANIZATION
+
+- Name: `WOODPECKER_BACKEND_K8S_NAMESPACE_PER_ORGANIZATION`
+- Default: `false`
+
+Enables namespace isolation per Woodpecker organization. When enabled, each organization gets its own dedicated Kubernetes namespace for improved security and resource isolation.
+
+With this feature enabled, Woodpecker creates a separate Kubernetes namespace for each organization using the format `{WOODPECKER_BACKEND_K8S_NAMESPACE}-{organization-id}`. Namespaces are created automatically when needed, but they are not automatically deleted when organizations are removed from Woodpecker.
+
 ### BACKEND_K8S_VOLUME_SIZE
 
 - Name: `WOODPECKER_BACKEND_K8S_VOLUME_SIZE`
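Editor's note: the naming rule documented above can be illustrated with a minimal sketch. The `orgNamespace` helper below is illustrative only and not part of this patch; the real implementation is the `GetNamespace` method added to `kubernetes.go` further down.

```go
package main

import (
	"fmt"
	"strings"
)

// orgNamespace mirrors the documented "{WOODPECKER_BACKEND_K8S_NAMESPACE}-{organization-id}"
// rule: the configured namespace acts as a prefix and the numeric
// organization ID is appended, lowercased to stay a valid namespace name.
func orgNamespace(base string, orgID int64) string {
	return strings.ToLower(fmt.Sprintf("%s-%d", base, orgID))
}

func main() {
	// With WOODPECKER_BACKEND_K8S_NAMESPACE=woodpecker and organization ID 42,
	// workloads run in the namespace "woodpecker-42".
	fmt.Println(orgNamespace("woodpecker", 42))
}
```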
diff --git a/pipeline/backend/kubernetes/flags.go b/pipeline/backend/kubernetes/flags.go
index 910f52624..d866347a1 100644
--- a/pipeline/backend/kubernetes/flags.go
+++ b/pipeline/backend/kubernetes/flags.go
@@ -22,9 +22,15 @@ var Flags = []cli.Flag{
 	&cli.StringFlag{
 		Sources: cli.EnvVars("WOODPECKER_BACKEND_K8S_NAMESPACE"),
 		Name:    "backend-k8s-namespace",
-		Usage:   "backend k8s namespace",
+		Usage:   "backend k8s namespace; if used with WOODPECKER_BACKEND_K8S_NAMESPACE_PER_ORGANIZATION, this is used as the namespace prefix, with the organization ID appended",
 		Value:   "woodpecker",
 	},
+	&cli.BoolFlag{
+		Sources: cli.EnvVars("WOODPECKER_BACKEND_K8S_NAMESPACE_PER_ORGANIZATION"),
+		Name:    "backend-k8s-namespace-per-org",
+		Usage:   "whether to enable the namespace-per-organization feature; when enabled, Woodpecker creates the Kubernetes resources in a separate Kubernetes namespace per Woodpecker organization",
+		Value:   false,
+	},
 	&cli.StringFlag{
 		Sources: cli.EnvVars("WOODPECKER_BACKEND_K8S_VOLUME_SIZE"),
 		Name:    "backend-k8s-volume-size",
diff --git a/pipeline/backend/kubernetes/kubernetes.go b/pipeline/backend/kubernetes/kubernetes.go
index 24470842e..a5919f898 100644
--- a/pipeline/backend/kubernetes/kubernetes.go
+++ b/pipeline/backend/kubernetes/kubernetes.go
@@ -23,6 +23,8 @@ import (
 	"os"
 	"runtime"
 	"slices"
+	"strconv"
+	"strings"
 	"time"
 
 	"github.com/rs/zerolog/log"
@@ -56,6 +58,7 @@ type kube struct {
 
 type config struct {
 	Namespace                  string
+	EnableNamespacePerOrg      bool
 	StorageClass               string
 	VolumeSize                 string
 	StorageRwx                 bool
@@ -68,6 +71,14 @@ type config struct {
 	SecurityContext            SecurityContextConfig
 	NativeSecretsAllowFromStep bool
 }
+
+func (c *config) GetNamespace(orgID int64) string {
+	if c.EnableNamespacePerOrg {
+		return strings.ToLower(fmt.Sprintf("%s-%s", c.Namespace, strconv.FormatInt(orgID, 10)))
+	}
+	return c.Namespace
+}
+
 type SecurityContextConfig struct {
 	RunAsNonRoot bool
 	FSGroup      *int64
@@ -88,6 +99,7 @@ func configFromCliContext(ctx context.Context) (*config, error) {
 		if c, ok := ctx.Value(types.CliCommand).(*cli.Command); ok {
 			config := config{
 				Namespace:                  c.String("backend-k8s-namespace"),
+				EnableNamespacePerOrg:      c.Bool("backend-k8s-namespace-per-org"),
 				StorageClass:               c.String("backend-k8s-storage-class"),
 				VolumeSize:                 c.String("backend-k8s-volume-size"),
 				StorageRwx:                 c.Bool("backend-k8s-storage-rwx"),
@@ -191,7 +203,16 @@ func (e *kube) getConfig() *config {
 func (e *kube) SetupWorkflow(ctx context.Context, conf *types.Config, taskUUID string) error {
 	log.Trace().Str("taskUUID", taskUUID).Msgf("Setting up Kubernetes primitives")
 
-	_, err := startVolume(ctx, e, conf.Volume)
+	namespace := e.config.GetNamespace(conf.Stages[0].Steps[0].OrgID)
+
+	if e.config.EnableNamespacePerOrg {
+		err := mkNamespace(ctx, e.client.CoreV1().Namespaces(), namespace)
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err := startVolume(ctx, e, conf.Volume, namespace)
 	if err != nil {
 		return err
 	}
@@ -276,7 +297,7 @@ func (e *kube) WaitStep(ctx context.Context, step *types.Step, taskUUID string)
 		}
 	}
 
-	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.Namespace))
+	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.GetNamespace(step.OrgID)))
 	if _, err := si.Core().V1().Pods().Informer().AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			UpdateFunc: podUpdated,
@@ -292,7 +313,7 @@ func (e *kube) WaitStep(ctx context.Context, step *types.Step, taskUUID string)
 	// TODO: Cancel on ctx.Done
 	<-finished
 
-	pod, err := e.client.CoreV1().Pods(e.config.Namespace).Get(ctx, podName, meta_v1.GetOptions{})
+	pod, err := e.client.CoreV1().Pods(e.config.GetNamespace(step.OrgID)).Get(ctx, podName, meta_v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -351,7 +372,7 @@ func (e *kube) TailStep(ctx context.Context, step *types.Step, taskUUID string)
 		}
 	}
 
-	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.Namespace))
+	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.GetNamespace(step.OrgID)))
 	if _, err := si.Core().V1().Pods().Informer().AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			UpdateFunc: podUpdated,
@@ -372,7 +393,7 @@ func (e *kube) TailStep(ctx context.Context, step *types.Step, taskUUID string)
 	}
 
 	logs, err := e.client.CoreV1().RESTClient().Get().
-		Namespace(e.config.Namespace).
+		Namespace(e.config.GetNamespace(step.OrgID)).
 		Name(podName).
 		Resource("pods").
 		SubResource("log").
@@ -439,7 +460,7 @@ func (e *kube) DestroyWorkflow(ctx context.Context, conf *types.Config, taskUUID
 		}
 	}
 
-	err := stopVolume(ctx, e, conf.Volume, defaultDeleteOptions)
+	err := stopVolume(ctx, e, conf.Volume, e.config.GetNamespace(conf.Stages[0].Steps[0].OrgID), defaultDeleteOptions)
 	if err != nil {
 		return err
 	}
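Editor's note: the `SetupWorkflow` and `DestroyWorkflow` hunks above derive the namespace from `conf.Stages[0].Steps[0].OrgID`, which assumes every workflow carries at least one stage with at least one step. A defensive variant could look like the sketch below; `workflowNamespace` is illustrative and not part of the patch, and the module import path is assumed from the v3 layout.

```go
package kubernetes

import (
	"fmt"

	backend "go.woodpecker-ci.org/woodpecker/v3/pipeline/backend/types"
)

// workflowNamespace resolves the per-workflow namespace, guarding against a
// workflow without stages or steps before indexing into the slices.
func workflowNamespace(c *config, conf *backend.Config) (string, error) {
	if len(conf.Stages) == 0 || len(conf.Stages[0].Steps) == 0 {
		return "", fmt.Errorf("workflow has no steps")
	}
	return c.GetNamespace(conf.Stages[0].Steps[0].OrgID), nil
}
```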
diff --git a/pipeline/backend/kubernetes/namespace.go b/pipeline/backend/kubernetes/namespace.go
new file mode 100644
index 000000000..d5ddfaaa1
--- /dev/null
+++ b/pipeline/backend/kubernetes/namespace.go
@@ -0,0 +1,41 @@
+package kubernetes
+
+import (
+	"context"
+
+	"github.com/rs/zerolog/log"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type K8sNamespaceClient interface {
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error)
+	Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error)
+}
+
+func mkNamespace(ctx context.Context, client K8sNamespaceClient, namespace string) error {
+	_, err := client.Get(ctx, namespace, metav1.GetOptions{})
+	if err == nil {
+		log.Trace().Str("namespace", namespace).Msg("Kubernetes namespace already exists")
+		return nil
+	}
+
+	if !errors.IsNotFound(err) {
+		log.Trace().Err(err).Str("namespace", namespace).Msg("failed to check Kubernetes namespace existence")
+		return err
+	}
+
+	log.Trace().Str("namespace", namespace).Msg("creating Kubernetes namespace")
+
+	_, err = client.Create(ctx, &v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{Name: namespace},
+	}, metav1.CreateOptions{})
+	if err != nil {
+		log.Error().Err(err).Str("namespace", namespace).Msg("failed to create Kubernetes namespace")
+		return err
+	}
+
+	log.Trace().Str("namespace", namespace).Msg("Kubernetes namespace created successfully")
+	return nil
+}
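Editor's note: two agents handling workflows for the same organization can race between the `Get` and the `Create` above, in which case the losing `Create` fails with an AlreadyExists error and the build errors out. A tolerant variant (a sketch, not part of the patch) would treat that as success. Note also that creating namespaces at runtime requires the agent's service account to hold cluster-scoped get/create permissions on namespaces.

```go
package kubernetes

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// mkNamespaceIdempotent is an illustrative variant of mkNamespace that
// tolerates a concurrent create by another agent.
func mkNamespaceIdempotent(ctx context.Context, client K8sNamespaceClient, namespace string) error {
	if _, err := client.Get(ctx, namespace, metav1.GetOptions{}); err == nil {
		return nil // namespace already exists
	} else if !errors.IsNotFound(err) {
		return err // unexpected lookup failure
	}
	_, err := client.Create(ctx, &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: namespace},
	}, metav1.CreateOptions{})
	if errors.IsAlreadyExists(err) {
		return nil // lost the race; the namespace exists now, which is fine
	}
	return err
}
```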
"existing-namespace", + setupMock: func(m *mockNamespaceClient) { + m.getError = nil // namespace exists + }, + expectError: false, + expectGetCalled: true, + expectCreateCalled: false, + }, + { + name: "should create namespace when it doesn't exist", + namespace: "new-namespace", + setupMock: func(m *mockNamespaceClient) { + m.getError = k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "new-namespace") + m.createError = nil + }, + expectError: false, + expectGetCalled: true, + expectCreateCalled: true, + }, + { + name: "should fail when Get namespace returns generic error", + namespace: "error-namespace", + setupMock: func(m *mockNamespaceClient) { + m.getError = errors.New("api server unavailable") + }, + expectError: true, + errorContains: "api server unavailable", + expectGetCalled: true, + expectCreateCalled: false, + }, + { + name: "should fail when Create namespace returns error", + namespace: "create-fail-namespace", + setupMock: func(m *mockNamespaceClient) { + m.getError = k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "create-fail-namespace") + m.createError = errors.New("insufficient permissions") + }, + expectError: true, + errorContains: "insufficient permissions", + expectGetCalled: true, + expectCreateCalled: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := &mockNamespaceClient{} + tt.setupMock(client) + + err := mkNamespace(t.Context(), client, tt.namespace) + + if tt.expectError { + assert.Error(t, err) + if tt.errorContains != "" { + assert.Contains(t, err.Error(), tt.errorContains) + } + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tt.expectGetCalled, client.getCalled, "Get call expectation") + assert.Equal(t, tt.expectCreateCalled, client.createCalled, "Create call expectation") + + if tt.expectCreateCalled && client.createCalled { + assert.NotNil(t, client.createdNS, "Created namespace should not be nil") + assert.Equal(t, tt.namespace, client.createdNS.Name, "Created namespace should have correct name") + } + }) + } +} diff --git a/pipeline/backend/kubernetes/pod.go b/pipeline/backend/kubernetes/pod.go index c1a9e8b9f..d595e722b 100644 --- a/pipeline/backend/kubernetes/pod.go +++ b/pipeline/backend/kubernetes/pod.go @@ -88,7 +88,7 @@ func podMeta(step *types.Step, config *config, options BackendOptions, podName s var err error meta := meta_v1.ObjectMeta{ Name: podName, - Namespace: config.Namespace, + Namespace: config.GetNamespace(step.OrgID), Annotations: podAnnotations(config, options), } @@ -598,7 +598,7 @@ func startPod(ctx context.Context, engine *kube, step *types.Step, options Backe } log.Trace().Msgf("creating pod: %s", pod.Name) - return engine.client.CoreV1().Pods(engineConfig.Namespace).Create(ctx, pod, meta_v1.CreateOptions{}) + return engine.client.CoreV1().Pods(engineConfig.GetNamespace(step.OrgID)).Create(ctx, pod, meta_v1.CreateOptions{}) } func stopPod(ctx context.Context, engine *kube, step *types.Step, deleteOpts meta_v1.DeleteOptions) error { @@ -609,7 +609,7 @@ func stopPod(ctx context.Context, engine *kube, step *types.Step, deleteOpts met log.Trace().Str("name", podName).Msg("deleting pod") - err = engine.client.CoreV1().Pods(engine.config.Namespace).Delete(ctx, podName, deleteOpts) + err = engine.client.CoreV1().Pods(engine.config.GetNamespace(step.OrgID)).Delete(ctx, podName, deleteOpts) if errors.IsNotFound(err) { // Don't abort on 404 errors from k8s, they most likely mean that the pod hasn't been created yet, usually because pipeline was 
diff --git a/pipeline/backend/kubernetes/pod.go b/pipeline/backend/kubernetes/pod.go
index c1a9e8b9f..d595e722b 100644
--- a/pipeline/backend/kubernetes/pod.go
+++ b/pipeline/backend/kubernetes/pod.go
@@ -88,7 +88,7 @@ func podMeta(step *types.Step, config *config, options BackendOptions, podName s
 	var err error
 	meta := meta_v1.ObjectMeta{
 		Name:        podName,
-		Namespace:   config.Namespace,
+		Namespace:   config.GetNamespace(step.OrgID),
 		Annotations: podAnnotations(config, options),
 	}
 
@@ -598,7 +598,7 @@ func startPod(ctx context.Context, engine *kube, step *types.Step, options Backe
 	}
 
 	log.Trace().Msgf("creating pod: %s", pod.Name)
-	return engine.client.CoreV1().Pods(engineConfig.Namespace).Create(ctx, pod, meta_v1.CreateOptions{})
+	return engine.client.CoreV1().Pods(engineConfig.GetNamespace(step.OrgID)).Create(ctx, pod, meta_v1.CreateOptions{})
 }
 
 func stopPod(ctx context.Context, engine *kube, step *types.Step, deleteOpts meta_v1.DeleteOptions) error {
@@ -609,7 +609,7 @@ func stopPod(ctx context.Context, engine *kube, step *types.Step, deleteOpts met
 
 	log.Trace().Str("name", podName).Msg("deleting pod")
 
-	err = engine.client.CoreV1().Pods(engine.config.Namespace).Delete(ctx, podName, deleteOpts)
+	err = engine.client.CoreV1().Pods(engine.config.GetNamespace(step.OrgID)).Delete(ctx, podName, deleteOpts)
 	if errors.IsNotFound(err) {
 		// Don't abort on 404 errors from k8s, they most likely mean that the pod hasn't been created yet, usually because pipeline was canceled before running all steps.
 		return nil
diff --git a/pipeline/backend/kubernetes/secrets.go b/pipeline/backend/kubernetes/secrets.go
index 560a6f873..6a707706f 100644
--- a/pipeline/backend/kubernetes/secrets.go
+++ b/pipeline/backend/kubernetes/secrets.go
@@ -237,7 +237,7 @@ func mkRegistrySecret(step *types.Step, config *config) (*v1.Secret, error) {
 
 	return &v1.Secret{
 		ObjectMeta: meta_v1.ObjectMeta{
-			Namespace: config.Namespace,
+			Namespace: config.GetNamespace(step.OrgID),
 			Name:      name,
 			Labels:    labels,
 		},
@@ -288,7 +288,7 @@ func startRegistrySecret(ctx context.Context, engine *kube, step *types.Step) er
 		return err
 	}
 	log.Trace().Msgf("creating secret: %s", secret.Name)
-	_, err = engine.client.CoreV1().Secrets(engine.config.Namespace).Create(ctx, secret, meta_v1.CreateOptions{})
+	_, err = engine.client.CoreV1().Secrets(engine.config.GetNamespace(step.OrgID)).Create(ctx, secret, meta_v1.CreateOptions{})
 	if err != nil {
 		return err
 	}
@@ -302,7 +302,7 @@ func stopRegistrySecret(ctx context.Context, engine *kube, step *types.Step, del
 	}
 
 	log.Trace().Str("name", name).Msg("deleting secret")
-	err = engine.client.CoreV1().Secrets(engine.config.Namespace).Delete(ctx, name, deleteOpts)
+	err = engine.client.CoreV1().Secrets(engine.config.GetNamespace(step.OrgID)).Delete(ctx, name, deleteOpts)
 	if errors.IsNotFound(err) {
 		return nil
 	}
diff --git a/pipeline/backend/kubernetes/service.go b/pipeline/backend/kubernetes/service.go
index 428684648..e3a7d57a7 100644
--- a/pipeline/backend/kubernetes/service.go
+++ b/pipeline/backend/kubernetes/service.go
@@ -52,7 +52,7 @@ func mkService(step *types.Step, config *config) (*v1.Service, error) {
 	return &v1.Service{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      name,
-			Namespace: config.Namespace,
+			Namespace: config.GetNamespace(step.OrgID),
 		},
 		Spec: v1.ServiceSpec{
 			Type: v1.ServiceTypeClusterIP,
@@ -85,7 +85,7 @@ func startService(ctx context.Context, engine *kube, step *types.Step) (*v1.Serv
 	}
 
 	log.Trace().Str("name", svc.Name).Interface("selector", svc.Spec.Selector).Interface("ports", svc.Spec.Ports).Msg("creating service")
-	return engine.client.CoreV1().Services(engineConfig.Namespace).Create(ctx, svc, meta_v1.CreateOptions{})
+	return engine.client.CoreV1().Services(engineConfig.GetNamespace(step.OrgID)).Create(ctx, svc, meta_v1.CreateOptions{})
 }
 
 func stopService(ctx context.Context, engine *kube, step *types.Step, deleteOpts meta_v1.DeleteOptions) error {
@@ -95,7 +95,7 @@ func stopService(ctx context.Context, engine *kube, step *types.Step, deleteOpts
 	}
 
 	log.Trace().Str("name", svcName).Msg("deleting service")
-	err = engine.client.CoreV1().Services(engine.config.Namespace).Delete(ctx, svcName, deleteOpts)
+	err = engine.client.CoreV1().Services(engine.config.GetNamespace(step.OrgID)).Delete(ctx, svcName, deleteOpts)
 	if errors.IsNotFound(err) {
 		// Don't abort on 404 errors from k8s, they most likely mean that the pod hasn't been created yet, usually because pipeline was canceled before running all steps.
 		log.Trace().Err(err).Msgf("unable to delete service %s", svcName)
diff --git a/pipeline/backend/kubernetes/volume.go b/pipeline/backend/kubernetes/volume.go
index 408c4c18c..ad2bd2ff0 100644
--- a/pipeline/backend/kubernetes/volume.go
+++ b/pipeline/backend/kubernetes/volume.go
@@ -25,7 +25,7 @@ import (
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func mkPersistentVolumeClaim(config *config, name string) (*v1.PersistentVolumeClaim, error) {
+func mkPersistentVolumeClaim(config *config, name, namespace string) (*v1.PersistentVolumeClaim, error) {
 	_storageClass := &config.StorageClass
 	if config.StorageClass == "" {
 		_storageClass = nil
@@ -47,7 +47,7 @@ func mkPersistentVolumeClaim(config *config, name string) (*v1.PersistentVolumeC
 	pvc := &v1.PersistentVolumeClaim{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      volumeName,
-			Namespace: config.Namespace,
+			Namespace: namespace,
 		},
 		Spec: v1.PersistentVolumeClaimSpec{
 			AccessModes: []v1.PersistentVolumeAccessMode{accessMode},
@@ -75,25 +75,25 @@ func volumeMountPath(name string) string {
 	return s[0]
 }
 
-func startVolume(ctx context.Context, engine *kube, name string) (*v1.PersistentVolumeClaim, error) {
+func startVolume(ctx context.Context, engine *kube, name, namespace string) (*v1.PersistentVolumeClaim, error) {
 	engineConfig := engine.getConfig()
-	pvc, err := mkPersistentVolumeClaim(engineConfig, name)
+	pvc, err := mkPersistentVolumeClaim(engineConfig, name, namespace)
 	if err != nil {
 		return nil, err
 	}
 
 	log.Trace().Msgf("creating volume: %s", pvc.Name)
-	return engine.client.CoreV1().PersistentVolumeClaims(engineConfig.Namespace).Create(ctx, pvc, meta_v1.CreateOptions{})
+	return engine.client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, meta_v1.CreateOptions{})
 }
 
-func stopVolume(ctx context.Context, engine *kube, name string, deleteOpts meta_v1.DeleteOptions) error {
+func stopVolume(ctx context.Context, engine *kube, name, namespace string, deleteOpts meta_v1.DeleteOptions) error {
 	pvcName, err := volumeName(name)
 	if err != nil {
 		return err
 	}
 
 	log.Trace().Str("name", pvcName).Msg("deleting volume")
-	err = engine.client.CoreV1().PersistentVolumeClaims(engine.config.Namespace).Delete(ctx, pvcName, deleteOpts)
+	err = engine.client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, deleteOpts)
 	if errors.IsNotFound(err) {
 		// Don't abort on 404 errors from k8s, they most likely mean that the pod hasn't been created yet, usually because pipeline was canceled before running all steps.
 		log.Trace().Err(err).Msgf("unable to delete service %s", pvcName)
diff --git a/pipeline/backend/kubernetes/volume_test.go b/pipeline/backend/kubernetes/volume_test.go
index 07ec7c30e..311ed4f9b 100644
--- a/pipeline/backend/kubernetes/volume_test.go
+++ b/pipeline/backend/kubernetes/volume_test.go
@@ -42,6 +42,7 @@ func TestPvcMount(t *testing.T) {
 }
 
 func TestPersistentVolumeClaim(t *testing.T) {
+	namespace := "someNamespace"
 	expectedRwx := `
 	{
 	  "metadata": {
@@ -85,11 +86,11 @@ func TestPersistentVolumeClaim(t *testing.T) {
 	}`
 
 	pvc, err := mkPersistentVolumeClaim(&config{
-		Namespace:    "someNamespace",
+		Namespace:    namespace,
 		StorageClass: "local-storage",
 		VolumeSize:   "1Gi",
 		StorageRwx:   true,
-	}, "somename")
+	}, "somename", namespace)
 	assert.NoError(t, err)
 
 	j, err := json.Marshal(pvc)
@@ -97,11 +98,11 @@ func TestPersistentVolumeClaim(t *testing.T) {
 	assert.JSONEq(t, expectedRwx, string(j))
 
 	pvc, err = mkPersistentVolumeClaim(&config{
-		Namespace:    "someNamespace",
+		Namespace:    namespace,
 		StorageClass: "local-storage",
 		VolumeSize:   "1Gi",
 		StorageRwx:   false,
-	}, "somename")
+	}, "somename", namespace)
 	assert.NoError(t, err)
 
 	j, err = json.Marshal(pvc)
@@ -109,10 +110,10 @@ func TestPersistentVolumeClaim(t *testing.T) {
 	assert.JSONEq(t, expectedRwo, string(j))
 
 	_, err = mkPersistentVolumeClaim(&config{
-		Namespace:    "someNamespace",
+		Namespace:    namespace,
 		StorageClass: "local-storage",
 		VolumeSize:   "1Gi",
 		StorageRwx:   false,
-	}, "some0..INVALID3name")
+	}, "some0..INVALID3name", namespace)
 	assert.Error(t, err)
 }
diff --git a/pipeline/backend/types/step.go b/pipeline/backend/types/step.go
index 79e38c44c..57adde715 100644
--- a/pipeline/backend/types/step.go
+++ b/pipeline/backend/types/step.go
@@ -17,6 +17,7 @@ package types
 // Step defines a container process.
 type Step struct {
 	Name  string   `json:"name"`
+	OrgID int64    `json:"org_id,omitempty"`
 	UUID  string   `json:"uuid"`
 	Type  StepType `json:"type,omitempty"`
 	Image string   `json:"image,omitempty"`
diff --git a/server/pipeline/stepbuilder/stepBuilder.go b/server/pipeline/stepbuilder/stepBuilder.go
index 173bbfb5a..192bdc387 100644
--- a/server/pipeline/stepbuilder/stepBuilder.go
+++ b/server/pipeline/stepbuilder/stepBuilder.go
@@ -216,6 +216,7 @@ func (b *StepBuilder) genItemForWorkflow(workflow *model.Workflow, axis matrix.A
 	for stageI := range item.Config.Stages {
 		for stepI := range item.Config.Stages[stageI].Steps {
 			item.Config.Stages[stageI].Steps[stepI].WorkflowLabels = item.Labels
+			item.Config.Stages[stageI].Steps[stepI].OrgID = b.Repo.OrgID
 		}
 	}
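Editor's note: the `OrgID` field added to `Step` above is how the server-side `StepBuilder` hands the organization to the agent, letting the Kubernetes backend derive the namespace without any extra server lookup. A minimal sketch of the resulting wire format follows (illustrative; the module import path is assumed from the v3 layout):

```go
package main

import (
	"encoding/json"
	"fmt"

	backend "go.woodpecker-ci.org/woodpecker/v3/pipeline/backend/types"
)

func main() {
	step := backend.Step{Name: "build", UUID: "some-uuid", OrgID: 42}
	out, _ := json.Marshal(step)
	// org_id rides along with every step; with omitempty it is simply
	// absent when the value is zero.
	fmt.Println(string(out))
}
```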