Mirror of https://github.com/woodpecker-ci/woodpecker.git, synced 2025-09-03 20:23:23 +00:00
feat(k8s): Kubernetes namespace per organization (#5309)
@@ -308,6 +308,15 @@ The namespace to create worker Pods in.
 
 ---
 
+### BACKEND_K8S_NAMESPACE_PER_ORGANIZATION
+
+- Name: `WOODPECKER_BACKEND_K8S_NAMESPACE_PER_ORGANIZATION`
+- Default: `false`
+
+Enables namespace isolation per Woodpecker organization. When enabled, each organization gets its own dedicated Kubernetes namespace for improved security and resource isolation.
+
+With this feature enabled, Woodpecker creates separate Kubernetes namespaces for each organization using the format `{WOODPECKER_BACKEND_K8S_NAMESPACE}-{organization-id}`. Namespaces are created automatically when needed, but they are not automatically deleted when organizations are removed from Woodpecker.
+
 ### BACKEND_K8S_VOLUME_SIZE
 
 - Name: `WOODPECKER_BACKEND_K8S_VOLUME_SIZE`
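To make the documented naming scheme concrete, here is a minimal Go sketch. It is not part of this change; the helper name `orgNamespace` and the organization ID are illustrative, and the logic simply mirrors the `{WOODPECKER_BACKEND_K8S_NAMESPACE}-{organization-id}` format described above.

```go
package main

import (
	"fmt"
	"strings"
)

// orgNamespace is a hypothetical helper mirroring the documented scheme:
// the configured namespace acts as a prefix and the organization ID is appended.
func orgNamespace(prefix string, orgID int64) string {
	return strings.ToLower(fmt.Sprintf("%s-%d", prefix, orgID))
}

func main() {
	// With the default WOODPECKER_BACKEND_K8S_NAMESPACE=woodpecker and an
	// (illustrative) organization ID of 42, worker pods run in "woodpecker-42".
	fmt.Println(orgNamespace("woodpecker", 42))
}
```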
@@ -22,9 +22,15 @@ var Flags = []cli.Flag{
 	&cli.StringFlag{
 		Sources: cli.EnvVars("WOODPECKER_BACKEND_K8S_NAMESPACE"),
 		Name:    "backend-k8s-namespace",
-		Usage:   "backend k8s namespace",
+		Usage:   "backend k8s namespace; when used with WOODPECKER_BACKEND_K8S_NAMESPACE_PER_ORGANIZATION, this is the prefix to which the organization ID is appended",
 		Value:   "woodpecker",
 	},
+	&cli.BoolFlag{
+		Sources: cli.EnvVars("WOODPECKER_BACKEND_K8S_NAMESPACE_PER_ORGANIZATION"),
+		Name:    "backend-k8s-namespace-per-org",
+		Usage:   "enable namespace isolation per organization; when enabled, Woodpecker creates its Kubernetes resources in a separate namespace per Woodpecker organization",
+		Value:   false,
+	},
 	&cli.StringFlag{
 		Sources: cli.EnvVars("WOODPECKER_BACKEND_K8S_VOLUME_SIZE"),
 		Name:    "backend-k8s-volume-size",
@@ -23,6 +23,8 @@ import (
 	"os"
 	"runtime"
 	"slices"
+	"strconv"
+	"strings"
 	"time"
 
 	"github.com/rs/zerolog/log"
@@ -56,6 +58,7 @@ type kube struct {
 
 type config struct {
 	Namespace string
+	EnableNamespacePerOrg bool
 	StorageClass string
 	VolumeSize string
 	StorageRwx bool
@@ -68,6 +71,14 @@ type config struct {
 	SecurityContext SecurityContextConfig
 	NativeSecretsAllowFromStep bool
 }
 
+func (c *config) GetNamespace(orgID int64) string {
+	if c.EnableNamespacePerOrg {
+		return strings.ToLower(fmt.Sprintf("%s-%s", c.Namespace, strconv.FormatInt(orgID, 10)))
+	}
+
+	return c.Namespace
+}
+
 type SecurityContextConfig struct {
 	RunAsNonRoot bool
 	FSGroup *int64
@@ -88,6 +99,7 @@ func configFromCliContext(ctx context.Context) (*config, error) {
 	if c, ok := ctx.Value(types.CliCommand).(*cli.Command); ok {
 		config := config{
 			Namespace: c.String("backend-k8s-namespace"),
+			EnableNamespacePerOrg: c.Bool("backend-k8s-namespace-per-org"),
 			StorageClass: c.String("backend-k8s-storage-class"),
 			VolumeSize: c.String("backend-k8s-volume-size"),
 			StorageRwx: c.Bool("backend-k8s-storage-rwx"),
@@ -191,7 +203,16 @@ func (e *kube) getConfig() *config {
 func (e *kube) SetupWorkflow(ctx context.Context, conf *types.Config, taskUUID string) error {
 	log.Trace().Str("taskUUID", taskUUID).Msgf("Setting up Kubernetes primitives")
 
-	_, err := startVolume(ctx, e, conf.Volume)
+	namespace := e.config.GetNamespace(conf.Stages[0].Steps[0].OrgID)
+
+	if e.config.EnableNamespacePerOrg {
+		err := mkNamespace(ctx, e.client.CoreV1().Namespaces(), namespace)
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err := startVolume(ctx, e, conf.Volume, namespace)
 	if err != nil {
 		return err
 	}
@@ -276,7 +297,7 @@ func (e *kube) WaitStep(ctx context.Context, step *types.Step, taskUUID string)
 		}
 	}
 
-	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.Namespace))
+	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.GetNamespace(step.OrgID)))
 	if _, err := si.Core().V1().Pods().Informer().AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			UpdateFunc: podUpdated,
@@ -292,7 +313,7 @@ func (e *kube) WaitStep(ctx context.Context, step *types.Step, taskUUID string)
 	// TODO: Cancel on ctx.Done
 	<-finished
 
-	pod, err := e.client.CoreV1().Pods(e.config.Namespace).Get(ctx, podName, meta_v1.GetOptions{})
+	pod, err := e.client.CoreV1().Pods(e.config.GetNamespace(step.OrgID)).Get(ctx, podName, meta_v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -351,7 +372,7 @@ func (e *kube) TailStep(ctx context.Context, step *types.Step, taskUUID string)
 		}
 	}
 
-	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.Namespace))
+	si := informers.NewSharedInformerFactoryWithOptions(e.client, defaultResyncDuration, informers.WithNamespace(e.config.GetNamespace(step.OrgID)))
 	if _, err := si.Core().V1().Pods().Informer().AddEventHandler(
 		cache.ResourceEventHandlerFuncs{
 			UpdateFunc: podUpdated,
@@ -372,7 +393,7 @@ func (e *kube) TailStep(ctx context.Context, step *types.Step, taskUUID string)
 	}
 
 	logs, err := e.client.CoreV1().RESTClient().Get().
-		Namespace(e.config.Namespace).
+		Namespace(e.config.GetNamespace(step.OrgID)).
 		Name(podName).
 		Resource("pods").
 		SubResource("log").
@@ -439,7 +460,7 @@ func (e *kube) DestroyWorkflow(ctx context.Context, conf *types.Config, taskUUID
 		}
 	}
 
-	err := stopVolume(ctx, e, conf.Volume, defaultDeleteOptions)
+	err := stopVolume(ctx, e, conf.Volume, e.config.GetNamespace(conf.Stages[0].Steps[0].OrgID), defaultDeleteOptions)
 	if err != nil {
 		return err
 	}
pipeline/backend/kubernetes/namespace.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package kubernetes

import (
	"context"

	"github.com/rs/zerolog/log"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type K8sNamespaceClient interface {
	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error)
	Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error)
}

func mkNamespace(ctx context.Context, client K8sNamespaceClient, namespace string) error {
	_, err := client.Get(ctx, namespace, metav1.GetOptions{})
	if err == nil {
		log.Trace().Str("namespace", namespace).Msg("Kubernetes namespace already exists")
		return nil
	}

	if !errors.IsNotFound(err) {
		log.Trace().Err(err).Str("namespace", namespace).Msg("failed to check Kubernetes namespace existence")
		return err
	}

	log.Trace().Str("namespace", namespace).Msg("creating Kubernetes namespace")

	_, err = client.Create(ctx, &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: namespace},
	}, metav1.CreateOptions{})
	if err != nil {
		log.Error().Err(err).Str("namespace", namespace).Msg("failed to create Kubernetes namespace")
		return err
	}

	log.Trace().Str("namespace", namespace).Msg("Kubernetes namespace created successfully")
	return nil
}
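For reference, a hedged usage sketch of `mkNamespace` (not part of this change). It assumes it lives in the same `kubernetes` package, for example as an extra test, since `mkNamespace` is unexported, and it uses the fake clientset from `k8s.io/client-go/kubernetes/fake`, whose `CoreV1().Namespaces()` client satisfies the `K8sNamespaceClient` interface. The namespace name is illustrative.

```go
package kubernetes

import (
	"context"
	"testing"

	"k8s.io/client-go/kubernetes/fake"
)

// TestMkNamespaceWithFakeClientset is a hypothetical example showing that
// mkNamespace is idempotent against a client-go style API: the first call
// creates the namespace, the second finds it and returns nil.
func TestMkNamespaceWithFakeClientset(t *testing.T) {
	client := fake.NewSimpleClientset()
	nsClient := client.CoreV1().Namespaces() // satisfies K8sNamespaceClient

	const ns = "woodpecker-42" // illustrative per-organization namespace

	if err := mkNamespace(context.Background(), nsClient, ns); err != nil {
		t.Fatalf("first call should create the namespace: %v", err)
	}
	if err := mkNamespace(context.Background(), nsClient, ns); err != nil {
		t.Fatalf("second call should be a no-op: %v", err)
	}
}
```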
pipeline/backend/kubernetes/namespace_test.go (new file, 120 lines)
@@ -0,0 +1,120 @@
package kubernetes

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type mockNamespaceClient struct {
	getError     error
	createError  error
	getCalled    bool
	createCalled bool
	createdNS    *v1.Namespace
}

func (m *mockNamespaceClient) Get(_ context.Context, name string, _ metav1.GetOptions) (*v1.Namespace, error) {
	m.getCalled = true
	if m.getError != nil {
		return nil, m.getError
	}
	return &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	}, nil
}

func (m *mockNamespaceClient) Create(_ context.Context, ns *v1.Namespace, _ metav1.CreateOptions) (*v1.Namespace, error) {
	m.createCalled = true
	m.createdNS = ns
	return ns, m.createError
}

func TestMkNamespace(t *testing.T) {
	tests := []struct {
		name               string
		namespace          string
		setupMock          func(*mockNamespaceClient)
		expectError        bool
		errorContains      string
		expectGetCalled    bool
		expectCreateCalled bool
	}{
		{
			name:      "should succeed when namespace already exists",
			namespace: "existing-namespace",
			setupMock: func(m *mockNamespaceClient) {
				m.getError = nil // namespace exists
			},
			expectError:        false,
			expectGetCalled:    true,
			expectCreateCalled: false,
		},
		{
			name:      "should create namespace when it doesn't exist",
			namespace: "new-namespace",
			setupMock: func(m *mockNamespaceClient) {
				m.getError = k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "new-namespace")
				m.createError = nil
			},
			expectError:        false,
			expectGetCalled:    true,
			expectCreateCalled: true,
		},
		{
			name:      "should fail when Get namespace returns generic error",
			namespace: "error-namespace",
			setupMock: func(m *mockNamespaceClient) {
				m.getError = errors.New("api server unavailable")
			},
			expectError:        true,
			errorContains:      "api server unavailable",
			expectGetCalled:    true,
			expectCreateCalled: false,
		},
		{
			name:      "should fail when Create namespace returns error",
			namespace: "create-fail-namespace",
			setupMock: func(m *mockNamespaceClient) {
				m.getError = k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "create-fail-namespace")
				m.createError = errors.New("insufficient permissions")
			},
			expectError:        true,
			errorContains:      "insufficient permissions",
			expectGetCalled:    true,
			expectCreateCalled: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := &mockNamespaceClient{}
			tt.setupMock(client)

			err := mkNamespace(t.Context(), client, tt.namespace)

			if tt.expectError {
				assert.Error(t, err)
				if tt.errorContains != "" {
					assert.Contains(t, err.Error(), tt.errorContains)
				}
			} else {
				assert.NoError(t, err)
			}

			assert.Equal(t, tt.expectGetCalled, client.getCalled, "Get call expectation")
			assert.Equal(t, tt.expectCreateCalled, client.createCalled, "Create call expectation")

			if tt.expectCreateCalled && client.createCalled {
				assert.NotNil(t, client.createdNS, "Created namespace should not be nil")
				assert.Equal(t, tt.namespace, client.createdNS.Name, "Created namespace should have correct name")
			}
		})
	}
}
@@ -88,7 +88,7 @@ func podMeta(step *types.Step, config *config, options BackendOptions, podName s
 	var err error
 	meta := meta_v1.ObjectMeta{
 		Name: podName,
-		Namespace: config.Namespace,
+		Namespace: config.GetNamespace(step.OrgID),
 		Annotations: podAnnotations(config, options),
 	}
 
@@ -598,7 +598,7 @@ func startPod(ctx context.Context, engine *kube, step *types.Step, options Backe
 	}
 
 	log.Trace().Msgf("creating pod: %s", pod.Name)
-	return engine.client.CoreV1().Pods(engineConfig.Namespace).Create(ctx, pod, meta_v1.CreateOptions{})
+	return engine.client.CoreV1().Pods(engineConfig.GetNamespace(step.OrgID)).Create(ctx, pod, meta_v1.CreateOptions{})
 }
 
 func stopPod(ctx context.Context, engine *kube, step *types.Step, deleteOpts meta_v1.DeleteOptions) error {
@@ -609,7 +609,7 @@ func stopPod(ctx context.Context, engine *kube, step *types.Step, deleteOpts met
 
 	log.Trace().Str("name", podName).Msg("deleting pod")
 
-	err = engine.client.CoreV1().Pods(engine.config.Namespace).Delete(ctx, podName, deleteOpts)
+	err = engine.client.CoreV1().Pods(engine.config.GetNamespace(step.OrgID)).Delete(ctx, podName, deleteOpts)
 	if errors.IsNotFound(err) {
 		// Don't abort on 404 errors from k8s, they most likely mean that the pod hasn't been created yet, usually because pipeline was canceled before running all steps.
 		return nil
@@ -237,7 +237,7 @@ func mkRegistrySecret(step *types.Step, config *config) (*v1.Secret, error) {
 
 	return &v1.Secret{
 		ObjectMeta: meta_v1.ObjectMeta{
-			Namespace: config.Namespace,
+			Namespace: config.GetNamespace(step.OrgID),
 			Name: name,
 			Labels: labels,
 		},
@@ -288,7 +288,7 @@ func startRegistrySecret(ctx context.Context, engine *kube, step *types.Step) er
 		return err
 	}
 	log.Trace().Msgf("creating secret: %s", secret.Name)
-	_, err = engine.client.CoreV1().Secrets(engine.config.Namespace).Create(ctx, secret, meta_v1.CreateOptions{})
+	_, err = engine.client.CoreV1().Secrets(engine.config.GetNamespace(step.OrgID)).Create(ctx, secret, meta_v1.CreateOptions{})
 	if err != nil {
 		return err
 	}
@@ -302,7 +302,7 @@ func stopRegistrySecret(ctx context.Context, engine *kube, step *types.Step, del
 	}
 	log.Trace().Str("name", name).Msg("deleting secret")
 
-	err = engine.client.CoreV1().Secrets(engine.config.Namespace).Delete(ctx, name, deleteOpts)
+	err = engine.client.CoreV1().Secrets(engine.config.GetNamespace(step.OrgID)).Delete(ctx, name, deleteOpts)
 	if errors.IsNotFound(err) {
 		return nil
 	}
@@ -52,7 +52,7 @@ func mkService(step *types.Step, config *config) (*v1.Service, error) {
 	return &v1.Service{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: name,
-			Namespace: config.Namespace,
+			Namespace: config.GetNamespace(step.OrgID),
 		},
 		Spec: v1.ServiceSpec{
 			Type: v1.ServiceTypeClusterIP,
@@ -85,7 +85,7 @@ func startService(ctx context.Context, engine *kube, step *types.Step) (*v1.Serv
 	}
 
 	log.Trace().Str("name", svc.Name).Interface("selector", svc.Spec.Selector).Interface("ports", svc.Spec.Ports).Msg("creating service")
-	return engine.client.CoreV1().Services(engineConfig.Namespace).Create(ctx, svc, meta_v1.CreateOptions{})
+	return engine.client.CoreV1().Services(engineConfig.GetNamespace(step.OrgID)).Create(ctx, svc, meta_v1.CreateOptions{})
 }
 
 func stopService(ctx context.Context, engine *kube, step *types.Step, deleteOpts meta_v1.DeleteOptions) error {
@@ -95,7 +95,7 @@ func stopService(ctx context.Context, engine *kube, step *types.Step, deleteOpts
 	}
 	log.Trace().Str("name", svcName).Msg("deleting service")
 
-	err = engine.client.CoreV1().Services(engine.config.Namespace).Delete(ctx, svcName, deleteOpts)
+	err = engine.client.CoreV1().Services(engine.config.GetNamespace(step.OrgID)).Delete(ctx, svcName, deleteOpts)
 	if errors.IsNotFound(err) {
 		// Don't abort on 404 errors from k8s, they most likely mean that the pod hasn't been created yet, usually because pipeline was canceled before running all steps.
 		log.Trace().Err(err).Msgf("unable to delete service %s", svcName)
@@ -25,7 +25,7 @@ import (
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func mkPersistentVolumeClaim(config *config, name string) (*v1.PersistentVolumeClaim, error) {
+func mkPersistentVolumeClaim(config *config, name, namespace string) (*v1.PersistentVolumeClaim, error) {
 	_storageClass := &config.StorageClass
 	if config.StorageClass == "" {
 		_storageClass = nil
@@ -47,7 +47,7 @@ func mkPersistentVolumeClaim(config *config, name string) (*v1.PersistentVolumeC
 	pvc := &v1.PersistentVolumeClaim{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: volumeName,
-			Namespace: config.Namespace,
+			Namespace: namespace,
 		},
 		Spec: v1.PersistentVolumeClaimSpec{
 			AccessModes: []v1.PersistentVolumeAccessMode{accessMode},
@@ -75,25 +75,25 @@ func volumeMountPath(name string) string {
 	return s[0]
 }
 
-func startVolume(ctx context.Context, engine *kube, name string) (*v1.PersistentVolumeClaim, error) {
+func startVolume(ctx context.Context, engine *kube, name, namespace string) (*v1.PersistentVolumeClaim, error) {
 	engineConfig := engine.getConfig()
-	pvc, err := mkPersistentVolumeClaim(engineConfig, name)
+	pvc, err := mkPersistentVolumeClaim(engineConfig, name, namespace)
 	if err != nil {
 		return nil, err
 	}
 
 	log.Trace().Msgf("creating volume: %s", pvc.Name)
-	return engine.client.CoreV1().PersistentVolumeClaims(engineConfig.Namespace).Create(ctx, pvc, meta_v1.CreateOptions{})
+	return engine.client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, meta_v1.CreateOptions{})
 }
 
-func stopVolume(ctx context.Context, engine *kube, name string, deleteOpts meta_v1.DeleteOptions) error {
+func stopVolume(ctx context.Context, engine *kube, name, namespace string, deleteOpts meta_v1.DeleteOptions) error {
 	pvcName, err := volumeName(name)
 	if err != nil {
 		return err
 	}
 	log.Trace().Str("name", pvcName).Msg("deleting volume")
 
-	err = engine.client.CoreV1().PersistentVolumeClaims(engine.config.Namespace).Delete(ctx, pvcName, deleteOpts)
+	err = engine.client.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, deleteOpts)
 	if errors.IsNotFound(err) {
 		// Don't abort on 404 errors from k8s, they most likely mean that the pod hasn't been created yet, usually because pipeline was canceled before running all steps.
 		log.Trace().Err(err).Msgf("unable to delete service %s", pvcName)
@@ -42,6 +42,7 @@ func TestPvcMount(t *testing.T) {
 }
 
 func TestPersistentVolumeClaim(t *testing.T) {
+	namespace := "someNamespace"
 	expectedRwx := `
 {
 	"metadata": {
@@ -85,11 +86,11 @@ func TestPersistentVolumeClaim(t *testing.T) {
 }`
 
 	pvc, err := mkPersistentVolumeClaim(&config{
-		Namespace: "someNamespace",
+		Namespace: namespace,
 		StorageClass: "local-storage",
 		VolumeSize: "1Gi",
 		StorageRwx: true,
-	}, "somename")
+	}, "somename", namespace)
 	assert.NoError(t, err)
 
 	j, err := json.Marshal(pvc)
@@ -97,11 +98,11 @@ func TestPersistentVolumeClaim(t *testing.T) {
 	assert.JSONEq(t, expectedRwx, string(j))
 
 	pvc, err = mkPersistentVolumeClaim(&config{
-		Namespace: "someNamespace",
+		Namespace: namespace,
 		StorageClass: "local-storage",
 		VolumeSize: "1Gi",
 		StorageRwx: false,
-	}, "somename")
+	}, "somename", namespace)
 	assert.NoError(t, err)
 
 	j, err = json.Marshal(pvc)
@@ -109,10 +110,10 @@ func TestPersistentVolumeClaim(t *testing.T) {
 	assert.JSONEq(t, expectedRwo, string(j))
 
 	_, err = mkPersistentVolumeClaim(&config{
-		Namespace: "someNamespace",
+		Namespace: namespace,
 		StorageClass: "local-storage",
 		VolumeSize: "1Gi",
 		StorageRwx: false,
-	}, "some0..INVALID3name")
+	}, "some0..INVALID3name", namespace)
 	assert.Error(t, err)
 }
@@ -17,6 +17,7 @@ package types
 // Step defines a container process.
 type Step struct {
 	Name string `json:"name"`
+	OrgID int64 `json:"org_id,omitempty"`
 	UUID string `json:"uuid"`
 	Type StepType `json:"type,omitempty"`
 	Image string `json:"image,omitempty"`
@@ -216,6 +216,7 @@ func (b *StepBuilder) genItemForWorkflow(workflow *model.Workflow, axis matrix.A
 	for stageI := range item.Config.Stages {
 		for stepI := range item.Config.Stages[stageI].Steps {
 			item.Config.Stages[stageI].Steps[stepI].WorkflowLabels = item.Labels
+			item.Config.Stages[stageI].Steps[stepI].OrgID = b.Repo.OrgID
 		}
 	}
 