From 44a48d7890a629ce7b4bfdf3602707208bac3661 Mon Sep 17 00:00:00 2001
From: Dimitris Karakasilis
Date: Wed, 7 Dec 2022 15:58:47 +0200
Subject: [PATCH 1/6] Switch from a Deployment to a Job and create permanent
 nginx Deployment

Signed-off-by: Dimitris Karakasilis
---
 config/default/kustomization.yaml     |  1 +
 config/manager/kustomization.yaml     |  2 +
 config/manager/manager.yaml           |  8 ----
 config/nginx/deployment.yaml          | 41 +++++++++++++++++
 config/nginx/kustomization.yaml       |  3 ++
 config/nginx/service.yaml             | 12 +++++
 config/rbac/role_custom.yaml          |  4 +-
 controllers/{deployment.go => job.go} | 64 ++++++++++-----------------
 controllers/osartifact_controller.go  | 32 +++-----------
 controllers/service.go                | 39 ----------------
 10 files changed, 92 insertions(+), 114 deletions(-)
 create mode 100644 config/nginx/deployment.yaml
 create mode 100644 config/nginx/kustomization.yaml
 create mode 100644 config/nginx/service.yaml
 rename controllers/{deployment.go => job.go} (82%)
 delete mode 100644 controllers/service.go

diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 74d6d17..0639d7c 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -16,6 +16,7 @@ bases:
 - ../crd
 - ../rbac
 - ../manager
+- ../nginx
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
 # crd/kustomization.yaml
 #- ../webhook
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 7a33ce9..f121b11 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -1,6 +1,8 @@
 resources:
 - manager.yaml

+namespace: system
+
 generatorOptions:
   disableNameSuffixHash: true

diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 7874768..26eef16 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -1,15 +1,7 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  labels:
-    control-plane: controller-manager
-  name: system
----
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: controller-manager
-  namespace: system
   labels:
     control-plane: controller-manager
 spec:
diff --git a/config/nginx/deployment.yaml b/config/nginx/deployment.yaml
new file mode 100644
index 0000000..97700f9
--- /dev/null
+++ b/config/nginx/deployment.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: nginx-public
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 3Gi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  labels:
+    app.kubernetes.io/name: osbuilder-nginx
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: osbuilder-nginx
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: osbuilder-nginx
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+        volumeMounts:
+        - mountPath: "/usr/share/nginx/html"
+          name: nginx-public
+        ports:
+        - containerPort: 80
+      serviceAccountName: controller-manager
+      terminationGracePeriodSeconds: 10
+      volumes:
+      - name: nginx-public
+        persistentVolumeClaim:
+          claimName: nginx-public
diff --git a/config/nginx/kustomization.yaml b/config/nginx/kustomization.yaml
new file mode 100644
index 0000000..a944d00
--- /dev/null
+++ b/config/nginx/kustomization.yaml
@@ -0,0 +1,3 @@
+resources:
+- deployment.yaml
+- service.yaml
diff --git a/config/nginx/service.yaml b/config/nginx/service.yaml
new file mode 100644
index 0000000..f2ac78b
--- /dev/null
+++ b/config/nginx/service.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: osbuilder-nginx
+spec:
+  type: NodePort
+  selector:
+    app.kubernetes.io/name: osbuilder-nginx
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 80
diff --git a/config/rbac/role_custom.yaml b/config/rbac/role_custom.yaml
index d3f8bd2..53e0dbd 100644
--- a/config/rbac/role_custom.yaml
+++ b/config/rbac/role_custom.yaml
@@ -47,9 +47,9 @@ rules:
   - create
   - update
 - apiGroups:
-  - "apps"
+  - "batch"
   resources:
-  - deployments
+  - jobs
   verbs:
   - get
   - create
diff --git a/controllers/deployment.go b/controllers/job.go
similarity index 82%
rename from controllers/deployment.go
rename to controllers/job.go
index d5dbc7b..3eaf1be 100644
--- a/controllers/deployment.go
+++ b/controllers/job.go
@@ -20,13 +20,13 @@ import (
 	"fmt"

 	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
-	appsv1 "k8s.io/api/apps/v1"
+	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-func genDeploymentLabel(s string) map[string]string {
+func genJobLabel(s string) map[string]string {
 	return map[string]string{
 		"osbuild": "workload" + s,
 	}
@@ -64,7 +64,7 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
 		Command: []string{"/bin/bash", "-cxe"},
 		Args: []string{
 			fmt.Sprintf(
-				"tar -czvpf test.tar -C /rootfs . && luet util pack %s test.tar image.tar && mv image.tar /public",
+				"tar -czvpf test.tar -C /rootfs . && luet util pack %s test.tar image.tar && mv image.tar /artifacts",
 				pushOptions.ImageName,
 			),
 		},
@@ -74,8 +74,8 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
 				MountPath: "/rootfs",
 			},
 			{
-				Name:      "public",
-				MountPath: "/public",
+				Name:      "artifacts",
+				MountPath: "/artifacts",
 			},
 		},
 	}
@@ -104,8 +104,7 @@ func osReleaseContainer(containerImage string) v1.Container {
 	}
 }

-func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact, svc *v1.Service) *appsv1.Deployment {
-	// TODO: svc is unused, but could be used in the future to generate the Netboot URL
+func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv1.Job {
 	objMeta := metav1.ObjectMeta{
 		Name:            artifact.Name,
 		Namespace:       artifact.Namespace,
 		OwnerReferences: genOwner(artifact),
 	}
@@ -118,14 +117,14 @@ func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact,
 	serviceAccount := false

 	cmd := fmt.Sprintf(
-		"/entrypoint.sh --debug --name %s build-iso --date=false --output /public dir:/rootfs",
+		"/entrypoint.sh --debug --name %s build-iso --date=false --output /artifacts dir:/rootfs",
 		artifact.Name,
 	)

 	volumeMounts := []v1.VolumeMount{
 		{
-			Name:      "public",
-			MountPath: "/public",
+			Name:      "artifacts",
+			MountPath: "/artifacts",
 		},
 		{
 			Name:      "rootfs",
@@ -142,7 +141,7 @@ func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact,
 	}

 	cloudImgCmd := fmt.Sprintf(
-		"/raw-images.sh /rootfs /public/%s.raw",
+		"/raw-images.sh /rootfs /artifacts/%s.raw",
 		artifact.Name,
 	)

@@ -158,7 +157,7 @@ func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact,

 	if artifact.Spec.CloudConfig != "" || artifact.Spec.GRUBConfig != "" {
 		cmd = fmt.Sprintf(
-			"/entrypoint.sh --debug --name %s build-iso --date=false --overlay-iso /iso/iso-overlay --output /public dir:/rootfs",
+			"/entrypoint.sh --debug --name %s build-iso --date=false --overlay-iso /iso/iso-overlay --output /artifacts dir:/rootfs",
 			artifact.Name,
 		)
 	}
@@ -207,7 +206,7 @@ func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact,
 		}},
 		Args: []string{
 			fmt.Sprintf(
-				"/netboot.sh /public/%s.iso /public/%s",
+				"/netboot.sh /artifacts/%s.iso /artifacts/%s",
 				artifact.Name,
 				artifact.Name,
 			),
@@ -223,7 +222,7 @@ func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact,
 		Command: []string{"/bin/bash", "-cxe"},
 		Args: []string{
 			fmt.Sprintf(
-				"/azure.sh /public/%s.raw /public/%s.vhd",
+				"/azure.sh /artifacts/%s.raw /artifacts/%s.vhd",
 				artifact.Name,
 				artifact.Name,
 			),
@@ -239,7 +238,7 @@ func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact,
 		Command: []string{"/bin/bash", "-cxe"},
 		Args: []string{
 			fmt.Sprintf(
-				"/gce.sh /public/%s.raw /public/%s.gce.raw",
+				"/gce.sh /artifacts/%s.raw /artifacts/%s.gce.raw",
 				artifact.Name,
 				artifact.Name,
 			),
 		},
 		VolumeMounts: volumeMounts,
 	}

-	servingContainer := v1.Container{
-		ImagePullPolicy: v1.PullAlways,
-		SecurityContext: &v1.SecurityContext{Privileged: &privileged},
-		Name:            "serve",
-		Ports:           []v1.ContainerPort{v1.ContainerPort{Name: "http", ContainerPort: 80}},
-		Image:           r.ServingImage,
-		VolumeMounts: []v1.VolumeMount{
-			{
-				Name:      "public",
-				MountPath: "/usr/share/nginx/html",
-			},
-		},
-	}
-
 	pod := v1.PodSpec{
 		AutomountServiceAccountToken: &serviceAccount,
 		Volumes: []v1.Volume{
 			{
-				Name:         "public",
+				Name:         "artifacts",
 				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
 			},
 			{
@@ -313,26 +298,25 @@ func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact,
 	}

 	if pushImage {
-		pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact.Spec.PushOptions))
+		pod.Containers = []v1.Container{
+			createImageContainer(r.ToolImage, artifact.Spec.PushOptions),
+		}
 	}

-	pod.Containers = []v1.Container{servingContainer}
+	jobLabels := genJobLabel(artifact.Name)

-	deploymentLabels := genDeploymentLabel(artifact.Name)
-	replicas := int32(1)
-
-	return &appsv1.Deployment{
+	job := batchv1.Job{
 		ObjectMeta: objMeta,
-
-		Spec: appsv1.DeploymentSpec{
-			Selector: &metav1.LabelSelector{MatchLabels: deploymentLabels},
-			Replicas: &replicas,
+		Spec: batchv1.JobSpec{
+			Selector: &metav1.LabelSelector{MatchLabels: jobLabels},
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
-					Labels: deploymentLabels,
+					Labels: jobLabels,
 				},
 				Spec: pod,
 			},
 		},
 	}
+
+	return &job
 }
diff --git a/controllers/osartifact_controller.go b/controllers/osartifact_controller.go
index b10a7d7..4d8514b 100644
--- a/controllers/osartifact_controller.go
+++ b/controllers/osartifact_controller.go
@@ -98,34 +98,16 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{Requeue: true}, err
 	}

-	desiredService := genService(osbuild)
-	logger.Info(fmt.Sprintf("Checking service %v", osbuild))
-
-	svc, err := r.clientSet.CoreV1().Services(req.Namespace).Get(ctx, desiredService.Name, v1.GetOptions{})
-	if svc == nil || apierrors.IsNotFound(err) {
-		logger.Info(fmt.Sprintf("Creating service %v", desiredService))
-
-		svc, err = r.clientSet.CoreV1().Services(req.Namespace).Create(ctx, desiredService, v1.CreateOptions{})
-		if err != nil {
-			logger.Error(err, "Failed while creating svc")
-			return ctrl.Result{}, err
-		}
-
-		return ctrl.Result{Requeue: true}, err
-	}
-	if err != nil {
-		return ctrl.Result{Requeue: true}, err
-	}
 	logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))

-	desiredDeployment := r.genDeployment(osbuild, svc)
-	deployment, err := r.clientSet.AppsV1().Deployments(req.Namespace).Get(ctx, desiredDeployment.Name, v1.GetOptions{})
-	if deployment == nil || apierrors.IsNotFound(err) {
-		logger.Info(fmt.Sprintf("Creating Deployment %v", deployment))
+	desiredJob := r.genJob(osbuild)
+	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, v1.GetOptions{})
+	if job == nil || apierrors.IsNotFound(err) {
+		logger.Info(fmt.Sprintf("Creating Job %v", job))

-		deployment, err = r.clientSet.AppsV1().Deployments(req.Namespace).Create(ctx, desiredDeployment, v1.CreateOptions{})
+		job, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, v1.CreateOptions{})
 		if err != nil {
-			logger.Error(err, "Failed while creating deployment")
+			logger.Error(err, "Failed while creating job")
 			return ctrl.Result{}, nil
 		}

@@ -143,7 +125,7 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
-	if deployment.Status.ReadyReplicas == deployment.Status.Replicas {
+	if job.Status.Succeeded > 0 {
 		copy.Status.Phase = "Ready"
 	} else if copy.Status.Phase != "Building" {
 		copy.Status.Phase = "Building"
diff --git a/controllers/service.go b/controllers/service.go
deleted file mode 100644
index 21f805c..0000000
--- a/controllers/service.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2022.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
-	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func genService(artifact buildv1alpha1.OSArtifact) *v1.Service {
-	objMeta := metav1.ObjectMeta{
-		Name:            artifact.Name,
-		Namespace:       artifact.Namespace,
-		OwnerReferences: genOwner(artifact),
-	}
-	return &v1.Service{
-		ObjectMeta: objMeta,
-		Spec: v1.ServiceSpec{
-			Type:     v1.ServiceTypeNodePort,
-			Ports:    []v1.ServicePort{{Name: "http", Port: int32(80)}},
-			Selector: genDeploymentLabel(artifact.Name),
-		},
-	}
-}

From 224291994f6a9264931300382ce25dfe9c90dafb Mon Sep 17 00:00:00 2001
From: Dimitris Karakasilis
Date: Thu, 8 Dec 2022 16:35:45 +0200
Subject: [PATCH 2/6] [WIP] Create rbac resources to allow the Job to copy to
 the server Pod

Currently fails with:

```
Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:default:hello-kairos" cannot list resource "pods" in API group "" at the cluster scope
```

because we try to list pods with `-A`.

This means we are going to get a similar error if we try to copy files to
a Pod in another namespace, unless we grant permission at the cluster scope
or just in that namespace. (Is that possible? Maybe if we create the Role
in the same namespace as the server.)
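A rough sketch of that idea (not wired up in this patch; names like
`artifact-copier` are made up): create a namespaced Role next to the server,
plus a RoleBinding whose subject is the Job's ServiceAccount from the
artifact's namespace. RoleBinding subjects can reference ServiceAccounts in
other namespaces, so nothing would need to be granted at the cluster scope:

```
import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// namespacedCopierRBAC returns a Role scoped to the server's namespace and a
// RoleBinding whose subject is the Job's ServiceAccount from another
// namespace. The grant then only applies inside serverNS.
func namespacedCopierRBAC(serverNS, jobNS, saName string) (*rbacv1.Role, *rbacv1.RoleBinding) {
	role := &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{Name: "artifact-copier", Namespace: serverNS},
		Rules: []rbacv1.PolicyRule{{
			APIGroups: []string{""},
			Resources: []string{"pods"},
			Verbs:     []string{"list"},
		}},
	}
	binding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "artifact-copier", Namespace: serverNS},
		RoleRef:    rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "Role", Name: role.Name},
		// Subject lives in the Job's namespace; that's allowed for RoleBindings.
		Subjects: []rbacv1.Subject{{Kind: "ServiceAccount", Name: saName, Namespace: jobNS}},
	}
	return role, binding
}
```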
Signed-off-by: Dimitris Karakasilis
---
 Makefile                                     |   3 +-
 config/default/manager_auth_proxy_patch.yaml |   4 +
 config/manager/manager.yaml                  |   7 +
 config/nginx/kustomization.yaml              |   1 +
 config/nginx/role.yaml                       |  12 ++
 config/rbac/role_custom.yaml                 |  33 ++++
 controllers/job.go                           | 161 +++++++++++++++++--
 controllers/osartifact_controller.go         |  31 ++++
 controllers/suite_test.go                    |   7 +-
 main.go                                      |  16 +-
 10 files changed, 255 insertions(+), 20 deletions(-)
 create mode 100644 config/nginx/role.yaml

diff --git a/Makefile b/Makefile
index 056a692..ed94f3b 100644
--- a/Makefile
+++ b/Makefile
@@ -147,6 +147,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
 .PHONY: deploy
 deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	# TODO: No need to build and then apply. `kubectl apply -k config/default` does the trick
 	$(KUSTOMIZE) build config/default | kubectl apply -f -

@@ -282,4 +283,4 @@ kind-e2e-tests: ginkgo kind-setup install undeploy-dev deploy-dev e2e-tests
 kubesplit: manifests kustomize
 	rm -rf helm-chart
 	mkdir helm-chart
-	$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart
\ No newline at end of file
+	$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
index bf7ce9d..0b4f144 100644
--- a/config/default/manager_auth_proxy_patch.yaml
+++ b/config/default/manager_auth_proxy_patch.yaml
@@ -38,3 +38,7 @@ spec:
         - "--health-probe-bind-address=:8081"
         - "--metrics-bind-address=127.0.0.1:8080"
         - "--leader-elect"
+        - "--copy-to-namespace=$(NGINX_NAMESPACE)"
+        - "--copy-role=$(ARTIFACT_COPIER_ROLE)"
+        - --copy-to-pod-label="app.kubernetes.io/name=osbuilder-nginx"
+        - --copy-to-path="/usr/share/nginx/html"
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 26eef16..437bd4d 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -1,3 +1,10 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: system
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
diff --git a/config/nginx/kustomization.yaml b/config/nginx/kustomization.yaml
index a944d00..d587479 100644
--- a/config/nginx/kustomization.yaml
+++ b/config/nginx/kustomization.yaml
@@ -1,3 +1,4 @@
 resources:
 - deployment.yaml
 - service.yaml
+- role.yaml
diff --git a/config/nginx/role.yaml b/config/nginx/role.yaml
new file mode 100644
index 0000000..9d895b9
--- /dev/null
+++ b/config/nginx/role.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: artifactCopier
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+
diff --git a/config/rbac/role_custom.yaml b/config/rbac/role_custom.yaml
index 53e0dbd..a451127 100644
--- a/config/rbac/role_custom.yaml
+++ b/config/rbac/role_custom.yaml
@@ -5,6 +5,31 @@ metadata:
   creationTimestamp: null
   name: manager-role
 rules:
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - "rbac.authorization.k8s.io"
+  resources:
+  - roles
+  - rolebindings
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - build.kairos.io
   resources:
@@ -54,3 +79,11 @@ rules:
   - get
   - create
   - update
+# Temporary so that it can grant these permissions to the created role
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - get
diff --git a/controllers/job.go b/controllers/job.go
index 3eaf1be..326117d 100644
--- a/controllers/job.go
+++ b/controllers/job.go
@@ -17,12 +17,16 @@ limitations under the License.
 package controllers

 import (
+	"context"
 	"fmt"

 	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
+	"github.com/pkg/errors"

 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -34,6 +38,7 @@ func genJobLabel(s string) map[string]string {

 // TODO: Handle registry auth
 // TODO: This shells out, but needs ENV_VAR with key refs mapping
+// TODO: Cache downloaded images?
 func unpackContainer(id, containerImage, pullImage string, pullOptions buildv1alpha1.Pull) v1.Container {
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
@@ -81,6 +86,28 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
 	}
 }

+func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
+	return v1.Container{
+		ImagePullPolicy: v1.PullAlways,
+		Name:            "push-to-server",
+		Image:           containerImage,
+		Command:         []string{"/bin/bash", "-cxe"},
+		Args: []string{
+			fmt.Sprintf("kubectl get pods -n %s", artifactPodInfo.Namespace),
+		},
+		VolumeMounts: []v1.VolumeMount{
+			{
+				Name:      "rootfs",
+				MountPath: "/rootfs",
+			},
+			{
+				Name:      "artifacts",
+				MountPath: "/artifacts",
+			},
+		},
+	}
+}
+
 func osReleaseContainer(containerImage string) v1.Container {
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
@@ -105,16 +132,12 @@ func osReleaseContainer(containerImage string) v1.Container {
 }

 func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv1.Job {
-	objMeta := metav1.ObjectMeta{
-		Name:            artifact.Name,
-		Namespace:       artifact.Namespace,
-		OwnerReferences: genOwner(artifact),
-	}
+	objMeta := genObjectMeta(artifact)

 	pushImage := artifact.Spec.PushOptions.Push

 	privileged := false
-	serviceAccount := false
+	serviceAccount := true

 	cmd := fmt.Sprintf(
 		"/entrypoint.sh --debug --name %s build-iso --date=false --output /artifacts dir:/rootfs",
@@ -271,9 +294,11 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	pod := v1.PodSpec{
 		AutomountServiceAccountToken: &serviceAccount,
+		ServiceAccountName:           objMeta.Name,
+		RestartPolicy:                v1.RestartPolicyNever,
 		Volumes: []v1.Volume{
 			{
-				Name:         "public",
+				Name:         "artifacts",
 				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
 			},
 			{
@@ -274,7 +299,6 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv

 	if artifact.Spec.OSRelease != "" {
 		pod.InitContainers = append(pod.InitContainers, osReleaseContainer(r.ToolImage))
-
 	}

 	if artifact.Spec.ISO || artifact.Spec.Netboot {
@@ -297,10 +321,17 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 		pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
 	}

+	// TODO: Shell out to `kubectl cp`? Why not?
+	// TODO: Does it make sense to build the image and not push it? Maybe remove
+	// this flag?
 	if pushImage {
-		pod.Containers = []v1.Container{
-			createImageContainer(r.ToolImage, artifact.Spec.PushOptions),
-		}
+		pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact.Spec.PushOptions))
+	}
+
+	pod.Containers = []v1.Container{
+		// TODO: Add kubectl to osbuilder-tools?
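+		// Until then, bitnami/kubectl below is used only because it ships
+		// kubectl; the artifacts volume is mounted so the copy step can read
+		// the build output.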
+		//createPushToServerImageContainer(r.ToolImage),
+		createPushToServerImageContainer("bitnami/kubectl", r.ArtifactPodInfo),
 	}

 	jobLabels := genJobLabel(artifact.Name)

@@ -308,7 +339,6 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	job := batchv1.Job{
 		ObjectMeta: objMeta,
 		Spec: batchv1.JobSpec{
-			Selector: &metav1.LabelSelector{MatchLabels: jobLabels},
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: jobLabels,
@@ -320,3 +350,110 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv

 	return &job
 }
+
+// createCopierServiceAccount creates a service account that has the permissions to
+// copy the artifacts to the http server Pod. This service account is used for
+// the "push to server" container.
+func (r *OSArtifactReconciler) createCopierServiceAccount(ctx context.Context, objMeta metav1.ObjectMeta) error {
+	sa, err := r.clientSet.CoreV1().
+		ServiceAccounts(objMeta.Namespace).Get(ctx, objMeta.Name, metav1.GetOptions{})
+	if sa == nil || apierrors.IsNotFound(err) {
+		t := true
+		_, err := r.clientSet.CoreV1().ServiceAccounts(objMeta.Namespace).Create(ctx,
+			&v1.ServiceAccount{
+				ObjectMeta:                   objMeta,
+				AutomountServiceAccountToken: &t,
+			}, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// func (r *OSArtifactReconciler) createCopierRole(ctx context.Context, objMeta metav1.ObjectMeta) error {
+// 	role, err := r.clientSet.RbacV1().
+// 		Roles(objMeta.Namespace).
+// 		Get(ctx, objMeta.Name, metav1.GetOptions{})
+// 	if role == nil || apierrors.IsNotFound(err) {
+// 		_, err := r.clientSet.RbacV1().Roles(objMeta.Namespace).Create(ctx,
+// 			&rbacv1.Role{
+// 				ObjectMeta: objMeta,
+// 				Rules: []rbacv1.PolicyRule{
+// 					// TODO: The actual permissions we need is that to copy to a Pod.
+// 					// The Pod is on another namespace, so we need a cluster wide permission.
+// 					// This can get viral because the controller needs to have the permissions
+// 					// if it is to grant them to the Job.
+// 					{
+// 						Verbs:     []string{"list"},
+// 						APIGroups: []string{""},
+// 						Resources: []string{"pods"},
+// 					},
+// 				},
+// 			},
+// 			metav1.CreateOptions{},
+// 		)
+// 		if err != nil {
+// 			return err
+// 		}
+// 	}

+// 	return err
+// }
+
+func (r *OSArtifactReconciler) createCopierRoleBinding(ctx context.Context, objMeta metav1.ObjectMeta) error {
+	newrb := &rbacv1.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      objMeta.Name,
+			Namespace: r.ArtifactPodInfo.Namespace,
+			// TODO: We can't have cross-namespace owners. The role binding will have to be deleted explicitly by the reconciler (finalizer?)
+			// OwnerReferences: objMeta.OwnerReferences,
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "Role",
+			Name:     r.ArtifactPodInfo.Role,
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      "ServiceAccount",
+				Name:      objMeta.Name,
+				Namespace: objMeta.Namespace,
+			},
+		},
+	}
+
+	rb, err := r.clientSet.RbacV1().
+		RoleBindings(r.ArtifactPodInfo.Namespace).
+		Get(ctx, objMeta.Name, metav1.GetOptions{})
+	if rb == nil || apierrors.IsNotFound(err) {
+		_, err := r.clientSet.RbacV1().
+			RoleBindings(r.ArtifactPodInfo.Namespace).
+			Create(ctx, newrb, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// createRBAC creates a ServiceAccount, and a binding to the CopierRole so that
+// the container that copies the artifacts to the http server Pod has the
+// permissions to do so.
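+// The ServiceAccount lives in the artifact's namespace, while the RoleBinding
+// is created over in ArtifactPodInfo.Namespace and points at the Role that is
+// pre-installed there. A RoleBinding's subject may reference a ServiceAccount
+// from another namespace, which is what keeps the grant scoped to the server's
+// namespace instead of the whole cluster.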
+func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
+	objMeta := genObjectMeta(artifact)
+
+	err := r.createCopierServiceAccount(ctx, objMeta)
+	if err != nil {
+		return errors.Wrap(err, "creating a service account")
+	}
+
+	err = r.createCopierRoleBinding(ctx, objMeta)
+	if err != nil {
+		return errors.Wrap(err, "creating a role binding for the copy-role")
+	}
+
+	return err
+}
diff --git a/controllers/osartifact_controller.go b/controllers/osartifact_controller.go
index 4d8514b..309b5e1 100644
--- a/controllers/osartifact_controller.go
+++ b/controllers/osartifact_controller.go
@@ -35,12 +35,28 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )

+type ArtifactPodInfo struct {
+	Label     string
+	Namespace string
+	Path      string
+	Role      string
+}
+
 // OSArtifactReconciler reconciles a OSArtifact object
 type OSArtifactReconciler struct {
 	client.Client
 	Scheme                  *runtime.Scheme
 	clientSet               *kubernetes.Clientset
 	ServingImage, ToolImage string
+	ArtifactPodInfo         ArtifactPodInfo
+}
+
+func genObjectMeta(artifact buildv1alpha1.OSArtifact) metav1.ObjectMeta {
+	return metav1.ObjectMeta{
+		Name:            artifact.Name,
+		Namespace:       artifact.Namespace,
+		OwnerReferences: genOwner(artifact),
+	}
 }

 func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
@@ -100,6 +116,21 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)

 	logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))

+	// TODO: We need to create the Role in the namespace where the nginx Pod is,
+	// so that the copier container has permissions to copy to that Pod.
+	// The nginx Pod should be defined in the OSArtifact CRD as in "when done
+	// write the results in this Namespace:Pod, under this path".
+	// The controller will try to create RBAC with the proper permissions but
+	// Kubernetes requires us to have the permissions before we grant them to others.
+	// This means the controller should have these permissions already.
+	// Since we control the nginx, we can make it so but if the user specifies
+	// some other Pod it may fail. Also, every OSArtifact will have to specify
+	// the nginx Pod which makes it cumbersome.
+	err = r.createRBAC(ctx, osbuild)
+	if err != nil {
+		return ctrl.Result{Requeue: true}, err
+	}
+
 	desiredJob := r.genJob(osbuild)
 	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, v1.GetOptions{})
 	if job == nil || apierrors.IsNotFound(err) {
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index b97f98e..bb1c0c8 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -17,20 +17,15 @@ limitations under the License.
 package controllers

 import (
-	"path/filepath"
 	"testing"

-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
-	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-
-	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
 	//+kubebuilder:scaffold:imports
 )
diff --git a/main.go b/main.go
index 61a2ae5..ab33883 100644
--- a/main.go
+++ b/main.go
@@ -53,11 +53,19 @@ func main() {
 	var enableLeaderElection bool
 	var probeAddr string
 	var serveImage, toolImage string
+	var copyToPodLabel, copyToNamespace, copyToPath, copierRole string
+
 	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
 	flag.StringVar(&serveImage, "serve-image", "nginx", "Serve image.")
 	// It needs luet inside
 	flag.StringVar(&toolImage, "tool-image", "quay.io/kairos/osbuilder-tools:latest", "Tool image.")
+
+	// Information on where to copy the artifacts
+	flag.StringVar(&copyToPodLabel, "copy-to-pod-label", "", "The label of the Pod to which artifacts should be copied.")
+	flag.StringVar(&copyToNamespace, "copy-to-namespace", "", "The namespace of the copy-to-pod-label Pod.")
+	flag.StringVar(&copyToPath, "copy-to-path", "", "The path under which to copy artifacts in the copy-to-pod-label Pod.")
+	flag.StringVar(&copierRole, "copy-role", "", "The name of the Kubernetes Role that has the permissions to copy artifacts to the copy-to-pod-label Pod")
+
 	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
 	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
 		"Enable leader election for controller manager. "+
@@ -98,7 +106,13 @@ func main() {
 		Client:       mgr.GetClient(),
 		ServingImage: serveImage,
 		ToolImage:    toolImage,
-		Scheme:       mgr.GetScheme(),
+		ArtifactPodInfo: controllers.ArtifactPodInfo{
+			Label:     copyToPodLabel,
+			Namespace: copyToNamespace,
+			Path:      copyToPath,
+			Role:      copierRole,
+		},
+		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "OSArtifact")
 		os.Exit(1)

From 13463746503466c78ce00ea1e80e39099f868f6e Mon Sep 17 00:00:00 2001
From: Dimitris Karakasilis
Date: Mon, 12 Dec 2022 18:09:31 +0200
Subject: [PATCH 3/6] Refactor kustomization, implement cleanup with finalizers
 and write tests

Signed-off-by: Dimitris Karakasilis
---
 Makefile                                     |   4 +-
 config/default/kustomization.yaml            |  13 +++
 config/default/manager_auth_proxy_patch.yaml |   2 +-
 config/dev/kustomization.yaml                |  77 +-------------
 config/dev/manager_auth_proxy_patch.yaml     |  40 --------
 config/dev/manager_config_patch.yaml         |  21 ----
 config/nginx/role.yaml                       |   8 +-
 config/rbac/role_custom.yaml                 |   6 ++
 controllers/job.go                           |  78 +++++++++++++-
 controllers/osartifact_controller.go         |  94 +++++++++++++----
 controllers/suite_test.go                    |   7 +-
 go.mod                                       |   1 +
 go.sum                                       |   3 +
 tests/e2e/e2e_simple_test.go                 | 101 ++++++++++++++++---
 tests/fixtures/simple.yaml                   |   4 +-
 15 files changed, 284 insertions(+), 175 deletions(-)
 delete mode 100644 config/dev/manager_auth_proxy_patch.yaml
 delete mode 100644 config/dev/manager_config_patch.yaml

diff --git a/Makefile b/Makefile
index ed94f3b..b13b70f 100644
--- a/Makefile
+++ b/Makefile
@@ -150,11 +150,9 @@ deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in
 	# TODO: No need to build and then apply. `kubectl apply -k config/default` does the trick
 	$(KUSTOMIZE) build config/default | kubectl apply -f -

-
 .PHONY: deploy-dev
 deploy-dev: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
-	$(KUSTOMIZE) build config/dev | kubectl apply -f -
+	kubectl apply -k config/dev

 .PHONY: undeploy
 undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 0639d7c..e66a6f2 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -73,3 +73,16 @@ vars:
 #    kind: Service
 #    version: v1
 #    name: webhook-service
+
+vars:
+- name: NGINX_NAMESPACE
+  objref:
+    kind: Namespace
+    name: system
+    apiVersion: v1
+
+- name: ARTIFACT_COPIER_ROLE
+  objref:
+    kind: Role
+    name: artifactCopier
+    apiVersion: rbac.authorization.k8s.io/v1
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
index 0b4f144..fd03f29 100644
--- a/config/default/manager_auth_proxy_patch.yaml
+++ b/config/default/manager_auth_proxy_patch.yaml
@@ -40,5 +40,5 @@ spec:
         - "--leader-elect"
         - "--copy-to-namespace=$(NGINX_NAMESPACE)"
         - "--copy-role=$(ARTIFACT_COPIER_ROLE)"
-        - --copy-to-pod-label="app.kubernetes.io/name=osbuilder-nginx"
+        - --copy-to-pod-label=app.kubernetes.io/name=osbuilder-nginx
         - --copy-to-path="/usr/share/nginx/html"
diff --git a/config/dev/kustomization.yaml b/config/dev/kustomization.yaml
index 74d6d17..0f0959a 100644
--- a/config/dev/kustomization.yaml
+++ b/config/dev/kustomization.yaml
@@ -1,74 +1,7 @@
-# Adds namespace to all resources.
-namespace: osartifactbuilder-operator-system
-
-# Value of this field is prepended to the
-# names of all resources, e.g. a deployment named
-# "wordpress" becomes "alices-wordpress".
-# Note that it should also match with the prefix (text before '-') of the namespace
-# field above.
-namePrefix: osartifactbuilder-operator-
-
-# Labels to add to all resources and selectors.
-#commonLabels:
-#  someName: someValue
-
 bases:
-- ../crd
-- ../rbac
-- ../manager
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
-#- ../webhook
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
-#- ../certmanager
-# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
-#- ../prometheus
+- ../default

-patchesStrategicMerge:
-# Protect the /metrics endpoint by putting it behind auth.
-# If you want your controller-manager to expose the /metrics
-# endpoint w/o any authn/z, please comment the following line.
-- manager_auth_proxy_patch.yaml
-
-# Mount the controller config file for loading manager configurations
-# through a ComponentConfig type
-#- manager_config_patch.yaml
-
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
-#- manager_webhook_patch.yaml
-
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
-# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
-# 'CERTMANAGER' needs to be enabled to use ca injection
-#- webhookcainjection_patch.yaml
-
-# the following config is for teaching kustomize how to do var substitution
-vars:
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
-#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
-#  objref:
-#    kind: Certificate
-#    group: cert-manager.io
-#    version: v1
-#    name: serving-cert # this name should match the one in certificate.yaml
-#  fieldref:
-#    fieldpath: metadata.namespace
-#- name: CERTIFICATE_NAME
-#  objref:
-#    kind: Certificate
-#    group: cert-manager.io
-#    version: v1
-#    name: serving-cert # this name should match the one in certificate.yaml
-#- name: SERVICE_NAMESPACE # namespace of the service
-#  objref:
-#    kind: Service
-#    version: v1
-#    name: webhook-service
-#  fieldref:
-#    fieldpath: metadata.namespace
-#- name: SERVICE_NAME
-#  objref:
-#    kind: Service
-#    version: v1
-#    name: webhook-service
+images:
+- name: quay.io/kairos/osbuilder
+  newName: quay.io/kairos/osbuilder
+  newTag: test
diff --git a/config/dev/manager_auth_proxy_patch.yaml b/config/dev/manager_auth_proxy_patch.yaml
deleted file mode 100644
index bf7ce9d..0000000
--- a/config/dev/manager_auth_proxy_patch.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-# This patch inject a sidecar container which is a HTTP proxy for the
-# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: kube-rbac-proxy
-        securityContext:
-          allowPrivilegeEscalation: false
-        # TODO(user): uncomment for common cases that do not require escalating privileges
-        # capabilities:
-        #   drop:
-        #     - "ALL"
-        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0
-        args:
-        - "--secure-listen-address=0.0.0.0:8443"
-        - "--upstream=http://127.0.0.1:8080/"
-        - "--logtostderr=true"
-        - "--v=0"
-        ports:
-        - containerPort: 8443
-          protocol: TCP
-          name: https
-        resources:
-          limits:
-            cpu: 500m
-            memory: 128Mi
-          requests:
-            cpu: 5m
-            memory: 64Mi
-      - name: manager
-        args:
-        - "--health-probe-bind-address=:8081"
-        - "--metrics-bind-address=127.0.0.1:8080"
-        - "--leader-elect"
diff --git a/config/dev/manager_config_patch.yaml b/config/dev/manager_config_patch.yaml
deleted file mode 100644
index 2def14f..0000000
--- a/config/dev/manager_config_patch.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: manager
-        imagePullPolicy: Never
-        args:
-        - "--config=controller_manager_config.yaml"
-        volumeMounts:
-        - name: manager-config
-          mountPath: /controller_manager_config.yaml
-          subPath: controller_manager_config.yaml
-      volumes:
-      - name: manager-config
-        configMap:
-          name: manager-config
diff --git a/config/nginx/role.yaml b/config/nginx/role.yaml
index 9d895b9..ddeb53e 100644
--- a/config/nginx/role.yaml
+++ b/config/nginx/role.yaml
@@ -9,4 +9,10 @@ rules:
   - pods
   verbs:
   - list
-
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
diff --git a/config/rbac/role_custom.yaml b/config/rbac/role_custom.yaml
index a451127..358380f 100644
--- a/config/rbac/role_custom.yaml
+++ b/config/rbac/role_custom.yaml
@@ -87,3 +87,9 @@ rules:
   verbs:
   - list
   - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
diff --git a/controllers/job.go b/controllers/job.go
index 326117d..231d0f5 100644
--- a/controllers/job.go
+++ b/controllers/job.go
@@ -17,6 +17,7 @@ limitations under the License.
 package controllers

 import (
+	"bytes"
 	"context"
 	"fmt"

@@ -28,6 +29,9 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/remotecommand"
 )

@@ -87,14 +91,15 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
 }

 func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
+	command := fmt.Sprintf("tar cf - -C artifacts/ . | kubectl exec -i -n %s $(kubectl get pods -l %s -n %s --no-headers -o custom-columns=\":metadata.name\" | head -n1) -- tar xf - -C %s", artifactPodInfo.Namespace, artifactPodInfo.Label, artifactPodInfo.Namespace, artifactPodInfo.Path)
+	fmt.Printf("command = %+v\n", command)
+
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
 		Name:            "push-to-server",
 		Image:           containerImage,
 		Command:         []string{"/bin/bash", "-cxe"},
-		Args: []string{
-			fmt.Sprintf("kubectl get pods -n %s", artifactPodInfo.Namespace),
-		},
+		Args:            []string{command},
 		VolumeMounts: []v1.VolumeMount{
 			{
 				Name:      "rootfs",
@@ -457,3 +462,70 @@ func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1a

 	return err
 }
+
+// removeRBAC deletes the role binding between the service account of this artifact
+// and the CopierRole. The ServiceAccount is removed automatically through the Owner
+// relationship with the OSArtifact. The RoleBinding can't have it as an owner
+// because it is in a different Namespace.
+func (r *OSArtifactReconciler) removeRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
+	err := r.clientSet.RbacV1().RoleBindings(r.ArtifactPodInfo.Namespace).
+		Delete(ctx, artifact.Name, metav1.DeleteOptions{})
+	// Ignore not found. No need to do anything.
+	if err != nil && apierrors.IsNotFound(err) {
+		return nil
+	}
+
+	return err
+}
+
+func (r *OSArtifactReconciler) removeArtifacts(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
+	// Finding Pods using labels
+	fmt.Printf("r.ArtifactPodInfo = %+v\n", r.ArtifactPodInfo.Label)
+	pods, err := r.clientSet.CoreV1().Pods(r.ArtifactPodInfo.Namespace).
+		List(ctx, metav1.ListOptions{LabelSelector: r.ArtifactPodInfo.Label})
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("listing pods with label %s in namespace %s", r.ArtifactPodInfo.Label, r.ArtifactPodInfo.Namespace))
+	}
+	if len(pods.Items) < 1 {
+		return errors.New("No artifact pod found")
+	}
+	pod := pods.Items[0]
+
+	stdout, stderr, err := r.executeRemoteCommand(r.ArtifactPodInfo.Namespace, pod.Name, fmt.Sprintf("rm -rf %s/%s.*", r.ArtifactPodInfo.Path, artifact.Name))
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("%s\n%s", stdout, stderr))
+	}
+	return nil
+}
+
+func (r *OSArtifactReconciler) executeRemoteCommand(namespace, podName, command string) (string, string, error) {
+	buf := &bytes.Buffer{}
+	errBuf := &bytes.Buffer{}
+	request := r.clientSet.CoreV1().RESTClient().
+		Post().
+		Namespace(namespace).
+		Resource("pods").
+		Name(podName).
+		SubResource("exec").
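+		// The "exec" subresource is the same API that `kubectl exec` talks to;
+		// PodExecOptions carries the command and the stream flags below.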
+		VersionedParams(&v1.PodExecOptions{
+			Command: []string{"/bin/sh", "-c", command},
+			Stdin:   false,
+			Stdout:  true,
+			Stderr:  true,
+			TTY:     true,
+		}, scheme.ParameterCodec)
+
+	exec, err := remotecommand.NewSPDYExecutor(r.restConfig, "POST", request.URL())
+	if err != nil {
+		return "", "", err
+	}
+	err = exec.Stream(remotecommand.StreamOptions{
+		Stdout: buf,
+		Stderr: errBuf,
+	})
+	if err != nil {
+		return "", "", fmt.Errorf("%w Failed executing command %s on %v/%v", err, command, namespace, podName)
+	}
+
+	return buf.String(), errBuf.String(), nil
+}
diff --git a/controllers/osartifact_controller.go b/controllers/osartifact_controller.go
index 309b5e1..5cd1e09 100644
--- a/controllers/osartifact_controller.go
+++ b/controllers/osartifact_controller.go
@@ -25,16 +25,19 @@ import (
 	"github.com/pkg/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 	"sigs.k8s.io/cluster-api/util/patch"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )

+const FinalizerName = "build.kairos.io/osbuilder-finalizer"
+
 type ArtifactPodInfo struct {
 	Label     string
 	Namespace string
@@ -46,6 +49,7 @@ type ArtifactPodInfo struct {
 // OSArtifactReconciler reconciles a OSArtifact object
 type OSArtifactReconciler struct {
 	client.Client
 	Scheme                  *runtime.Scheme
+	restConfig              *rest.Config
 	clientSet               *kubernetes.Clientset
 	ServingImage, ToolImage string
 	ArtifactPodInfo         ArtifactPodInfo
@@ -73,6 +77,12 @@ func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
 //+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/finalizers,verbs=update

+// TODO: Is this ^ how I should have created rbac permissions for the controller?
+// - git commit all changes
+// - generate code with kubebuilder
+// - check if my permissions were removed
+// - do it properly
+
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
 // TODO(user): Modify the Reconcile function to compare the state specified by
@@ -95,17 +105,22 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)

 	logger.Info(fmt.Sprintf("Reconciling %v", osbuild))

+	stop, err := r.handleFinalizer(ctx, &osbuild)
+	if err != nil || stop {
+		return ctrl.Result{}, err
+	}
+
 	// generate configmap required for building a custom image
 	desiredConfigMap := r.genConfigMap(osbuild)
 	logger.Info(fmt.Sprintf("Checking configmap %v", osbuild))

-	cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, v1.GetOptions{})
+	cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, metav1.GetOptions{})
 	if cfgMap == nil || apierrors.IsNotFound(err) {
-		logger.Info(fmt.Sprintf("Creating service %v", desiredConfigMap))
+		logger.Info(fmt.Sprintf("Creating config map %v", desiredConfigMap))

-		cfgMap, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, v1.CreateOptions{})
+		_, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, metav1.CreateOptions{})
 		if err != nil {
-			logger.Error(err, "Failed while creating svc")
+			logger.Error(err, "Failed while creating config map")
 			return ctrl.Result{}, err
 		}
 		return ctrl.Result{Requeue: true}, err
@@ -116,27 +131,17 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)

 	logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))

-	// TODO: We need to create the Role in the namespace where the nginx Pod is,
-	// so that the copier container has permissions to copy to that Pod.
-	// The nginx Pod should be defined in the OSArtifact CRD as in "when done
-	// write the results in this Namespace:Pod, under this path".
-	// The controller will try to create RBAC with the proper permissions but
-	// Kubernetes requires us to have the permissions before we grant them to others.
-	// This means the controller should have these permissions already.
-	// Since we control the nginx, we can make it so but if the user specifies
-	// some other Pod it may fail. Also, every OSArtifact will have to specify
-	// the nginx Pod which makes it cumbersome.
 	err = r.createRBAC(ctx, osbuild)
 	if err != nil {
 		return ctrl.Result{Requeue: true}, err
 	}

 	desiredJob := r.genJob(osbuild)
-	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, v1.GetOptions{})
+	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, metav1.GetOptions{})
 	if job == nil || apierrors.IsNotFound(err) {
 		logger.Info(fmt.Sprintf("Creating Job %v", job))

-		job, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, v1.CreateOptions{})
+		_, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, metav1.CreateOptions{})
 		if err != nil {
 			logger.Error(err, "Failed while creating job")
 			return ctrl.Result{}, nil
 		}
@@ -179,13 +184,66 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)

 // SetupWithManager sets up the controller with the Manager.
 func (r *OSArtifactReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	clientset, err := kubernetes.NewForConfig(mgr.GetConfig())
+	cfg := mgr.GetConfig()
+	clientset, err := kubernetes.NewForConfig(cfg)
 	if err != nil {
 		return err
 	}
+	r.restConfig = cfg
 	r.clientSet = clientset

 	return ctrl.NewControllerManagedBy(mgr).
 		For(&buildv1alpha1.OSArtifact{}).
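 		// Only OSArtifact events are watched here; the Job is not registered
 		// via Owns(), so its status is read when the OSArtifact reconciles.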
 		Complete(r)
 }
+
+// Returns true if reconciliation should stop or false otherwise
+func (r *OSArtifactReconciler) handleFinalizer(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) (bool, error) {
+	// examine DeletionTimestamp to determine if object is under deletion
+	if osbuild.DeletionTimestamp.IsZero() {
+		// The object is not being deleted, so if it does not have our finalizer,
+		// then let's add the finalizer and update the object. This is equivalent
+		// to registering our finalizer.
+		if !controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
+			controllerutil.AddFinalizer(osbuild, FinalizerName)
+			if err := r.Update(ctx, osbuild); err != nil {
+				return true, err
+			}
+		}
+	} else {
+		// The object is being deleted
+		if controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
+			// our finalizer is present, so let's handle any external dependency
+			if err := r.finalize(ctx, osbuild); err != nil {
+				// if we fail to delete the external dependency here, return with
+				// an error so that it can be retried
+				return true, err
+			}
+
+			// remove our finalizer from the list and update it.
+			controllerutil.RemoveFinalizer(osbuild, FinalizerName)
+			if err := r.Update(ctx, osbuild); err != nil {
+				return true, err
+			}
+		}
+
+		// Stop reconciliation as the item is being deleted
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// - Remove artifacts from the server Pod
+// - Delete role-binding (because it doesn't have the OSArtifact as an owner and won't be deleted automatically)
+func (r *OSArtifactReconciler) finalize(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) error {
+	if err := r.removeRBAC(ctx, *osbuild); err != nil {
+		return err
+	}
+
+	if err := r.removeArtifacts(ctx, *osbuild); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index bb1c0c8..b97f98e 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -17,15 +17,20 @@ limitations under the License.
 package controllers

 import (
+	"path/filepath"
 	"testing"

-	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
 	//+kubebuilder:scaffold:imports
 )
diff --git a/go.mod b/go.mod
index cd230d5..5928255 100644
--- a/go.mod
+++ b/go.mod
@@ -51,6 +51,7 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
diff --git a/go.sum b/go.sum
index 4a58aaa..d8c53ab 100644
--- a/go.sum
+++ b/go.sum
@@ -82,6 +82,7 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@@ -136,6 +137,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
@@ -375,6 +377,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
diff --git a/tests/e2e/e2e_simple_test.go b/tests/e2e/e2e_simple_test.go
index ab87494..62511d7 100644
--- a/tests/e2e/e2e_simple_test.go
+++ b/tests/e2e/e2e_simple_test.go
@@ -1,6 +1,10 @@
 package e2e_test

 import (
+	"bytes"
+	"fmt"
+	"os/exec"
+	"strings"
 	"time"

 	. "github.com/onsi/ginkgo/v2"
@@ -20,20 +24,91 @@ var _ = Describe("ISO build test", func() {
 			err := kubectl.Apply("", "../../tests/fixtures/simple.yaml")
 			Expect(err).ToNot(HaveOccurred())

-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "osartifacts", "hello-kairos", "jsonpath={.spec.imageName}")
-				return string(b)
-			}, 2*time.Minute, 2*time.Second).Should(Equal("quay.io/kairos/core-opensuse:latest"))
+			itHasTheCorrectImage()
+			itHasTheCorrectLabels()
+			itCopiesTheArtifacts()

-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "deployments", "hello-kairos", "jsonpath={.spec.template.metadata.labels.osbuild}")
-				return string(b)
-			}, 2*time.Minute, 2*time.Second).Should(Equal("workloadhello-kairos"))
-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "deployments", "hello-kairos", "jsonpath={.spec.status.unavailableReplicas}")
-				return string(b)
-			}, 15*time.Minute, 2*time.Second).ShouldNot(Equal("1"))
+			By("deleting the custom resource", func() {
+				err = kubectl.New().Delete("osartifacts", "-n", "default", "hello-kairos")
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			itCleansUpRoleBindings()
+			itDeletesTheArtifacts()
 		})
 	})
-})
+
+func itHasTheCorrectImage() {
+	Eventually(func() string {
+		b, _ := kubectl.GetData("default", "osartifacts", "hello-kairos", "jsonpath={.spec.imageName}")
+		fmt.Printf("looking for image core-opensuse:latest = %+v\n", string(b))
+		return string(b)
+	}, 2*time.Minute, 2*time.Second).Should(Equal("quay.io/kairos/core-opensuse:latest"))
+}
+
+func itHasTheCorrectLabels() {
+	Eventually(func() string {
+		b, _ := kubectl.GetData("default", "jobs", "hello-kairos", "jsonpath={.spec.template.metadata.labels.osbuild}")
+		fmt.Printf("looking for label workloadhello-kairos = %+v\n", string(b))
+		return string(b)
+	}, 2*time.Minute, 2*time.Second).Should(Equal("workloadhello-kairos"))
+}
+
+func itCopiesTheArtifacts() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		podName := strings.TrimSpace(findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx"))
+
+		out, _ := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
+
+		return out
+	}, 15*time.Minute, 2*time.Second).Should(MatchRegexp("hello-kairos.iso"))
+}
+
+func itCleansUpRoleBindings() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		rb := findRoleBindings(nginxNamespace)

+		return rb
+	}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos"))
+}
+
+func itDeletesTheArtifacts() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		podName := findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx")
+
+		out, err := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
+		Expect(err).ToNot(HaveOccurred(), out)
+
+		return out
+	}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos.iso"))
+}
+
+func findPodsWithLabel(namespace, label string) string {
+	kubectlCommand := fmt.Sprintf("kubectl get pods -n %s -l %s --no-headers -o custom-columns=\":metadata.name\" | head -n1", namespace, label)
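+	// Shell out to bash so the pipe to head (first matching pod) runs in a shell.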
+	cmd := exec.Command("bash", "-c", kubectlCommand)
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	Expect(err).ToNot(HaveOccurred(), stderr.String())
+
+	return strings.TrimSpace(out.String())
+}
+
+func findRoleBindings(namespace string) string {
+	kubectlCommand := fmt.Sprintf("kubectl get rolebindings -n %s --no-headers -o custom-columns=\":metadata.name\"", namespace)
+	cmd := exec.Command("bash", "-c", kubectlCommand)
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	Expect(err).ToNot(HaveOccurred(), stderr.String())
+
+	return strings.TrimSpace(out.String())
+}
diff --git a/tests/fixtures/simple.yaml b/tests/fixtures/simple.yaml
index 212b607..bd5e5b7 100644
--- a/tests/fixtures/simple.yaml
+++ b/tests/fixtures/simple.yaml
@@ -6,7 +6,7 @@ spec:
   imageName: "quay.io/kairos/core-opensuse:latest"
   iso: true
   bundles:
-  - quay.io/kairos/packages:goreleaser-utils-1.11.1
+  - quay.io/kairos/packages:goreleaser-utils-1.13.1
   grubConfig: |
     search --file --set=root /boot/kernel.xz
     set default=0
@@ -49,4 +49,4 @@ spec:
     device: "/dev/sda"
     reboot: true
     poweroff: true
-    auto: true # Required, for automated installations
\ No newline at end of file
+    auto: true # Required, for automated installations

From 782c7caac9e1206e23ed9909bbe81a73d5cff873 Mon Sep 17 00:00:00 2001
From: Dimitris Karakasilis
Date: Wed, 14 Dec 2022 16:55:25 +0200
Subject: [PATCH 4/6] Add test pipeline to PRs

Signed-off-by: Dimitris Karakasilis
---
 .github/workflows/test.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 15c0338..9041804 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -7,6 +7,7 @@ on:
       - master
     tags:
       - '*'
+  pull_request:

 jobs:
   docker:
@@ -16,4 +17,4 @@ jobs:
         uses: actions/checkout@v2
       - name: Test
         run: |
-          make kind-e2e-tests
\ No newline at end of file
+          make kind-e2e-tests

From 4e5e383b782f625a3b39deec2c3ed9ae64cfc61e Mon Sep 17 00:00:00 2001
From: Dimitris Karakasilis
Date: Thu, 15 Dec 2022 10:42:48 +0200
Subject: [PATCH 5/6] Remove unneeded permission to manage services

Signed-off-by: Dimitris Karakasilis
---
 config/rbac/role_custom.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/config/rbac/role_custom.yaml b/config/rbac/role_custom.yaml
index 358380f..0cfb4b6 100644
--- a/config/rbac/role_custom.yaml
+++ b/config/rbac/role_custom.yaml
@@ -65,7 +65,6 @@ rules:
 - apiGroups:
   - ""
   resources:
-  - services
   - configmaps
   verbs:
   - get

From cfc4ebf3081e7a271b0373cee304926dc603ffb6 Mon Sep 17 00:00:00 2001
From: Dimitris Karakasilis
Date: Thu, 15 Dec 2022 12:29:49 +0200
Subject: [PATCH 6/6] Un-hardcode the copier image

Signed-off-by: Dimitris Karakasilis
---
 controllers/job.go                   |  5 +----
 controllers/osartifact_controller.go | 10 +++++-----
 main.go                              |  5 ++++-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/controllers/job.go b/controllers/job.go
index 231d0f5..411418a 100644
--- a/controllers/job.go
+++ b/controllers/job.go
@@ -326,7 +326,6 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 		pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
 	}

-	// TODO: Shell out to `kubectl cp`? Why not?
 	// TODO: Does it make sense to build the image and not push it? Maybe remove
 	// this flag?
 	if pushImage {
@@ -334,9 +333,7 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	}

 	pod.Containers = []v1.Container{
-		// TODO: Add kubectl to osbuilder-tools?
-		//createPushToServerImageContainer(r.ToolImage),
-		createPushToServerImageContainer("bitnami/kubectl", r.ArtifactPodInfo),
+		createPushToServerImageContainer(r.CopierImage, r.ArtifactPodInfo),
 	}

 	jobLabels := genJobLabel(artifact.Name)
diff --git a/controllers/osartifact_controller.go b/controllers/osartifact_controller.go
index 5cd1e09..bdeab87 100644
--- a/controllers/osartifact_controller.go
+++ b/controllers/osartifact_controller.go
@@ -48,11 +48,11 @@ type ArtifactPodInfo struct {
 // OSArtifactReconciler reconciles a OSArtifact object
 type OSArtifactReconciler struct {
 	client.Client
-	Scheme                  *runtime.Scheme
-	restConfig              *rest.Config
-	clientSet               *kubernetes.Clientset
-	ServingImage, ToolImage string
-	ArtifactPodInfo         ArtifactPodInfo
+	Scheme                               *runtime.Scheme
+	restConfig                           *rest.Config
+	clientSet                            *kubernetes.Clientset
+	ServingImage, ToolImage, CopierImage string
+	ArtifactPodInfo                      ArtifactPodInfo
 }

 func genObjectMeta(artifact buildv1alpha1.OSArtifact) metav1.ObjectMeta {
diff --git a/main.go b/main.go
index ab33883..1bc3c37 100644
--- a/main.go
+++ b/main.go
@@ -52,10 +52,12 @@ func main() {
 	var metricsAddr string
 	var enableLeaderElection bool
 	var probeAddr string
-	var serveImage, toolImage string
+	var serveImage, toolImage, copierImage string
 	var copyToPodLabel, copyToNamespace, copyToPath, copierRole string

 	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
+
+	flag.StringVar(&copierImage, "copier-image", "quay.io/kairos/kubectl", "The image that is used to copy artifacts to the server pod.")
 	flag.StringVar(&serveImage, "serve-image", "nginx", "Serve image.")
 	// It needs luet inside
 	flag.StringVar(&toolImage, "tool-image", "quay.io/kairos/osbuilder-tools:latest", "Tool image.")
@@ -106,6 +108,7 @@ func main() {
 		Client:       mgr.GetClient(),
 		ServingImage: serveImage,
 		ToolImage:    toolImage,
+		CopierImage:  copierImage,
 		ArtifactPodInfo: controllers.ArtifactPodInfo{
 			Label:     copyToPodLabel,
 			Namespace: copyToNamespace,