diff --git a/Makefile b/Makefile
index ed94f3b..b13b70f 100644
--- a/Makefile
+++ b/Makefile
@@ -150,11 +150,9 @@ deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in
 # TODO: No need to build and then apply. `kubectl apply -k config/default` does the trick
 	$(KUSTOMIZE) build config/default | kubectl apply -f -
 
-
 .PHONY: deploy-dev
 deploy-dev: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
-	$(KUSTOMIZE) build config/dev | kubectl apply -f -
+	kubectl apply -k config/dev
 
 .PHONY: undeploy
 undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
index 0639d7c..e66a6f2 100644
--- a/config/default/kustomization.yaml
+++ b/config/default/kustomization.yaml
@@ -73,3 +73,15 @@ vars:
 #    kind: Service
 #    version: v1
 #    name: webhook-service
+
+- name: NGINX_NAMESPACE
+  objref:
+    kind: Namespace
+    name: system
+    apiVersion: v1
+
+- name: ARTIFACT_COPIER_ROLE
+  objref:
+    kind: Role
+    name: artifactCopier
+    apiVersion: rbac.authorization.k8s.io/v1
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
index 0b4f144..fd03f29 100644
--- a/config/default/manager_auth_proxy_patch.yaml
+++ b/config/default/manager_auth_proxy_patch.yaml
@@ -40,5 +40,5 @@ spec:
         - "--leader-elect"
         - "--copy-to-namespace=$(NGINX_NAMESPACE)"
         - "--copy-role=$(ARTIFACT_COPIER_ROLE)"
-        - --copy-to-pod-label="app.kubernetes.io/name=osbuilder-nginx"
+        - --copy-to-pod-label=app.kubernetes.io/name=osbuilder-nginx
         - --copy-to-path="/usr/share/nginx/html"
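The new entries extend the existing (scaffolded) vars: key — note that the new list items attach to that key rather than redeclaring it, since kustomize rejects duplicate mapping keys. Kustomize resolves NGINX_NAMESPACE and ARTIFACT_COPIER_ROLE to the metadata.name of the referenced Namespace and Role (after the namePrefix is applied) and substitutes them into the manager's --copy-* arguments above. main.go is not part of this diff, so as a hedged sketch only — the flag names are taken from the patch, while the parsing code and the Role field are assumptions — this is roughly how the arguments could land in the reconciler's ArtifactPodInfo:

// Illustrative only: how a kubebuilder-style main.go might consume the
// --copy-* flags set by the kustomize patch above. The flag names are real
// (they appear in the patch); the parsing code and the Role field are
// assumptions, since main.go is not shown in this diff.
package main

import (
	"flag"
	"fmt"
)

type ArtifactPodInfo struct {
	Label     string
	Namespace string
	Path      string
	Role      string
}

func main() {
	var info ArtifactPodInfo
	flag.StringVar(&info.Namespace, "copy-to-namespace", "", "namespace of the artifact server pod")
	flag.StringVar(&info.Role, "copy-role", "", "role that grants permission to copy into the artifact pod")
	flag.StringVar(&info.Label, "copy-to-pod-label", "", "label selector used to locate the artifact pod")
	flag.StringVar(&info.Path, "copy-to-path", "", "directory inside the artifact pod to copy artifacts into")
	flag.Parse()
	fmt.Printf("artifacts will be copied to %s:%s (pods matching %q)\n", info.Namespace, info.Path, info.Label)
}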
diff --git a/config/dev/kustomization.yaml b/config/dev/kustomization.yaml
index 74d6d17..0f0959a 100644
--- a/config/dev/kustomization.yaml
+++ b/config/dev/kustomization.yaml
@@ -1,74 +1,7 @@
-# Adds namespace to all resources.
-namespace: osartifactbuilder-operator-system
-
-# Value of this field is prepended to the
-# names of all resources, e.g. a deployment named
-# "wordpress" becomes "alices-wordpress".
-# Note that it should also match with the prefix (text before '-') of the namespace
-# field above.
-namePrefix: osartifactbuilder-operator-
-
-# Labels to add to all resources and selectors.
-#commonLabels:
-#  someName: someValue
-
 bases:
-- ../crd
-- ../rbac
-- ../manager
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
-#- ../webhook
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
-#- ../certmanager
-# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
-#- ../prometheus
+- ../default
 
-patchesStrategicMerge:
-# Protect the /metrics endpoint by putting it behind auth.
-# If you want your controller-manager to expose the /metrics
-# endpoint w/o any authn/z, please comment the following line.
-- manager_auth_proxy_patch.yaml
-
-# Mount the controller config file for loading manager configurations
-# through a ComponentConfig type
-#- manager_config_patch.yaml
-
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
-#- manager_webhook_patch.yaml
-
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
-# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
-# 'CERTMANAGER' needs to be enabled to use ca injection
-#- webhookcainjection_patch.yaml
-
-# the following config is for teaching kustomize how to do var substitution
-vars:
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
-#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
-#  objref:
-#    kind: Certificate
-#    group: cert-manager.io
-#    version: v1
-#    name: serving-cert # this name should match the one in certificate.yaml
-#  fieldref:
-#    fieldpath: metadata.namespace
-#- name: CERTIFICATE_NAME
-#  objref:
-#    kind: Certificate
-#    group: cert-manager.io
-#    version: v1
-#    name: serving-cert # this name should match the one in certificate.yaml
-#- name: SERVICE_NAMESPACE # namespace of the service
-#  objref:
-#    kind: Service
-#    version: v1
-#    name: webhook-service
-#  fieldref:
-#    fieldpath: metadata.namespace
-#- name: SERVICE_NAME
-#  objref:
-#    kind: Service
-#    version: v1
-#    name: webhook-service
+images:
+- name: quay.io/kairos/osbuilder
+  newName: quay.io/kairos/osbuilder
+  newTag: test
diff --git a/config/dev/manager_auth_proxy_patch.yaml b/config/dev/manager_auth_proxy_patch.yaml
deleted file mode 100644
index bf7ce9d..0000000
--- a/config/dev/manager_auth_proxy_patch.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-# This patch inject a sidecar container which is a HTTP proxy for the
-# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: kube-rbac-proxy
-        securityContext:
-          allowPrivilegeEscalation: false
-        # TODO(user): uncomment for common cases that do not require escalating privileges
-        # capabilities:
-        #   drop:
-        #     - "ALL"
-        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0
-        args:
-        - "--secure-listen-address=0.0.0.0:8443"
-        - "--upstream=http://127.0.0.1:8080/"
-        - "--logtostderr=true"
-        - "--v=0"
-        ports:
-        - containerPort: 8443
-          protocol: TCP
-          name: https
-        resources:
-          limits:
-            cpu: 500m
-            memory: 128Mi
-          requests:
-            cpu: 5m
-            memory: 64Mi
-      - name: manager
-        args:
-        - "--health-probe-bind-address=:8081"
-        - "--metrics-bind-address=127.0.0.1:8080"
-        - "--leader-elect"
diff --git a/config/dev/manager_config_patch.yaml b/config/dev/manager_config_patch.yaml
deleted file mode 100644
index 2def14f..0000000
--- a/config/dev/manager_config_patch.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: manager
-        imagePullPolicy: Never
-        args:
-        - "--config=controller_manager_config.yaml"
-        volumeMounts:
-        - name: manager-config
-          mountPath: /controller_manager_config.yaml
-          subPath: controller_manager_config.yaml
-      volumes:
-      - name: manager-config
-        configMap:
-          name: manager-config
diff --git a/config/nginx/role.yaml b/config/nginx/role.yaml
index 9d895b9..ddeb53e 100644
--- a/config/nginx/role.yaml
+++ b/config/nginx/role.yaml
@@ -9,4 +9,10 @@ rules:
   - pods
   verbs:
   - list
-
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
diff --git a/config/rbac/role_custom.yaml b/config/rbac/role_custom.yaml
index a451127..358380f 100644
--- a/config/rbac/role_custom.yaml
+++ b/config/rbac/role_custom.yaml
@@ -87,3 +87,9 @@ rules:
   verbs:
   - list
   - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
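Both role files now grant list/get on pods plus create on the pods/exec subresource: list/get locates the nginx pod, and exec is what lets the copier stream a tar archive into it (and lets the controller's cleanup path delete artifacts) without mounting anything. A quick way to confirm such a grant took effect is a SelfSubjectAccessReview — a minimal sketch, assuming an in-cluster config and using the dev namespace from this PR:

// Sketch: ask the API server whether the current identity may create
// pods/exec in the artifact pod's namespace, i.e. whether the roles above
// actually cover the copy/cleanup path. Namespace value is illustrative.
package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes this check runs inside the cluster
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	review := &authv1.SelfSubjectAccessReview{
		Spec: authv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authv1.ResourceAttributes{
				Namespace:   "osartifactbuilder-operator-system", // namespace of the nginx pod
				Verb:        "create",
				Resource:    "pods",
				Subresource: "exec",
			},
		},
	}
	result, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), review, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed to exec into pods:", result.Status.Allowed)
}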
diff --git a/controllers/job.go b/controllers/job.go
index 326117d..231d0f5 100644
--- a/controllers/job.go
+++ b/controllers/job.go
@@ -17,6 +17,7 @@ limitations under the License.
 package controllers
 
 import (
+	"bytes"
 	"context"
 	"fmt"
 
@@ -28,6 +29,9 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/remotecommand"
 )
 
 func genJobLabel(s string) map[string]string {
@@ -87,14 +91,15 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
 }
 
 func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
+	command := fmt.Sprintf("tar cf - -C artifacts/ . | kubectl exec -i -n %s $(kubectl get pods -l %s -n %s --no-headers -o custom-columns=\":metadata.name\" | head -n1) -- tar xf - -C %s", artifactPodInfo.Namespace, artifactPodInfo.Label, artifactPodInfo.Namespace, artifactPodInfo.Path)
+	fmt.Printf("command = %+v\n", command)
+
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
 		Name:            "push-to-server",
 		Image:           containerImage,
 		Command:         []string{"/bin/bash", "-cxe"},
-		Args: []string{
-			fmt.Sprintf("kubectl get pods -n %s", artifactPodInfo.Namespace),
-		},
+		Args:            []string{command},
 		VolumeMounts: []v1.VolumeMount{
 			{
 				Name:      "rootfs",
@@ -457,3 +462,70 @@ func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1a
 
 	return err
 }
+
+// removeRBAC deletes the role binding between the service account of this artifact
+// and the CopierRole. The ServiceAccount is removed automatically through the Owner
+// relationship with the OSArtifact. The RoleBinding can't have it as an owner
+// because it is in a different Namespace.
+func (r *OSArtifactReconciler) removeRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
+	err := r.clientSet.RbacV1().RoleBindings(r.ArtifactPodInfo.Namespace).
+		Delete(ctx, artifact.Name, metav1.DeleteOptions{})
+	// Ignore not found. No need to do anything.
+	if err != nil && apierrors.IsNotFound(err) {
+		return nil
+	}
+
+	return err
+}
+
+func (r *OSArtifactReconciler) removeArtifacts(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
+	// Find the artifact pod using the configured label selector
+	fmt.Printf("r.ArtifactPodInfo.Label = %+v\n", r.ArtifactPodInfo.Label)
+	pods, err := r.clientSet.CoreV1().Pods(r.ArtifactPodInfo.Namespace).
+		List(ctx, metav1.ListOptions{LabelSelector: r.ArtifactPodInfo.Label})
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("listing pods with label %s in namespace %s", r.ArtifactPodInfo.Label, r.ArtifactPodInfo.Namespace))
+	}
+	if len(pods.Items) < 1 {
+		return errors.New("no artifact pod found")
+	}
+	pod := pods.Items[0]
+
+	stdout, stderr, err := r.executeRemoteCommand(r.ArtifactPodInfo.Namespace, pod.Name, fmt.Sprintf("rm -rf %s/%s.*", r.ArtifactPodInfo.Path, artifact.Name))
+	if err != nil {
+		return errors.Wrap(err, fmt.Sprintf("%s\n%s", stdout, stderr))
+	}
+	return nil
+}
+
+func (r *OSArtifactReconciler) executeRemoteCommand(namespace, podName, command string) (string, string, error) {
+	buf := &bytes.Buffer{}
+	errBuf := &bytes.Buffer{}
+	request := r.clientSet.CoreV1().RESTClient().
+		Post().
+		Namespace(namespace).
+		Resource("pods").
+		Name(podName).
+		SubResource("exec").
+		VersionedParams(&v1.PodExecOptions{
+			Command: []string{"/bin/sh", "-c", command},
+			Stdin:   false,
+			Stdout:  true,
+			Stderr:  true,
+			TTY:     true,
+		}, scheme.ParameterCodec)
+
+	exec, err := remotecommand.NewSPDYExecutor(r.restConfig, "POST", request.URL())
+	if err != nil {
+		return "", "", err
+	}
+	err = exec.Stream(remotecommand.StreamOptions{
+		Stdout: buf,
+		Stderr: errBuf,
+	})
+	if err != nil {
+		return "", "", fmt.Errorf("failed executing command %q on %v/%v: %w", command, namespace, podName, err)
+	}
+
+	return buf.String(), errBuf.String(), nil
+}
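One caveat worth noting in executeRemoteCommand: it sets TTY: true while also wiring up a stderr buffer, and with a TTY allocated the remote end multiplexes stderr onto the terminal stream (kubectl exec likewise drops the stderr stream when -t is set), so errBuf will most likely stay empty. A minimal standalone sketch of the same SPDY exec call with TTY: false, which keeps the two streams separate — the pod coordinates and kubeconfig loading are placeholders, not part of this PR:

// Sketch: exec `ls /` in a pod over SPDY without a TTY so stdout and stderr
// arrive as distinct streams. Inside the controller you would reuse
// mgr.GetConfig() as this diff does; here a local kubeconfig is assumed.
package main

import (
	"bytes"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	req := clientset.CoreV1().RESTClient().
		Post().
		Namespace("default"). // placeholder namespace
		Resource("pods").
		Name("some-pod"). // placeholder pod name
		SubResource("exec").
		VersionedParams(&v1.PodExecOptions{
			Command: []string{"/bin/sh", "-c", "ls /"},
			Stdout:  true,
			Stderr:  true,
			TTY:     false, // no TTY: stderr stays a separate stream
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		panic(err)
	}
	var stdout, stderr bytes.Buffer
	if err := exec.Stream(remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr}); err != nil {
		panic(err)
	}
	fmt.Print(stdout.String(), stderr.String())
}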
diff --git a/controllers/osartifact_controller.go b/controllers/osartifact_controller.go
index 309b5e1..5cd1e09 100644
--- a/controllers/osartifact_controller.go
+++ b/controllers/osartifact_controller.go
@@ -25,16 +25,19 @@ import (
 	"github.com/pkg/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 	"sigs.k8s.io/cluster-api/util/patch"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
+const FinalizerName = "build.kairos.io/osbuilder-finalizer"
+
 type ArtifactPodInfo struct {
 	Label     string
 	Namespace string
@@ -46,6 +49,7 @@ type ArtifactPodInfo struct {
 	client.Client
 	Scheme     *runtime.Scheme
+	restConfig *rest.Config
 	clientSet  *kubernetes.Clientset
 	ServingImage, ToolImage string
 	ArtifactPodInfo ArtifactPodInfo
@@ -73,6 +77,12 @@ func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
 //+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/finalizers,verbs=update
 
+// TODO: Is this ^ how I should have created rbac permissions for the controller?
+// - git commit all changes
+// - generate code with kubebuilder
+// - check if my permissions were removed
+// - do it properly
+
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
 // TODO(user): Modify the Reconcile function to compare the state specified by
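On the TODO above: the usual kubebuilder answer is to declare permissions as //+kubebuilder:rbac markers next to the generated ones and run make manifests, which regenerates config/rbac/role.yaml via controller-gen (the hand-written role_custom.yaml sits outside that generator, so it is bypassed rather than removed). A sketch of markers that would cover the access added in this diff — the verb/group combinations mirror the YAML above, but treat the exact set as an assumption:

// Candidate markers for the calls added in this PR (list/get pods, exec into
// them, manage the copier RoleBinding); regenerate with `make manifests`.
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list
//+kubebuilder:rbac:groups="",resources=pods/exec,verbs=create
//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete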
@@ -95,17 +105,22 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 	logger.Info(fmt.Sprintf("Reconciling %v", osbuild))
 
+	stop, err := r.handleFinalizer(ctx, &osbuild)
+	if err != nil || stop {
+		return ctrl.Result{}, err
+	}
+
 	// generate configmap required for building a custom image
 	desiredConfigMap := r.genConfigMap(osbuild)
 	logger.Info(fmt.Sprintf("Checking configmap %v", osbuild))
 
-	cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, v1.GetOptions{})
+	cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, metav1.GetOptions{})
 	if cfgMap == nil || apierrors.IsNotFound(err) {
-		logger.Info(fmt.Sprintf("Creating service %v", desiredConfigMap))
+		logger.Info(fmt.Sprintf("Creating config map %v", desiredConfigMap))
 
-		cfgMap, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, v1.CreateOptions{})
+		_, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, metav1.CreateOptions{})
 		if err != nil {
-			logger.Error(err, "Failed while creating svc")
+			logger.Error(err, "Failed while creating config map")
 			return ctrl.Result{}, err
 		}
 		return ctrl.Result{Requeue: true}, err
@@ -116,27 +131,17 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 	logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))
 
-	// TODO: We need to create the Role in the namespace where the nginx Pod is,
-	// so that the copier container has permissions to copy to that Pod.
-	// The nginx Pod should be defined in the OSArtifact CRD as in "when done
-	// write the results in this Namespace:Pod, under this path".
-	// The controller will try to create RBAC with the proper permissions but
-	// Kubernetes requires us to have the permissions before we grant them to others.
-	// This means the controller should have these permissions already.
-	// Since we control the nginx, we can make it so but if the user specifies
-	// some other Pod it may fail. Also, every OSArtifact will have to specify
-	// the nginx Pod which makes it cumbersome.
 	err = r.createRBAC(ctx, osbuild)
 	if err != nil {
 		return ctrl.Result{Requeue: true}, err
 	}
 
 	desiredJob := r.genJob(osbuild)
-	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, v1.GetOptions{})
+	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, metav1.GetOptions{})
 	if job == nil || apierrors.IsNotFound(err) {
 		logger.Info(fmt.Sprintf("Creating Job %v", job))
 
-		job, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, v1.CreateOptions{})
+		_, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, metav1.CreateOptions{})
 		if err != nil {
 			logger.Error(err, "Failed while creating job")
 			return ctrl.Result{}, nil
@@ -179,13 +184,66 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *OSArtifactReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	clientset, err := kubernetes.NewForConfig(mgr.GetConfig())
+	cfg := mgr.GetConfig()
+	clientset, err := kubernetes.NewForConfig(cfg)
 	if err != nil {
 		return err
 	}
+	r.restConfig = cfg
 	r.clientSet = clientset
 
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&buildv1alpha1.OSArtifact{}).
 		Complete(r)
 }
+
+// Returns true if reconciliation should stop or false otherwise
+func (r *OSArtifactReconciler) handleFinalizer(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) (bool, error) {
+	// examine DeletionTimestamp to determine if object is under deletion
+	if osbuild.DeletionTimestamp.IsZero() {
+		// The object is not being deleted, so if it does not have our finalizer,
+		// then let's add the finalizer and update the object. This is equivalent
+		// to registering our finalizer.
+		if !controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
+			controllerutil.AddFinalizer(osbuild, FinalizerName)
+			if err := r.Update(ctx, osbuild); err != nil {
+				return true, err
+			}
+		}
+	} else {
+		// The object is being deleted
+		if controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
+			// our finalizer is present, so let's handle any external dependency
+			if err := r.finalize(ctx, osbuild); err != nil {
+				// if we fail to delete the external dependency here, return the
+				// error so that the operation can be retried
+				return true, err
+			}
+
+			// remove our finalizer from the list and update it.
+			controllerutil.RemoveFinalizer(osbuild, FinalizerName)
+			if err := r.Update(ctx, osbuild); err != nil {
+				return true, err
+			}
+		}
+
+		// Stop reconciliation as the item is being deleted
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// - Remove artifacts from the server Pod
+// - Delete the role binding (because it doesn't have the OSArtifact as an owner and won't be deleted automatically)
+func (r *OSArtifactReconciler) finalize(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) error {
+	if err := r.removeRBAC(ctx, *osbuild); err != nil {
+		return err
+	}
+
+	if err := r.removeArtifacts(ctx, *osbuild); err != nil {
+		return err
+	}
+
+	return nil
+}
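Until RemoveFinalizer succeeds, a delete request only sets deletionTimestamp and the object lingers — that is the window in which finalize can clean up the remote artifacts and the cross-namespace RoleBinding. A hedged sketch of asserting that window with the controller-runtime client; the object name matches the e2e fixture below, but the helper itself is illustrative (and somewhat racy, since the controller may finalize quickly):

// Illustrative only: right after Delete, the OSArtifact should still be
// readable with a non-zero deletionTimestamp because the finalizer added
// above has not been removed yet.
package controllers_test

import (
	"context"
	"fmt"

	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func assertFinalizerBlocksDeletion(ctx context.Context, c client.Client) error {
	var artifact buildv1alpha1.OSArtifact
	key := client.ObjectKey{Namespace: "default", Name: "hello-kairos"}
	if err := c.Get(ctx, key, &artifact); err != nil {
		return err
	}
	if err := c.Delete(ctx, &artifact); err != nil {
		return err
	}
	if err := c.Get(ctx, key, &artifact); err != nil {
		return err
	}
	if artifact.DeletionTimestamp.IsZero() {
		return fmt.Errorf("expected deletionTimestamp to be set while the finalizer is pending")
	}
	return nil
}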
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + + buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1" //+kubebuilder:scaffold:imports ) diff --git a/go.mod b/go.mod index cd230d5..5928255 100644 --- a/go.mod +++ b/go.mod @@ -51,6 +51,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect diff --git a/go.sum b/go.sum index 4a58aaa..d8c53ab 100644 --- a/go.sum +++ b/go.sum @@ -82,6 +82,7 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -136,6 +137,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= @@ -375,6 +377,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent 
diff --git a/tests/e2e/e2e_simple_test.go b/tests/e2e/e2e_simple_test.go
index ab87494..62511d7 100644
--- a/tests/e2e/e2e_simple_test.go
+++ b/tests/e2e/e2e_simple_test.go
@@ -1,6 +1,10 @@
 package e2e_test
 
 import (
+	"bytes"
+	"fmt"
+	"os/exec"
+	"strings"
 	"time"
 
 	. "github.com/onsi/ginkgo/v2"
@@ -20,20 +24,91 @@ var _ = Describe("ISO build test", func() {
 			err := kubectl.Apply("", "../../tests/fixtures/simple.yaml")
 			Expect(err).ToNot(HaveOccurred())
 
-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "osartifacts", "hello-kairos", "jsonpath={.spec.imageName}")
-				return string(b)
-			}, 2*time.Minute, 2*time.Second).Should(Equal("quay.io/kairos/core-opensuse:latest"))
+			itHasTheCorrectImage()
+			itHasTheCorrectLabels()
+			itCopiesTheArtifacts()
 
-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "deployments", "hello-kairos", "jsonpath={.spec.template.metadata.labels.osbuild}")
-				return string(b)
-			}, 2*time.Minute, 2*time.Second).Should(Equal("workloadhello-kairos"))
-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "deployments", "hello-kairos", "jsonpath={.spec.status.unavailableReplicas}")
-				return string(b)
-			}, 15*time.Minute, 2*time.Second).ShouldNot(Equal("1"))
+			By("deleting the custom resource", func() {
+				err = kubectl.New().Delete("osartifacts", "-n", "default", "hello-kairos")
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			itCleansUpRoleBindings()
+			itDeletesTheArtifacts()
 		})
 	})
-
 })
+
+func itHasTheCorrectImage() {
+	Eventually(func() string {
+		b, _ := kubectl.GetData("default", "osartifacts", "hello-kairos", "jsonpath={.spec.imageName}")
+		fmt.Printf("looking for image core-opensuse:latest = %+v\n", string(b))
+		return string(b)
+	}, 2*time.Minute, 2*time.Second).Should(Equal("quay.io/kairos/core-opensuse:latest"))
+}
+
+func itHasTheCorrectLabels() {
+	Eventually(func() string {
+		b, _ := kubectl.GetData("default", "jobs", "hello-kairos", "jsonpath={.spec.template.metadata.labels.osbuild}")
+		fmt.Printf("looking for label workloadhello-kairos = %+v\n", string(b))
+		return string(b)
+	}, 2*time.Minute, 2*time.Second).Should(Equal("workloadhello-kairos"))
+}
+
+func itCopiesTheArtifacts() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		podName := strings.TrimSpace(findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx"))
+
+		out, _ := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
+
+		return out
+	}, 15*time.Minute, 2*time.Second).Should(MatchRegexp("hello-kairos.iso"))
+}
+
+func itCleansUpRoleBindings() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		rb := findRoleBindings(nginxNamespace)
+
+		return rb
+	}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos"))
+}
+
+func itDeletesTheArtifacts() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		podName := findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx")
+
+		out, err := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
+		Expect(err).ToNot(HaveOccurred(), out)
+
+		return out
+	}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos.iso"))
+}
+
+func findPodsWithLabel(namespace, label string) string {
+	kubectlCommand := fmt.Sprintf("kubectl get pods -n %s -l %s --no-headers -o custom-columns=\":metadata.name\" | head -n1", namespace, label)
+	cmd := exec.Command("bash", "-c", kubectlCommand)
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	Expect(err).ToNot(HaveOccurred(), stderr.String())
+
+	return strings.TrimSpace(out.String())
+}
+
+func findRoleBindings(namespace string) string {
+	kubectlCommand := fmt.Sprintf("kubectl get rolebindings -n %s --no-headers -o custom-columns=\":metadata.name\"", namespace)
+	cmd := exec.Command("bash", "-c", kubectlCommand)
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	Expect(err).ToNot(HaveOccurred(), stderr.String())
+
+	return strings.TrimSpace(out.String())
+}
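The two helpers above shell out to kubectl through bash and scrape custom-columns output. An equivalent sketch using client-go avoids the subprocess and the string parsing — it assumes the suite constructs a clientset during setup (not shown in this diff), with the context, kubernetes, and metav1 imports already used elsewhere in this PR:

// Sketch: client-go replacement for findPodsWithLabel; the clientset is
// assumed to be created in suite setup.
func firstPodWithLabel(ctx context.Context, clientset *kubernetes.Clientset, namespace, label string) (string, error) {
	pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: label})
	if err != nil {
		return "", err
	}
	if len(pods.Items) == 0 {
		return "", fmt.Errorf("no pod with label %s in namespace %s", label, namespace)
	}
	return pods.Items[0].Name, nil
}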
cmd := exec.Command("bash", "-c", kubectlCommand) + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + err := cmd.Run() + Expect(err).ToNot(HaveOccurred(), stderr.String()) + + return strings.TrimSpace(out.String()) +} + +func findRoleBindings(namespace string) string { + kubectlCommand := fmt.Sprintf("kubectl get rolebindings -n %s --no-headers -o custom-columns=\":metadata.name\"", namespace) + cmd := exec.Command("bash", "-c", kubectlCommand) + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + err := cmd.Run() + Expect(err).ToNot(HaveOccurred(), stderr.String()) + + return strings.TrimSpace(out.String()) +} diff --git a/tests/fixtures/simple.yaml b/tests/fixtures/simple.yaml index 212b607..bd5e5b7 100644 --- a/tests/fixtures/simple.yaml +++ b/tests/fixtures/simple.yaml @@ -6,7 +6,7 @@ spec: imageName: "quay.io/kairos/core-opensuse:latest" iso: true bundles: - - quay.io/kairos/packages:goreleaser-utils-1.11.1 + - quay.io/kairos/packages:goreleaser-utils-1.13.1 grubConfig: | search --file --set=root /boot/kernel.xz set default=0 @@ -49,4 +49,4 @@ spec: device: "/dev/sda" reboot: true poweroff: true - auto: true # Required, for automated installations \ No newline at end of file + auto: true # Required, for automated installations