diff --git a/Makefile b/Makefile
index 056a692..ed94f3b 100644
--- a/Makefile
+++ b/Makefile
@@ -147,6 +147,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
 .PHONY: deploy
 deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	# TODO: No need to build and then apply. `kubectl apply -k config/default` does the trick
 	$(KUSTOMIZE) build config/default | kubectl apply -f -
 
@@ -282,4 +283,4 @@ kind-e2e-tests: ginkgo kind-setup install undeploy-dev deploy-dev e2e-tests
 kubesplit: manifests kustomize
 	rm -rf helm-chart
 	mkdir helm-chart
-	$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart
\ No newline at end of file
+	$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart
diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml
index bf7ce9d..0b4f144 100644
--- a/config/default/manager_auth_proxy_patch.yaml
+++ b/config/default/manager_auth_proxy_patch.yaml
@@ -38,3 +38,7 @@ spec:
         - "--health-probe-bind-address=:8081"
         - "--metrics-bind-address=127.0.0.1:8080"
         - "--leader-elect"
+        - "--copy-to-namespace=$(NGINX_NAMESPACE)"
+        - "--copy-role=$(ARTIFACT_COPIER_ROLE)"
+        - "--copy-to-pod-label=app.kubernetes.io/name=osbuilder-nginx"
+        - "--copy-to-path=/usr/share/nginx/html"
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 26eef16..437bd4d 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -1,3 +1,10 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: system
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
diff --git a/config/nginx/kustomization.yaml b/config/nginx/kustomization.yaml
index a944d00..d587479 100644
--- a/config/nginx/kustomization.yaml
+++ b/config/nginx/kustomization.yaml
@@ -1,3 +1,4 @@
 resources:
 - deployment.yaml
 - service.yaml
+- role.yaml
diff --git a/config/nginx/role.yaml b/config/nginx/role.yaml
new file mode 100644
index 0000000..9d895b9
--- /dev/null
+++ b/config/nginx/role.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: artifactCopier
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+
diff --git a/config/rbac/role_custom.yaml b/config/rbac/role_custom.yaml
index 53e0dbd..a451127 100644
--- a/config/rbac/role_custom.yaml
+++ b/config/rbac/role_custom.yaml
@@ -5,6 +5,31 @@ metadata:
   creationTimestamp: null
   name: manager-role
 rules:
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - "rbac.authorization.k8s.io"
+  resources:
+  - roles
+  - rolebindings
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - build.kairos.io
   resources:
@@ -54,3 +79,11 @@
   - get
   - create
   - update
+# Temporary, so that it can grant these permissions to the created role
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - get
diff --git a/controllers/job.go b/controllers/job.go
index 3eaf1be..326117d 100644
--- a/controllers/job.go
+++ b/controllers/job.go
@@ -17,12 +17,16 @@ limitations under the License.
 */
 
 package controllers
 
 import (
+	"context"
 	"fmt"
 
 	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
+	"github.com/pkg/errors"
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -34,6 +38,7 @@ func genJobLabel(s string) map[string]string {
 
 // TODO: Handle registry auth
 // TODO: This shells out, but needs ENV_VAR with key refs mapping
+// TODO: Cache downloaded images?
 func unpackContainer(id, containerImage, pullImage string, pullOptions buildv1alpha1.Pull) v1.Container {
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
@@ -81,6 +86,28 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
 	}
 }
 
+func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
+	return v1.Container{
+		ImagePullPolicy: v1.PullAlways,
+		Name:            "push-to-server",
+		Image:           containerImage,
+		Command:         []string{"/bin/bash", "-cxe"},
+		Args: []string{
+			fmt.Sprintf("kubectl get pods -n %s", artifactPodInfo.Namespace),
+		},
+		VolumeMounts: []v1.VolumeMount{
+			{
+				Name:      "rootfs",
+				MountPath: "/rootfs",
+			},
+			{
+				Name:      "artifacts",
+				MountPath: "/artifacts",
+			},
+		},
+	}
+}
+
 func osReleaseContainer(containerImage string) v1.Container {
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
@@ -105,16 +132,12 @@
 }
 
 func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv1.Job {
-	objMeta := metav1.ObjectMeta{
-		Name:            artifact.Name,
-		Namespace:       artifact.Namespace,
-		OwnerReferences: genOwner(artifact),
-	}
+	objMeta := genObjectMeta(artifact)
 
 	pushImage := artifact.Spec.PushOptions.Push
 
 	privileged := false
-	serviceAccount := false
+	serviceAccount := true
 
 	cmd := fmt.Sprintf(
 		"/entrypoint.sh --debug --name %s build-iso --date=false --output /artifacts dir:/rootfs",
@@ -248,9 +271,11 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 
 	pod := v1.PodSpec{
 		AutomountServiceAccountToken: &serviceAccount,
+		ServiceAccountName:           objMeta.Name,
+		RestartPolicy:                v1.RestartPolicyNever,
 		Volumes: []v1.Volume{
 			{
-				Name:         "public",
+				Name:         "artifacts",
 				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
 			},
 			{
@@ -274,7 +299,6 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 
 	if artifact.Spec.OSRelease != "" {
 		pod.InitContainers = append(pod.InitContainers, osReleaseContainer(r.ToolImage))
-
 	}
 
 	if artifact.Spec.ISO || artifact.Spec.Netboot {
@@ -297,10 +321,17 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 		pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
 	}
 
+	// TODO: Shell out to `kubectl cp`? Why not?
+	// TODO: Does it make sense to build the image and not push it? Maybe remove
+	// this flag?
 	if pushImage {
-		pod.Containers = []v1.Container{
-			createImageContainer(r.ToolImage, artifact.Spec.PushOptions),
-		}
+		pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact.Spec.PushOptions))
+	}
+
+	pod.Containers = []v1.Container{
+		// TODO: Add kubectl to osbuilder-tools?
+		//createPushToServerImageContainer(r.ToolImage),
+		createPushToServerImageContainer("bitnami/kubectl", r.ArtifactPodInfo),
 	}
 
 	jobLabels := genJobLabel(artifact.Name)
@@ -308,7 +339,6 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	job := batchv1.Job{
 		ObjectMeta: objMeta,
 		Spec: batchv1.JobSpec{
-			Selector: &metav1.LabelSelector{MatchLabels: jobLabels},
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: jobLabels,
@@ -320,3 +350,110 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 
 	return &job
 }
+
+// createCopierServiceAccount creates a service account that has the permissions to
+// copy the artifacts to the http server Pod. This service account is used by
+// the "push to server" container.
+func (r *OSArtifactReconciler) createCopierServiceAccount(ctx context.Context, objMeta metav1.ObjectMeta) error {
+	sa, err := r.clientSet.CoreV1().
+		ServiceAccounts(objMeta.Namespace).Get(ctx, objMeta.Name, metav1.GetOptions{})
+	if sa == nil || apierrors.IsNotFound(err) {
+		t := true
+		_, err := r.clientSet.CoreV1().ServiceAccounts(objMeta.Namespace).Create(ctx,
+			&v1.ServiceAccount{
+				ObjectMeta:                   objMeta,
+				AutomountServiceAccountToken: &t,
+			}, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// func (r *OSArtifactReconciler) createCopierRole(ctx context.Context, objMeta metav1.ObjectMeta) error {
+// 	role, err := r.clientSet.RbacV1().
+// 		Roles(objMeta.Namespace).
+// 		Get(ctx, objMeta.Name, metav1.GetOptions{})
+// 	if role == nil || apierrors.IsNotFound(err) {
+// 		_, err := r.clientSet.RbacV1().Roles(objMeta.Namespace).Create(ctx,
+// 			&rbacv1.Role{
+// 				ObjectMeta: objMeta,
+// 				Rules: []rbacv1.PolicyRule{
+// 					// TODO: The actual permission we need is the one to copy to a Pod.
+// 					// The Pod is in another namespace, so we need a cluster-wide permission.
+// 					// This can get viral because the controller needs to have the permissions
+// 					// if it is to grant them to the Job.
+// 					{
+// 						Verbs:     []string{"list"},
+// 						APIGroups: []string{""},
+// 						Resources: []string{"pods"},
+// 					},
+// 				},
+// 			},
+// 			metav1.CreateOptions{},
+// 		)
+// 		if err != nil {
+// 			return err
+// 		}
+// 	}
+
+// 	return err
+// }
+
+func (r *OSArtifactReconciler) createCopierRoleBinding(ctx context.Context, objMeta metav1.ObjectMeta) error {
+	newrb := &rbacv1.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      objMeta.Name,
+			Namespace: r.ArtifactPodInfo.Namespace,
+			// TODO: We can't have cross-namespace owners. The role binding will have to be deleted explicitly by the reconciler (finalizer?)
+			// OwnerReferences: objMeta.OwnerReferences,
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "Role",
+			Name:     r.ArtifactPodInfo.Role,
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      "ServiceAccount",
+				Name:      objMeta.Name,
+				Namespace: objMeta.Namespace,
+			},
+		},
+	}
+
+	rb, err := r.clientSet.RbacV1().
+		RoleBindings(r.ArtifactPodInfo.Namespace).
+		Get(ctx, objMeta.Name, metav1.GetOptions{})
+	if rb == nil || apierrors.IsNotFound(err) {
+		_, err := r.clientSet.RbacV1().
+			RoleBindings(r.ArtifactPodInfo.Namespace).
+			Create(ctx, newrb, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// createRBAC creates a ServiceAccount and a binding to the copier Role, so that
+// the container that copies the artifacts to the http server Pod has the
+// permissions to do so.
+func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
+	objMeta := genObjectMeta(artifact)
+
+	err := r.createCopierServiceAccount(ctx, objMeta)
+	if err != nil {
+		return errors.Wrap(err, "creating a service account")
+	}
+
+	err = r.createCopierRoleBinding(ctx, objMeta)
+	if err != nil {
+		return errors.Wrap(err, "creating a role binding for the copy-role")
+	}
+
+	return err
+}
diff --git a/controllers/osartifact_controller.go b/controllers/osartifact_controller.go
index 4d8514b..309b5e1 100644
--- a/controllers/osartifact_controller.go
+++ b/controllers/osartifact_controller.go
@@ -35,12 +35,28 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
+type ArtifactPodInfo struct {
+	Label     string
+	Namespace string
+	Path      string
+	Role      string
+}
+
 // OSArtifactReconciler reconciles a OSArtifact object
 type OSArtifactReconciler struct {
 	client.Client
 	Scheme                  *runtime.Scheme
 	clientSet               *kubernetes.Clientset
 	ServingImage, ToolImage string
+	ArtifactPodInfo         ArtifactPodInfo
+}
+
+func genObjectMeta(artifact buildv1alpha1.OSArtifact) metav1.ObjectMeta {
+	return metav1.ObjectMeta{
+		Name:            artifact.Name,
+		Namespace:       artifact.Namespace,
+		OwnerReferences: genOwner(artifact),
+	}
 }
 
 func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
@@ -100,6 +116,21 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 
 	logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))
 
+	// TODO: We need to create the Role in the namespace where the nginx Pod is,
+	// so that the copier container has permissions to copy to that Pod.
+	// The nginx Pod should be defined in the OSArtifact CRD as in "when done,
+	// write the results to this Namespace:Pod, under this path".
+	// The controller will try to create RBAC with the proper permissions, but
+	// Kubernetes requires us to have the permissions before we grant them to others.
+	// This means the controller should have these permissions already.
+	// Since we control the nginx Pod, we can make it so, but if the user specifies
+	// some other Pod it may fail. Also, every OSArtifact will have to specify
+	// the nginx Pod, which makes it cumbersome.
+	err = r.createRBAC(ctx, osbuild)
+	if err != nil {
+		return ctrl.Result{Requeue: true}, err
+	}
+
 	desiredJob := r.genJob(osbuild)
 	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, v1.GetOptions{})
 	if job == nil || apierrors.IsNotFound(err) {
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index b97f98e..bb1c0c8 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -17,20 +17,15 @@ limitations under the License.
 
 package controllers
 
 import (
-	"path/filepath"
 	"testing"
 
-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - - buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1" //+kubebuilder:scaffold:imports ) diff --git a/main.go b/main.go index 61a2ae5..ab33883 100644 --- a/main.go +++ b/main.go @@ -53,11 +53,19 @@ func main() { var enableLeaderElection bool var probeAddr string var serveImage, toolImage string + var copyToPodLabel, copyToNamespace, copyToPath, copierRole string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&serveImage, "serve-image", "nginx", "Serve image.") // It needs luet inside flag.StringVar(&toolImage, "tool-image", "quay.io/kairos/osbuilder-tools:latest", "Tool image.") + // Information on where to copy the artifacts + flag.StringVar(©ToPodLabel, "copy-to-pod-label", "", "The label of the Pod to which artifacts should be copied.") + flag.StringVar(©ToNamespace, "copy-to-namespace", "", "The namespace of the copy-to-pod-label Pod.") + flag.StringVar(©ToPath, "copy-to-path", "", "The path under which to copy artifacts in the copy-to-pod-label Pod.") + flag.StringVar(&copierRole, "copy-role", "", "The name or the Kubernetes Role that has the permissions to copy artifacts to the copy-to-pod-label Pod") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ @@ -98,7 +106,13 @@ func main() { Client: mgr.GetClient(), ServingImage: serveImage, ToolImage: toolImage, - Scheme: mgr.GetScheme(), + ArtifactPodInfo: controllers.ArtifactPodInfo{ + Label: copyToPodLabel, + Namespace: copyToNamespace, + Path: copyToPath, + Role: copierRole, + }, + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "OSArtifact") os.Exit(1)