[WIP] Create RBAC resources to allow the Job to copy to the server Pod

Currently fails with:

```
Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:default:hello-kairos" cannot list resource "pods" in API group "" at the cluster scope
```

because we try to list Pods with `-A`. This means we will hit a similar
error when we try to copy files to a Pod in another namespace, unless we
grant permission either at the cluster scope or in just that
namespace. (Is the latter possible? Maybe, if we create the Role in the
same namespace as the server; see the sketch below.)
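
It is possible: a RoleBinding in the server's namespace may name a
ServiceAccount from another namespace as its subject, so no cluster-scoped
grant is needed as long as the Job only touches Pods in that one namespace.
A minimal sketch (`nginx-server` is a hypothetical namespace; the
ServiceAccount is the one from the error above):

```yaml
# Role in the server's namespace, granting only what the copier needs there.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: artifactCopier
  namespace: nginx-server      # hypothetical server namespace
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list"]
---
# RoleBinding in the same namespace; its subject may live in another one.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: artifactCopier
  namespace: nginx-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: artifactCopier
subjects:
  - kind: ServiceAccount
    name: hello-kairos         # the Job's ServiceAccount...
    namespace: default         # ...in a different namespace than the Role
```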

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
Dimitris Karakasilis <dimitris@karakasilis.me>
2022-12-08 16:35:45 +02:00
parent 44a48d7890
commit 224291994f
10 changed files with 255 additions and 20 deletions


@@ -147,6 +147,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
 .PHONY: deploy
 deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	# TODO: No need to build and then apply. `kubectl apply -k config/default` does the trick
 	$(KUSTOMIZE) build config/default | kubectl apply -f -
@@ -282,4 +283,4 @@ kind-e2e-tests: ginkgo kind-setup install undeploy-dev deploy-dev e2e-tests
 kubesplit: manifests kustomize
 	rm -rf helm-chart
 	mkdir helm-chart
 	$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart


@@ -38,3 +38,7 @@ spec:
- "--health-probe-bind-address=:8081" - "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080" - "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect" - "--leader-elect"
- "--copy-to-namespace=$(NGINX_NAMESPACE)"
- "--copy-role=$(ARTIFACT_COPIER_ROLE)"
- --copy-to-pod-label="app.kubernetes.io/name=osbuilder-nginx"
- --copy-to-path="/usr/share/nginx/html"
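
The `$(NGINX_NAMESPACE)` and `$(ARTIFACT_COPIER_ROLE)` references look like
kustomize vars; for them to be substituted into container args, the
kustomization would need declarations roughly like the following (the objref
targets are assumptions — the actual kustomization is not part of this diff):

```yaml
vars:
  - name: NGINX_NAMESPACE
    objref:
      apiVersion: v1
      kind: Namespace
      name: system             # assumed: the Namespace added below
    fieldref:
      fieldpath: metadata.name
  - name: ARTIFACT_COPIER_ROLE
    objref:
      apiVersion: rbac.authorization.k8s.io/v1
      kind: Role
      name: artifactCopier     # the Role added in config/nginx/role.yaml
    fieldref:
      fieldpath: metadata.name
```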


@@ -1,3 +1,10 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: controller-manager
+  name: system
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:


@@ -1,3 +1,4 @@
 resources:
 - deployment.yaml
 - service.yaml
+- role.yaml

config/nginx/role.yaml (new file)

@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: artifactCopier
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
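
Worth noting for later: `list` alone won't be enough once the Job actually
copies files. `kubectl cp` works by exec'ing `tar` in the target container, so
this Role would eventually need the `pods/exec` subresource too. A sketch of
the fuller rule set (an assumption about where this is headed, not part of
this commit):

```yaml
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    # kubectl cp opens an exec session (tar) in the target container
    resources: ["pods/exec"]
    verbs: ["create"]
```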


@@ -5,6 +5,31 @@ metadata:
   creationTimestamp: null
   name: manager-role
 rules:
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - "rbac.authorization.k8s.io"
+  resources:
+  - roles
+  - rolebindings
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - build.kairos.io
   resources:
@@ -54,3 +79,11 @@ rules:
   - get
   - create
   - update
+# Temporary so that it can grant these permissions to the created role
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - get


@@ -17,12 +17,16 @@ limitations under the License.
 package controllers

 import (
+	"context"
 	"fmt"

 	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
+	"github.com/pkg/errors"
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -34,6 +38,7 @@ func genJobLabel(s string) map[string]string {
 // TODO: Handle registry auth
 // TODO: This shells out, but needs ENV_VAR with key refs mapping
+// TODO: Cache downloaded images?
 func unpackContainer(id, containerImage, pullImage string, pullOptions buildv1alpha1.Pull) v1.Container {
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
@@ -81,6 +86,28 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
 	}
 }

+func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
+	return v1.Container{
+		ImagePullPolicy: v1.PullAlways,
+		Name:            "push-to-server",
+		Image:           containerImage,
+		Command:         []string{"/bin/bash", "-cxe"},
+		Args: []string{
+			fmt.Sprintf("kubectl get pods -n %s", artifactPodInfo.Namespace),
+		},
+		VolumeMounts: []v1.VolumeMount{
+			{
+				Name:      "rootfs",
+				MountPath: "/rootfs",
+			},
+			{
+				Name:      "artifacts",
+				MountPath: "/artifacts",
+			},
+		},
+	}
+}
+
 func osReleaseContainer(containerImage string) v1.Container {
 	return v1.Container{
 		ImagePullPolicy: v1.PullAlways,
@@ -105,16 +132,12 @@ func osReleaseContainer(containerImage string) v1.Container {
 }

 func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv1.Job {
-	objMeta := metav1.ObjectMeta{
-		Name:            artifact.Name,
-		Namespace:       artifact.Namespace,
-		OwnerReferences: genOwner(artifact),
-	}
+	objMeta := genObjectMeta(artifact)

 	pushImage := artifact.Spec.PushOptions.Push

 	privileged := false
-	serviceAccount := false
+	serviceAccount := true

 	cmd := fmt.Sprintf(
 		"/entrypoint.sh --debug --name %s build-iso --date=false --output /artifacts dir:/rootfs",
@@ -248,9 +271,11 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	pod := v1.PodSpec{
 		AutomountServiceAccountToken: &serviceAccount,
+		ServiceAccountName:           objMeta.Name,
+		RestartPolicy:                v1.RestartPolicyNever,
 		Volumes: []v1.Volume{
 			{
-				Name:         "public",
+				Name:         "artifacts",
 				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
 			},
 			{
@@ -274,7 +299,6 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	if artifact.Spec.OSRelease != "" {
 		pod.InitContainers = append(pod.InitContainers, osReleaseContainer(r.ToolImage))
 	}
-
 	if artifact.Spec.ISO || artifact.Spec.Netboot {
@@ -297,10 +321,17 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 		pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
 	}

+	// TODO: Shell out to `kubectl cp`? Why not?
+	// TODO: Does it make sense to build the image and not push it? Maybe remove
+	// this flag?
 	if pushImage {
-		pod.Containers = []v1.Container{
-			createImageContainer(r.ToolImage, artifact.Spec.PushOptions),
-		}
+		pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact.Spec.PushOptions))
+	}
+
+	pod.Containers = []v1.Container{
+		// TODO: Add kubectl to osbuilder-tools?
+		//createPushToServerImageContainer(r.ToolImage),
+		createPushToServerImageContainer("bitnami/kubectl", r.ArtifactPodInfo),
 	}

 	jobLabels := genJobLabel(artifact.Name)
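
The `kubectl cp` that the TODO above contemplates might look roughly like this
inside the push-to-server container — a sketch under assumptions: the label
and target path are the ones wired through the manager flags, and
`$COPY_NAMESPACE` is a hypothetical environment variable:

```bash
# Hypothetical copy step: locate the server Pod by label, then copy artifacts in.
POD="$(kubectl get pods -n "$COPY_NAMESPACE" \
  -l app.kubernetes.io/name=osbuilder-nginx \
  -o jsonpath='{.items[0].metadata.name}')"
kubectl cp /artifacts "$COPY_NAMESPACE/$POD:/usr/share/nginx/html"
```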
@@ -308,7 +339,6 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	job := batchv1.Job{
 		ObjectMeta: objMeta,
 		Spec: batchv1.JobSpec{
-			Selector: &metav1.LabelSelector{MatchLabels: jobLabels},
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: jobLabels,
@@ -320,3 +350,110 @@ func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv
 	return &job
 }

+// createCopierServiceAccount creates a service account that has the permissions to
+// copy the artifacts to the http server Pod. This service account is used for
+// the "push to server" container.
+func (r *OSArtifactReconciler) createCopierServiceAccount(ctx context.Context, objMeta metav1.ObjectMeta) error {
+	sa, err := r.clientSet.CoreV1().
+		ServiceAccounts(objMeta.Namespace).Get(ctx, objMeta.Name, metav1.GetOptions{})
+	if sa == nil || apierrors.IsNotFound(err) {
+		t := true
+		_, err := r.clientSet.CoreV1().ServiceAccounts(objMeta.Namespace).Create(ctx,
+			&v1.ServiceAccount{
+				ObjectMeta:                   objMeta,
+				AutomountServiceAccountToken: &t,
+			}, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+		// Don't return the NotFound error from the Get above.
+		return nil
+	}
+
+	return err
+}
+
+// func (r *OSArtifactReconciler) createCopierRole(ctx context.Context, objMeta metav1.ObjectMeta) error {
+// 	role, err := r.clientSet.RbacV1().
+// 		Roles(objMeta.Namespace).
+// 		Get(ctx, objMeta.Name, metav1.GetOptions{})
+// 	if role == nil || apierrors.IsNotFound(err) {
+// 		_, err := r.clientSet.RbacV1().Roles(objMeta.Namespace).Create(ctx,
+// 			&rbacv1.Role{
+// 				ObjectMeta: objMeta,
+// 				Rules: []rbacv1.PolicyRule{
+// 					// TODO: The actual permission we need is to copy to a Pod.
+// 					// The Pod is in another namespace, so we need a cluster-wide permission.
+// 					// This can get viral because the controller needs to have the permissions
+// 					// if it is to grant them to the Job.
+// 					{
+// 						Verbs:     []string{"list"},
+// 						APIGroups: []string{""},
+// 						Resources: []string{"pods"},
+// 					},
+// 				},
+// 			},
+// 			metav1.CreateOptions{},
+// 		)
+// 		if err != nil {
+// 			return err
+// 		}
+// 	}
+// 	return err
+// }
+
+func (r *OSArtifactReconciler) createCopierRoleBinding(ctx context.Context, objMeta metav1.ObjectMeta) error {
+	newrb := &rbacv1.RoleBinding{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      objMeta.Name,
+			Namespace: r.ArtifactPodInfo.Namespace,
+			// TODO: We can't have cross-namespace owners. The role binding will have to be
+			// deleted explicitly by the reconciler (finalizer?)
+			// OwnerReferences: objMeta.OwnerReferences,
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "Role",
+			Name:     r.ArtifactPodInfo.Role,
+		},
+		Subjects: []rbacv1.Subject{
+			{
+				Kind:      "ServiceAccount",
+				Name:      objMeta.Name,
+				Namespace: objMeta.Namespace,
+			},
+		},
+	}
+
+	rb, err := r.clientSet.RbacV1().
+		RoleBindings(r.ArtifactPodInfo.Namespace).
+		Get(ctx, objMeta.Name, metav1.GetOptions{})
+	if rb == nil || apierrors.IsNotFound(err) {
+		_, err := r.clientSet.RbacV1().
+			RoleBindings(r.ArtifactPodInfo.Namespace).
+			Create(ctx, newrb, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+		// Don't return the NotFound error from the Get above.
+		return nil
+	}
+
+	return err
+}
+
+// createRBAC creates a ServiceAccount, and a binding to the CopierRole so that
+// the container that copies the artifacts to the http server Pod has the
+// permissions to do so.
+func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
+	objMeta := genObjectMeta(artifact)
+
+	err := r.createCopierServiceAccount(ctx, objMeta)
+	if err != nil {
+		return errors.Wrap(err, "creating a service account")
+	}
+
+	err = r.createCopierRoleBinding(ctx, objMeta)
+	if err != nil {
+		return errors.Wrap(err, "creating a role binding for the copy-role")
+	}
+
+	return nil
+}
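
On that deletion TODO: since the RoleBinding lives in
`r.ArtifactPodInfo.Namespace` and cannot carry a cross-namespace owner
reference, cleanup has to be explicit. A minimal sketch of what a
finalizer-driven cleanup helper might look like — the function name and the
finalizer wiring are hypothetical, not part of this commit:

```go
// removeCopierRoleBinding deletes the cross-namespace RoleBinding, since owner
// references cannot garbage-collect across namespaces. A finalizer on the
// OSArtifact would call this before letting the object go away.
func (r *OSArtifactReconciler) removeCopierRoleBinding(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
	err := r.clientSet.RbacV1().
		RoleBindings(r.ArtifactPodInfo.Namespace).
		Delete(ctx, artifact.Name, metav1.DeleteOptions{})
	if apierrors.IsNotFound(err) {
		return nil // already gone; nothing to clean up
	}
	return err
}
```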


@@ -35,12 +35,28 @@ import (
"sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log"
) )
type ArtifactPodInfo struct {
Label string
Namespace string
Path string
Role string
}
// OSArtifactReconciler reconciles a OSArtifact object // OSArtifactReconciler reconciles a OSArtifact object
type OSArtifactReconciler struct { type OSArtifactReconciler struct {
client.Client client.Client
Scheme *runtime.Scheme Scheme *runtime.Scheme
clientSet *kubernetes.Clientset clientSet *kubernetes.Clientset
ServingImage, ToolImage string ServingImage, ToolImage string
ArtifactPodInfo ArtifactPodInfo
}
func genObjectMeta(artifact buildv1alpha1.OSArtifact) metav1.ObjectMeta {
return metav1.ObjectMeta{
Name: artifact.Name,
Namespace: artifact.Namespace,
OwnerReferences: genOwner(artifact),
}
} }
func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference { func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
@@ -100,6 +116,21 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))

+	// TODO: We need to create the Role in the namespace where the nginx Pod is,
+	// so that the copier container has permissions to copy to that Pod.
+	// The nginx Pod should be defined in the OSArtifact CRD as in "when done,
+	// write the results in this Namespace:Pod, under this path".
+	// The controller will try to create RBAC with the proper permissions, but
+	// Kubernetes requires us to have the permissions before we grant them to others.
+	// This means the controller should have these permissions already.
+	// Since we control the nginx, we can make it so, but if the user specifies
+	// some other Pod it may fail. Also, every OSArtifact will have to specify
+	// the nginx Pod, which makes it cumbersome.
+	err = r.createRBAC(ctx, osbuild)
+	if err != nil {
+		return ctrl.Result{Requeue: true}, err
+	}
+
 	desiredJob := r.genJob(osbuild)

 	job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, v1.GetOptions{})
 	if job == nil || apierrors.IsNotFound(err) {


@@ -17,20 +17,15 @@ limitations under the License.
 package controllers

 import (
-	"path/filepath"
 	"testing"

-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
-	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-
-	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
 	//+kubebuilder:scaffold:imports
 )

main.go

@@ -53,11 +53,19 @@ func main() {
 	var enableLeaderElection bool
 	var probeAddr string
 	var serveImage, toolImage string
+	var copyToPodLabel, copyToNamespace, copyToPath, copierRole string
 	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
 	flag.StringVar(&serveImage, "serve-image", "nginx", "Serve image.")
 	// It needs luet inside
 	flag.StringVar(&toolImage, "tool-image", "quay.io/kairos/osbuilder-tools:latest", "Tool image.")
+	// Information on where to copy the artifacts
+	flag.StringVar(&copyToPodLabel, "copy-to-pod-label", "", "The label of the Pod to which artifacts should be copied.")
+	flag.StringVar(&copyToNamespace, "copy-to-namespace", "", "The namespace of the copy-to-pod-label Pod.")
+	flag.StringVar(&copyToPath, "copy-to-path", "", "The path under which to copy artifacts in the copy-to-pod-label Pod.")
+	flag.StringVar(&copierRole, "copy-role", "", "The name of the Kubernetes Role that has the permissions to copy artifacts to the copy-to-pod-label Pod.")
 	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
 	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
 		"Enable leader election for controller manager. "+
@@ -98,7 +106,13 @@ func main() {
 		Client:       mgr.GetClient(),
 		ServingImage: serveImage,
 		ToolImage:    toolImage,
-		Scheme:       mgr.GetScheme(),
+		ArtifactPodInfo: controllers.ArtifactPodInfo{
+			Label:     copyToPodLabel,
+			Namespace: copyToNamespace,
+			Path:      copyToPath,
+			Role:      copierRole,
+		},
+		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "OSArtifact")
 		os.Exit(1)