mirror of
https://github.com/kairos-io/osbuilder.git
synced 2025-07-06 19:49:04 +00:00
Currently fails with: ``` Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:default:hello-kairos" cannot list resource "pods" in API group "" at the cluster scope ``` because we try to list pods with `-A`. This means we are going to get a similar error if we try to copy files to a Pod on another namespace unless we grant permission at the cluster scope or just that namespace. (Is that possible? Maybe if we create the Role in the same namespace as the server.) Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
460 lines
12 KiB
Go
460 lines
12 KiB
Go
/*
|
|
Copyright 2022.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
*/
|
|
|
|
package controllers
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
|
|
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
|
|
"github.com/pkg/errors"
|
|
batchv1 "k8s.io/api/batch/v1"
|
|
v1 "k8s.io/api/core/v1"
|
|
rbacv1 "k8s.io/api/rbac/v1"
|
|
|
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
)
|
|
|
|
// genJobLabel returns the label set attached to the Job's pod template so
// that the workload produced for the artifact named s can be selected later.
func genJobLabel(s string) map[string]string {
	labels := make(map[string]string, 1)
	labels["osbuild"] = "workload" + s
	return labels
}
|
|
|
|
// TODO: Handle registry auth
|
|
// TODO: This shells out, but needs ENV_VAR with key refs mapping
|
|
// TODO: Cache downloaded images?
|
|
func unpackContainer(id, containerImage, pullImage string, pullOptions buildv1alpha1.Pull) v1.Container {
|
|
return v1.Container{
|
|
ImagePullPolicy: v1.PullAlways,
|
|
Name: fmt.Sprintf("pull-image-%s", id),
|
|
Image: containerImage,
|
|
Command: []string{"/bin/bash", "-cxe"},
|
|
Args: []string{
|
|
fmt.Sprintf(
|
|
"luet util unpack %s %s",
|
|
pullImage,
|
|
"/rootfs",
|
|
),
|
|
},
|
|
VolumeMounts: []v1.VolumeMount{
|
|
{
|
|
Name: "rootfs",
|
|
MountPath: "/rootfs",
|
|
},
|
|
},
|
|
}
|
|
}
|
|
|
|
func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push) v1.Container {
|
|
return v1.Container{
|
|
ImagePullPolicy: v1.PullAlways,
|
|
Name: "create-image",
|
|
Image: containerImage,
|
|
Command: []string{"/bin/bash", "-cxe"},
|
|
Args: []string{
|
|
fmt.Sprintf(
|
|
"tar -czvpf test.tar -C /rootfs . && luet util pack %s test.tar image.tar && mv image.tar /artifacts",
|
|
pushOptions.ImageName,
|
|
),
|
|
},
|
|
VolumeMounts: []v1.VolumeMount{
|
|
{
|
|
Name: "rootfs",
|
|
MountPath: "/rootfs",
|
|
},
|
|
{
|
|
Name: "artifacts",
|
|
MountPath: "/artifacts",
|
|
},
|
|
},
|
|
}
|
|
}
|
|
|
|
// createPushToServerImageContainer returns the container that is intended to
// copy the built artifacts to the artifact (http server) Pod described by
// artifactPodInfo.
//
// NOTE(review): the command currently only runs `kubectl get pods -n <ns>`
// — effectively a permissions smoke test — it does not copy anything yet.
// The actual copy (e.g. `kubectl cp` into the target Pod) still has to be
// implemented; confirm the required RBAC before doing so.
func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
	return v1.Container{
		ImagePullPolicy: v1.PullAlways,
		Name:            "push-to-server",
		Image:           containerImage,
		Command:         []string{"/bin/bash", "-cxe"},
		Args: []string{
			// Placeholder: verifies the ServiceAccount can list pods in the
			// artifact Pod's namespace.
			fmt.Sprintf("kubectl get pods -n %s", artifactPodInfo.Namespace),
		},
		VolumeMounts: []v1.VolumeMount{
			{
				Name:      "rootfs",
				MountPath: "/rootfs",
			},
			{
				Name:      "artifacts",
				MountPath: "/artifacts",
			},
		},
	}
}
|
|
|
|
func osReleaseContainer(containerImage string) v1.Container {
|
|
return v1.Container{
|
|
ImagePullPolicy: v1.PullAlways,
|
|
Name: "os-release",
|
|
Image: containerImage,
|
|
Command: []string{"/bin/bash", "-cxe"},
|
|
Args: []string{
|
|
"cp -rfv /etc/os-release /rootfs/etc/os-release",
|
|
},
|
|
VolumeMounts: []v1.VolumeMount{
|
|
{
|
|
Name: "config",
|
|
MountPath: "/etc/os-release",
|
|
SubPath: "os-release",
|
|
},
|
|
{
|
|
Name: "rootfs",
|
|
MountPath: "/rootfs",
|
|
},
|
|
},
|
|
}
|
|
}
|
|
|
|
// genJob builds the batch Job that produces the requested artifacts for an
// OSArtifact: init containers unpack the base image and bundles into a
// shared rootfs and run the selected builds (ISO, netboot, cloud, Azure,
// GCE, container image), and the single main container is meant to push the
// results to the artifact server Pod.
func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv1.Job {
	objMeta := genObjectMeta(artifact)

	pushImage := artifact.Spec.PushOptions.Push

	// Build containers run explicitly non-privileged.
	privileged := false
	// Automount the ServiceAccount token so the in-pod kubectl call in the
	// push-to-server container can authenticate against the API server.
	serviceAccount := true

	// Default ISO build command (no overlay applied).
	cmd := fmt.Sprintf(
		"/entrypoint.sh --debug --name %s build-iso --date=false --output /artifacts dir:/rootfs",
		artifact.Name,
	)

	// Mounts shared by every build container: the output directory and the
	// unpacked rootfs.
	volumeMounts := []v1.VolumeMount{
		{
			Name:      "artifacts",
			MountPath: "/artifacts",
		},
		{
			Name:      "rootfs",
			MountPath: "/rootfs",
		},
	}

	// Overlay a custom GRUB config (from the artifact's ConfigMap) into the ISO.
	if artifact.Spec.GRUBConfig != "" {
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      "config",
			MountPath: "/iso/iso-overlay/boot/grub2/grub.cfg",
			SubPath:   "grub.cfg",
		})
	}

	cloudImgCmd := fmt.Sprintf(
		"/raw-images.sh /rootfs /artifacts/%s.raw",
		artifact.Name,
	)

	// Overlay the cloud config into the ISO and pass it to the raw-image
	// script as an extra argument.
	if artifact.Spec.CloudConfig != "" {
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      "config",
			MountPath: "/iso/iso-overlay/cloud_config.yaml",
			SubPath:   "config",
		})

		cloudImgCmd += " /iso/iso-overlay/cloud_config.yaml"
	}

	// Any overlay content switches the ISO build to the --overlay-iso variant.
	if artifact.Spec.CloudConfig != "" || artifact.Spec.GRUBConfig != "" {
		cmd = fmt.Sprintf(
			"/entrypoint.sh --debug --name %s build-iso --date=false --overlay-iso /iso/iso-overlay --output /artifacts dir:/rootfs",
			artifact.Name,
		)
	}

	// Builds the bootable ISO from the rootfs.
	buildIsoContainer := v1.Container{
		ImagePullPolicy: v1.PullAlways,
		SecurityContext: &v1.SecurityContext{Privileged: &privileged},
		Name:            "build-iso",
		Image:           r.ToolImage,
		Command:         []string{"/bin/bash", "-cxe"},
		Args: []string{
			cmd,
		},
		VolumeMounts: volumeMounts,
	}

	// Builds a raw disk (cloud) image; also a prerequisite for Azure/GCE images.
	buildCloudImageContainer := v1.Container{
		ImagePullPolicy: v1.PullAlways,
		SecurityContext: &v1.SecurityContext{Privileged: &privileged},
		Name:            "build-cloud-image",
		Image:           r.ToolImage,

		Command: []string{"/bin/bash", "-cxe"},
		Args: []string{
			cloudImgCmd,
		},
		VolumeMounts: volumeMounts,
	}

	// EXTEND is read by the raw-image script to grow the disk to the
	// requested size.
	if artifact.Spec.DiskSize != "" {
		buildCloudImageContainer.Env = []v1.EnvVar{{
			Name:  "EXTEND",
			Value: artifact.Spec.DiskSize,
		}}
	}

	// Extracts netboot artifacts (kernel/initrd/squashfs) out of the ISO.
	extractNetboot := v1.Container{
		ImagePullPolicy: v1.PullAlways,
		SecurityContext: &v1.SecurityContext{Privileged: &privileged},
		Name:            "build-netboot",
		Image:           r.ToolImage,
		Command:         []string{"/bin/bash", "-cxe"},
		Env: []v1.EnvVar{{
			Name:  "URL",
			Value: artifact.Spec.NetbootURL,
		}},
		Args: []string{
			fmt.Sprintf(
				"/netboot.sh /artifacts/%s.iso /artifacts/%s",
				artifact.Name,
				artifact.Name,
			),
		},
		VolumeMounts: volumeMounts,
	}

	// Converts the raw image into an Azure VHD.
	buildAzureCloudImageContainer := v1.Container{
		ImagePullPolicy: v1.PullAlways,
		SecurityContext: &v1.SecurityContext{Privileged: &privileged},
		Name:            "build-azure-cloud-image",
		Image:           r.ToolImage,
		Command:         []string{"/bin/bash", "-cxe"},
		Args: []string{
			fmt.Sprintf(
				"/azure.sh /artifacts/%s.raw /artifacts/%s.vhd",
				artifact.Name,
				artifact.Name,
			),
		},
		VolumeMounts: volumeMounts,
	}

	// Converts the raw image into a GCE-compatible raw image.
	buildGCECloudImageContainer := v1.Container{
		ImagePullPolicy: v1.PullAlways,
		SecurityContext: &v1.SecurityContext{Privileged: &privileged},
		Name:            "build-gce-cloud-image",
		Image:           r.ToolImage,
		Command:         []string{"/bin/bash", "-cxe"},
		Args: []string{
			fmt.Sprintf(
				"/gce.sh /artifacts/%s.raw /artifacts/%s.gce.raw",
				artifact.Name,
				artifact.Name,
			),
		},
		VolumeMounts: volumeMounts,
	}

	// The pod uses the per-artifact ServiceAccount (created by createRBAC)
	// and shares state between containers via emptyDir volumes; "config" is
	// backed by the artifact's ConfigMap.
	pod := v1.PodSpec{
		AutomountServiceAccountToken: &serviceAccount,
		ServiceAccountName:           objMeta.Name,
		RestartPolicy:                v1.RestartPolicyNever,
		Volumes: []v1.Volume{
			{
				Name:         "artifacts",
				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
			},
			{
				Name:         "rootfs",
				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
			},
			{
				Name: "config",
				VolumeSource: v1.VolumeSource{
					ConfigMap: &v1.ConfigMapVolumeSource{
						LocalObjectReference: v1.LocalObjectReference{Name: artifact.Name}}},
			},
		},
	}

	// Base image first, then bundles, then the optional build steps — init
	// containers run sequentially, so ordering matters here.
	pod.InitContainers = []v1.Container{unpackContainer("baseimage", r.ToolImage, artifact.Spec.ImageName, artifact.Spec.PullOptions)}

	for i, bundle := range artifact.Spec.Bundles {
		pod.InitContainers = append(pod.InitContainers, unpackContainer(fmt.Sprint(i), r.ToolImage, bundle, artifact.Spec.PullOptions))
	}

	if artifact.Spec.OSRelease != "" {
		pod.InitContainers = append(pod.InitContainers, osReleaseContainer(r.ToolImage))
	}

	// Netboot extraction needs the ISO, so the ISO is built for either flag.
	if artifact.Spec.ISO || artifact.Spec.Netboot {
		pod.InitContainers = append(pod.InitContainers, buildIsoContainer)
	}

	if artifact.Spec.Netboot {
		pod.InitContainers = append(pod.InitContainers, extractNetboot)
	}

	// Azure/GCE conversions consume the raw image, so it is built for them too.
	if artifact.Spec.CloudImage || artifact.Spec.AzureImage || artifact.Spec.GCEImage {
		pod.InitContainers = append(pod.InitContainers, buildCloudImageContainer)
	}

	if artifact.Spec.AzureImage {
		pod.InitContainers = append(pod.InitContainers, buildAzureCloudImageContainer)
	}

	if artifact.Spec.GCEImage {
		pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
	}

	// TODO: Shell out to `kubectl cp`? Why not?
	// TODO: Does it make sense to build the image and not push it? Maybe remove
	// this flag?
	if pushImage {
		pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact.Spec.PushOptions))
	}

	pod.Containers = []v1.Container{
		// TODO: Add kubectl to osbuilder-tools?
		//createPushToServerImageContainer(r.ToolImage),
		createPushToServerImageContainer("bitnami/kubectl", r.ArtifactPodInfo),
	}

	jobLabels := genJobLabel(artifact.Name)

	job := batchv1.Job{
		ObjectMeta: objMeta,
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: jobLabels,
				},
				Spec: pod,
			},
		},
	}

	return &job
}
|
|
|
|
// createServiceAccount creates a service account that has the permissions to
|
|
// copy the artifacts to the http server Pod. This service account is used for
|
|
// the "push to server" container.
|
|
func (r *OSArtifactReconciler) createCopierServiceAccount(ctx context.Context, objMeta metav1.ObjectMeta) error {
|
|
sa, err := r.clientSet.CoreV1().
|
|
ServiceAccounts(objMeta.Namespace).Get(ctx, objMeta.Name, metav1.GetOptions{})
|
|
if sa == nil || apierrors.IsNotFound(err) {
|
|
t := true
|
|
_, err := r.clientSet.CoreV1().ServiceAccounts(objMeta.Namespace).Create(ctx,
|
|
&v1.ServiceAccount{
|
|
ObjectMeta: objMeta,
|
|
AutomountServiceAccountToken: &t,
|
|
}, metav1.CreateOptions{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return err
|
|
}
|
|
|
|
// func (r *OSArtifactReconciler) createCopierRole(ctx context.Context, objMeta metav1.ObjectMeta) error {
|
|
// role, err := r.clientSet.RbacV1().
|
|
// Roles(objMeta.Namespace).
|
|
// Get(ctx, objMeta.Name, metav1.GetOptions{})
|
|
// if role == nil || apierrors.IsNotFound(err) {
|
|
// _, err := r.clientSet.RbacV1().Roles(objMeta.Namespace).Create(ctx,
|
|
// &rbacv1.Role{
|
|
// ObjectMeta: objMeta,
|
|
// Rules: []rbacv1.PolicyRule{
|
|
// // TODO: The actual permissions we need is that to copy to a Pod.
|
|
// // The Pod is on another namespace, so we need a cluster wide permission.
|
|
// // This can get viral because the controller needs to have the permissions
|
|
// // if it is to grant them to the Job.
|
|
// {
|
|
// Verbs: []string{"list"},
|
|
// APIGroups: []string{""},
|
|
// Resources: []string{"pods"},
|
|
// },
|
|
// },
|
|
// },
|
|
// metav1.CreateOptions{},
|
|
// )
|
|
// if err != nil {
|
|
// return err
|
|
// }
|
|
// }
|
|
|
|
// return err
|
|
// }
|
|
|
|
func (r *OSArtifactReconciler) createCopierRoleBinding(ctx context.Context, objMeta metav1.ObjectMeta) error {
|
|
newrb := &rbacv1.RoleBinding{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: objMeta.Name,
|
|
Namespace: r.ArtifactPodInfo.Namespace,
|
|
// TODO: We can't have cross-namespace owners. The role binding will have to deleted explicitly by the reconciler (finalizer?)
|
|
// OwnerReferences: objMeta.OwnerReferences,
|
|
},
|
|
RoleRef: rbacv1.RoleRef{
|
|
APIGroup: "rbac.authorization.k8s.io",
|
|
Kind: "Role",
|
|
Name: r.ArtifactPodInfo.Role,
|
|
},
|
|
Subjects: []rbacv1.Subject{
|
|
{
|
|
Kind: "ServiceAccount",
|
|
Name: objMeta.Name,
|
|
Namespace: objMeta.Namespace,
|
|
},
|
|
},
|
|
}
|
|
|
|
rb, err := r.clientSet.RbacV1().
|
|
RoleBindings(r.ArtifactPodInfo.Namespace).
|
|
Get(ctx, objMeta.Name, metav1.GetOptions{})
|
|
if rb == nil || apierrors.IsNotFound(err) {
|
|
_, err := r.clientSet.RbacV1().
|
|
RoleBindings(r.ArtifactPodInfo.Namespace).
|
|
Create(ctx, newrb, metav1.CreateOptions{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return err
|
|
}
|
|
|
|
// createRBAC creates a ServiceAccount, and a binding to the CopierRole so that
|
|
// the container that copies the artifacts to the http server Pod has the
|
|
// permissions to do so.
|
|
func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
|
|
objMeta := genObjectMeta(artifact)
|
|
|
|
err := r.createCopierServiceAccount(ctx, objMeta)
|
|
if err != nil {
|
|
return errors.Wrap(err, "creating a service account")
|
|
}
|
|
|
|
err = r.createCopierRoleBinding(ctx, objMeta)
|
|
if err != nil {
|
|
return errors.Wrap(err, "creating a role binding for the copy-role")
|
|
}
|
|
|
|
return err
|
|
}
|