Refactor kustomization, implement cleanup with finalizers and write tests

Signed-off-by: Dimitris Karakasilis <dimitris@karakasilis.me>
This commit is contained in:
Dimitris Karakasilis
2022-12-12 18:09:31 +02:00
parent 224291994f
commit 1346374650
15 changed files with 284 additions and 175 deletions

View File

@@ -17,6 +17,7 @@ limitations under the License.
package controllers
import (
"bytes"
"context"
"fmt"
@@ -28,6 +29,9 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/remotecommand"
)
func genJobLabel(s string) map[string]string {
@@ -87,14 +91,15 @@ func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push)
}
func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
command := fmt.Sprintf("tar cf - -C artifacts/ . | kubectl exec -i -n %s $(kubectl get pods -l %s -n %s --no-headers -o custom-columns=\":metadata.name\" | head -n1) -- tar xf - -C %s", artifactPodInfo.Namespace, artifactPodInfo.Label, artifactPodInfo.Namespace, artifactPodInfo.Path)
fmt.Printf("command = %+v\n", command)
return v1.Container{
ImagePullPolicy: v1.PullAlways,
Name: "push-to-server",
Image: containerImage,
Command: []string{"/bin/bash", "-cxe"},
Args: []string{
fmt.Sprintf("kubectl get pods -n %s", artifactPodInfo.Namespace),
},
Args: []string{command},
VolumeMounts: []v1.VolumeMount{
{
Name: "rootfs",
@@ -457,3 +462,70 @@ func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1a
return err
}
// removeRBAC deletes the role binding between the service account of this artifact
// and the CopierRole. The ServiceAccount is removed automatically through the Owner
// relationship with the OSArtifact. The RoleBinding can't have it as an owner
// because it is in a different Namespace.
func (r *OSArtifactReconciler) removeRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
err := r.clientSet.RbacV1().RoleBindings(r.ArtifactPodInfo.Namespace).
Delete(ctx, artifact.Name, metav1.DeleteOptions{})
// Ignore not found. No need to do anything.
if err != nil && apierrors.IsNotFound(err) {
return nil
}
return err
}
func (r *OSArtifactReconciler) removeArtifacts(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
// Find the artifact Pod through the configured label selector.
pods, err := r.clientSet.CoreV1().Pods(r.ArtifactPodInfo.Namespace).
List(ctx, metav1.ListOptions{LabelSelector: r.ArtifactPodInfo.Label})
if err != nil {
return errors.Wrap(err, fmt.Sprintf("listing pods with label %s in namespace %s", r.ArtifactPodInfo.Label, r.ArtifactPodInfo.Namespace))
}
if len(pods.Items) < 1 {
return errors.New("No artifact pod found")
}
pod := pods.Items[0]
stdout, stderr, err := r.executeRemoteCommand(r.ArtifactPodInfo.Namespace, pod.Name, fmt.Sprintf("rm -rf %s/%s.*", r.ArtifactPodInfo.Path, artifact.Name))
if err != nil {
return errors.Wrap(err, fmt.Sprintf("%s\n%s", stdout, stderr))
}
return nil
}
func (r *OSArtifactReconciler) executeRemoteCommand(namespace, podName, command string) (string, string, error) {
buf := &bytes.Buffer{}
errBuf := &bytes.Buffer{}
request := r.clientSet.CoreV1().RESTClient().
Post().
Namespace(namespace).
Resource("pods").
Name(podName).
SubResource("exec").
VersionedParams(&v1.PodExecOptions{
Command: []string{"/bin/sh", "-c", command},
Stdin: false,
Stdout: true,
Stderr: true,
// TTY must stay disabled: with a TTY the server merges stderr into stdout,
// so errBuf would never receive a separate error stream.
TTY: false,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(r.restConfig, "POST", request.URL())
if err != nil {
return "", "", err
}
err = exec.Stream(remotecommand.StreamOptions{
Stdout: buf,
Stderr: errBuf,
})
if err != nil {
// Return whatever output was captured so callers can include it in their error.
return buf.String(), errBuf.String(), fmt.Errorf("failed executing command %q on %s/%s: %w", command, namespace, podName, err)
}
return buf.String(), errBuf.String(), nil
}
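
The helper above is the standard client-go SPDY exec pattern. As a self-contained sketch of the same technique outside the controller (the kubeconfig location, namespace, pod name, and command are hypothetical, not values from this repo):

package main

import (
    "bytes"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/remotecommand"
)

func main() {
    // Hypothetical: load the config from the default kubeconfig location.
    // The controller gets its config from the manager instead (see SetupWithManager below).
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    // Build the exec request the same way executeRemoteCommand does.
    req := clientset.CoreV1().RESTClient().Post().
        Namespace("default").           // hypothetical namespace
        Resource("pods").
        Name("osbuilder-nginx-abc123"). // hypothetical pod name
        SubResource("exec").
        VersionedParams(&v1.PodExecOptions{
            Command: []string{"/bin/sh", "-c", "ls /usr/share/nginx/html"},
            Stdout:  true,
            Stderr:  true,
        }, scheme.ParameterCodec)

    exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
    if err != nil {
        panic(err)
    }
    var stdout, stderr bytes.Buffer
    if err := exec.Stream(remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr}); err != nil {
        panic(fmt.Errorf("exec failed: %w (stderr: %s)", err, stderr.String()))
    }
    fmt.Print(stdout.String())
}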

View File

@@ -25,16 +25,19 @@ import (
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
)
const FinalizerName = "build.kairos.io/osbuilder-finalizer"
type ArtifactPodInfo struct {
Label string
Namespace string
@@ -46,6 +49,7 @@ type ArtifactPodInfo struct {
type OSArtifactReconciler struct {
client.Client
Scheme *runtime.Scheme
restConfig *rest.Config
clientSet *kubernetes.Clientset
ServingImage, ToolImage string
ArtifactPodInfo ArtifactPodInfo
@@ -73,6 +77,12 @@ func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/finalizers,verbs=update
// TODO: Is this ^ how I should have created RBAC permissions for the controller?
// - git commit all changes
// - generate code with kubebuilder
// - check if my permissions were removed
// - do it properly
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
@@ -95,17 +105,22 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
logger.Info(fmt.Sprintf("Reconciling %v", osbuild))
stop, err := r.handleFinalizer(ctx, &osbuild)
if err != nil || stop {
return ctrl.Result{}, err
}
// generate configmap required for building a custom image
desiredConfigMap := r.genConfigMap(osbuild)
logger.Info(fmt.Sprintf("Checking configmap %v", osbuild))
cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, v1.GetOptions{})
cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, metav1.GetOptions{})
if cfgMap == nil || apierrors.IsNotFound(err) {
logger.Info(fmt.Sprintf("Creating service %v", desiredConfigMap))
logger.Info(fmt.Sprintf("Creating config map %v", desiredConfigMap))
cfgMap, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, v1.CreateOptions{})
_, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, metav1.CreateOptions{})
if err != nil {
logger.Error(err, "Failed while creating svc")
logger.Error(err, "Failed while creating config map")
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, err
@@ -116,27 +131,17 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))
// TODO: We need to create the Role in the namespace where the nginx Pod runs,
// so that the copier container has permission to copy into that Pod.
// Ideally the target Pod would be defined in the OSArtifact CRD, as in: "when
// done, write the results to this Namespace:Pod, under this path".
// The controller will try to create RBAC with the proper permissions, but
// Kubernetes requires us to hold permissions before we can grant them to others,
// so the controller must already have them itself.
// Since we control the nginx Pod we can make that so, but if the user points to
// some other Pod it may fail. Also, every OSArtifact would then have to specify
// the nginx Pod, which makes it cumbersome. (See the wiring sketch further
// below for how the target is currently configured controller-wide.)
err = r.createRBAC(ctx, osbuild)
if err != nil {
return ctrl.Result{Requeue: true}, err
}
desiredJob := r.genJob(osbuild)
job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, v1.GetOptions{})
job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, metav1.GetOptions{})
if job == nil || apierrors.IsNotFound(err) {
logger.Info(fmt.Sprintf("Creating Job %v", job))
job, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, v1.CreateOptions{})
_, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, metav1.CreateOptions{})
if err != nil {
logger.Error(err, "Failed while creating job")
return ctrl.Result{}, err
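
For context on the TODO above: the copy destination is configured controller-wide through ArtifactPodInfo rather than per OSArtifact. A minimal sketch of how the reconciler might be wired up in main.go; the images, label, namespace, and path are hypothetical:

package main

import (
    "k8s.io/apimachinery/pkg/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    ctrl "sigs.k8s.io/controller-runtime"

    buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
    "github.com/kairos-io/osbuilder/controllers"
)

func main() {
    scheme := runtime.NewScheme()
    _ = clientgoscheme.AddToScheme(scheme)
    _ = buildv1alpha1.AddToScheme(scheme)

    mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
    if err != nil {
        panic(err)
    }

    if err := (&controllers.OSArtifactReconciler{
        Client:       mgr.GetClient(),
        Scheme:       mgr.GetScheme(),
        ServingImage: "nginx",                                  // hypothetical
        ToolImage:    "quay.io/kairos/osbuilder-tools:latest",  // hypothetical
        ArtifactPodInfo: controllers.ArtifactPodInfo{
            Label:     "app=osbuilder-nginx",   // hypothetical label on the serving Pod
            Namespace: "osbuilder-system",      // hypothetical namespace of the serving Pod
            Path:      "/usr/share/nginx/html", // hypothetical path artifacts are copied to
        },
    }).SetupWithManager(mgr); err != nil {
        panic(err)
    }

    if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
        panic(err)
    }
}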
@@ -179,13 +184,66 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
// SetupWithManager sets up the controller with the Manager.
func (r *OSArtifactReconciler) SetupWithManager(mgr ctrl.Manager) error {
clientset, err := kubernetes.NewForConfig(mgr.GetConfig())
cfg := mgr.GetConfig()
clientset, err := kubernetes.NewForConfig(cfg)
if err != nil {
return err
}
r.restConfig = cfg
r.clientSet = clientset
return ctrl.NewControllerManagedBy(mgr).
For(&buildv1alpha1.OSArtifact{}).
Complete(r)
}
// handleFinalizer returns true if reconciliation should stop, false otherwise.
func (r *OSArtifactReconciler) handleFinalizer(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) (bool, error) {
// examine DeletionTimestamp to determine if object is under deletion
if osbuild.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// let's add the finalizer and update the object. This is equivalent to
// registering our finalizer.
if !controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
controllerutil.AddFinalizer(osbuild, FinalizerName)
if err := r.Update(ctx, osbuild); err != nil {
return true, err
}
}
} else {
// The object is being deleted
if controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
// our finalizer is present, so let's handle any external dependency
if err := r.finalize(ctx, osbuild); err != nil {
// if we fail to delete the external dependency here, return with the error
// so that the operation can be retried
return true, err
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(osbuild, FinalizerName)
if err := r.Update(ctx, osbuild); err != nil {
return true, err
}
}
// Stop reconciliation as the item is being deleted
return true, nil
}
return false, nil
}
// finalize cleans up everything the Owner relationship won't:
// - removes the artifacts from the server Pod
// - deletes the RoleBinding (it can't have the OSArtifact as an owner because it
//   lives in a different Namespace, so it won't be deleted automatically)
func (r *OSArtifactReconciler) finalize(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) error {
if err := r.removeRBAC(ctx, *osbuild); err != nil {
return err
}
if err := r.removeArtifacts(ctx, *osbuild); err != nil {
return err
}
return nil
}

View File

@@ -17,15 +17,20 @@ limitations under the License.
package controllers
import (
"path/filepath"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
//+kubebuilder:scaffold:imports
)
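
Since the commit also adds tests, a sketch of an envtest-style Ginkgo spec for the finalizer flow might look like the following. It assumes the usual suite globals (a k8sClient, plus a reconciler running against the envtest API server) and a few imports not shown in the hunk above (context, metav1, apierrors):

var _ = Describe("OSArtifact finalizer", func() {
    It("registers the finalizer and blocks deletion until cleanup runs", func() {
        ctx := context.Background()
        artifact := &buildv1alpha1.OSArtifact{
            ObjectMeta: metav1.ObjectMeta{Name: "test-artifact", Namespace: "default"},
        }
        Expect(k8sClient.Create(ctx, artifact)).To(Succeed())

        // The reconciler adds the finalizer on its first pass.
        Eventually(func() []string {
            _ = k8sClient.Get(ctx, client.ObjectKeyFromObject(artifact), artifact)
            return artifact.Finalizers
        }).Should(ContainElement(FinalizerName))

        // Deletion triggers finalize(); once cleanup succeeds, the finalizer
        // is removed and the object actually disappears.
        Expect(k8sClient.Delete(ctx, artifact)).To(Succeed())
        Eventually(func() bool {
            err := k8sClient.Get(ctx, client.ObjectKeyFromObject(artifact), artifact)
            return apierrors.IsNotFound(err)
        }).Should(BeTrue())
    })
})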