Mirror of https://github.com/kairos-io/osbuilder.git, synced 2025-08-12 13:12:16 +00:00

Merge pull request #22 from kairos-io/515-central-server (515 central server)
Commit e6e5cddbfe
.github/workflows/test.yml (vendored, 3 lines changed)
@@ -7,6 +7,7 @@ on:
       - master
     tags:
       - '*'
+  pull_request:

 jobs:
   docker:
@@ -16,4 +17,4 @@ jobs:
         uses: actions/checkout@v2
       - name: Test
         run: |
           make kind-e2e-tests
Makefile (7 lines changed)
@@ -147,13 +147,12 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
 .PHONY: deploy
 deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
+	# TODO: No need to build and then apply. `kubectl apply -k config/default` does the trick
 	$(KUSTOMIZE) build config/default | kubectl apply -f -

 .PHONY: deploy-dev
 deploy-dev: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
-	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
-	$(KUSTOMIZE) build config/dev | kubectl apply -f -
+	kubectl apply -k config/dev

 .PHONY: undeploy
 undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
@@ -282,4 +281,4 @@ kind-e2e-tests: ginkgo kind-setup install undeploy-dev deploy-dev e2e-tests
 kubesplit: manifests kustomize
 	rm -rf helm-chart
 	mkdir helm-chart
 	$(KUSTOMIZE) build config/default | kubesplit -helm helm-chart
@@ -16,6 +16,7 @@ bases:
 - ../crd
 - ../rbac
 - ../manager
+- ../nginx
 # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
 # crd/kustomization.yaml
 #- ../webhook
@@ -72,3 +73,16 @@ vars:
 #    kind: Service
 #    version: v1
 #    name: webhook-service
+
+vars:
+- name: NGINX_NAMESPACE
+  objref:
+    kind: Namespace
+    name: system
+    apiVersion: v1
+
+- name: ARTIFACT_COPIER_ROLE
+  objref:
+    kind: Role
+    name: artifactCopier
+    apiVersion: rbac.authorization.k8s.io/v1
@@ -38,3 +38,7 @@ spec:
         - "--health-probe-bind-address=:8081"
         - "--metrics-bind-address=127.0.0.1:8080"
         - "--leader-elect"
+        - "--copy-to-namespace=$(NGINX_NAMESPACE)"
+        - "--copy-role=$(ARTIFACT_COPIER_ROLE)"
+        - --copy-to-pod-label=app.kubernetes.io/name=osbuilder-nginx
+        - --copy-to-path="/usr/share/nginx/html"
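The two $(...) values above are filled in by the kustomize vars added earlier (NGINX_NAMESPACE and ARTIFACT_COPIER_ROLE), so the manager knows where the nginx artifact server lives and which Role allows copying into it. The diff does not show the main.go side of this wiring; the sketch below is only an assumption of how those flags could be parsed into the ArtifactPodInfo struct this PR adds to the reconciler.

// Hypothetical flag wiring for the new manager arguments (not part of this
// commit): each flag maps onto one field of controllers.ArtifactPodInfo.
package main

import (
	"flag"
	"fmt"

	"github.com/kairos-io/osbuilder/controllers"
)

func artifactPodInfoFromFlags() controllers.ArtifactPodInfo {
	label := flag.String("copy-to-pod-label", "app.kubernetes.io/name=osbuilder-nginx", "label selector of the artifact server pod")
	namespace := flag.String("copy-to-namespace", "", "namespace the artifact server pod runs in")
	path := flag.String("copy-to-path", "/usr/share/nginx/html", "directory served by the artifact server pod")
	role := flag.String("copy-role", "", "Role that grants exec access to the artifact server pod")
	flag.Parse()

	return controllers.ArtifactPodInfo{
		Label:     *label,
		Namespace: *namespace,
		Path:      *path,
		Role:      *role,
	}
}

func main() {
	// In the real manager this struct would be set on OSArtifactReconciler
	// before calling SetupWithManager.
	fmt.Printf("%+v\n", artifactPodInfoFromFlags())
}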
@@ -1,74 +1,7 @@
-# Adds namespace to all resources.
-namespace: osartifactbuilder-operator-system
-
-# Value of this field is prepended to the
-# names of all resources, e.g. a deployment named
-# "wordpress" becomes "alices-wordpress".
-# Note that it should also match with the prefix (text before '-') of the namespace
-# field above.
-namePrefix: osartifactbuilder-operator-
-
-# Labels to add to all resources and selectors.
-#commonLabels:
-#  someName: someValue
-
 bases:
-- ../crd
-- ../rbac
-- ../manager
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
-#- ../webhook
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
-#- ../certmanager
-# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
-#- ../prometheus
-
-patchesStrategicMerge:
-# Protect the /metrics endpoint by putting it behind auth.
-# If you want your controller-manager to expose the /metrics
-# endpoint w/o any authn/z, please comment the following line.
-- manager_auth_proxy_patch.yaml
-
-# Mount the controller config file for loading manager configurations
-# through a ComponentConfig type
-#- manager_config_patch.yaml
-
-# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
-# crd/kustomization.yaml
-#- manager_webhook_patch.yaml
-
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
-# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
-# 'CERTMANAGER' needs to be enabled to use ca injection
-#- webhookcainjection_patch.yaml
-
-# the following config is for teaching kustomize how to do var substitution
-vars:
-# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
-#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
-#  objref:
-#    kind: Certificate
-#    group: cert-manager.io
-#    version: v1
-#    name: serving-cert # this name should match the one in certificate.yaml
-#  fieldref:
-#    fieldpath: metadata.namespace
-#- name: CERTIFICATE_NAME
-#  objref:
-#    kind: Certificate
-#    group: cert-manager.io
-#    version: v1
-#    name: serving-cert # this name should match the one in certificate.yaml
-#- name: SERVICE_NAMESPACE # namespace of the service
-#  objref:
-#    kind: Service
-#    version: v1
-#    name: webhook-service
-#  fieldref:
-#    fieldpath: metadata.namespace
-#- name: SERVICE_NAME
-#  objref:
-#    kind: Service
-#    version: v1
-#    name: webhook-service
+- ../default
+
+images:
+- name: quay.io/kairos/osbuilder
+  newName: quay.io/kairos/osbuilder
+  newTag: test
@@ -1,40 +0,0 @@
-# This patch inject a sidecar container which is a HTTP proxy for the
-# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: kube-rbac-proxy
-        securityContext:
-          allowPrivilegeEscalation: false
-        # TODO(user): uncomment for common cases that do not require escalating privileges
-        # capabilities:
-        #   drop:
-        #     - "ALL"
-        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0
-        args:
-        - "--secure-listen-address=0.0.0.0:8443"
-        - "--upstream=http://127.0.0.1:8080/"
-        - "--logtostderr=true"
-        - "--v=0"
-        ports:
-        - containerPort: 8443
-          protocol: TCP
-          name: https
-        resources:
-          limits:
-            cpu: 500m
-            memory: 128Mi
-          requests:
-            cpu: 5m
-            memory: 64Mi
-      - name: manager
-        args:
-        - "--health-probe-bind-address=:8081"
-        - "--metrics-bind-address=127.0.0.1:8080"
-        - "--leader-elect"
@@ -1,21 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: controller-manager
-  namespace: system
-spec:
-  template:
-    spec:
-      containers:
-      - name: manager
-        imagePullPolicy: Never
-        args:
-        - "--config=controller_manager_config.yaml"
-        volumeMounts:
-        - name: manager-config
-          mountPath: /controller_manager_config.yaml
-          subPath: controller_manager_config.yaml
-      volumes:
-      - name: manager-config
-        configMap:
-          name: manager-config
@@ -1,6 +1,8 @@
 resources:
 - manager.yaml

+namespace: system
+
 generatorOptions:
   disableNameSuffixHash: true
@@ -9,7 +9,6 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: controller-manager
-  namespace: system
   labels:
     control-plane: controller-manager
 spec:
config/nginx/deployment.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: nginx-public
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 3Gi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  labels:
+    app.kubernetes.io/name: osbuilder-nginx
+spec:
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: osbuilder-nginx
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: osbuilder-nginx
+    spec:
+      containers:
+      - image: nginx
+        name: nginx
+        volumeMounts:
+        - mountPath: "/usr/share/nginx/html"
+          name: nginx-public
+        ports:
+        - containerPort: 80
+      serviceAccountName: controller-manager
+      terminationGracePeriodSeconds: 10
+      volumes:
+      - name: nginx-public
+        persistentVolumeClaim:
+          claimName: nginx-public
config/nginx/kustomization.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+resources:
+- deployment.yaml
+- service.yaml
+- role.yaml
config/nginx/role.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: artifactCopier
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
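This Role is the minimum the artifact copier needs: get/list on pods to locate the nginx pod by label, and create on pods/exec to stream files into it. The push-to-server container added in controllers/job.go does exactly that with a tar | kubectl exec pipeline; the sketch below shows the same operation from Go with client-go, as an illustration rather than code from this commit.

// Illustrative only: stream a tar archive into the artifact server pod, the
// Go equivalent of the job's "tar cf - ... | kubectl exec ... tar xf -" step.
package copier

import (
	"bytes"
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

func copyTarToPod(ctx context.Context, cfg *rest.Config, cs *kubernetes.Clientset, namespace, labelSelector, targetPath string, tarball []byte) error {
	// pods list/get: find the first pod matching the copy-to-pod-label selector.
	pods, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
	if err != nil {
		return fmt.Errorf("listing artifact pods: %w", err)
	}
	if len(pods.Items) == 0 {
		return fmt.Errorf("no artifact pod found in namespace %s", namespace)
	}

	// pods/exec create: untar the archive inside the pod at the target path.
	req := cs.CoreV1().RESTClient().Post().
		Namespace(namespace).Resource("pods").Name(pods.Items[0].Name).
		SubResource("exec").
		VersionedParams(&v1.PodExecOptions{
			Command: []string{"tar", "xf", "-", "-C", targetPath},
			Stdin:   true,
			Stdout:  true,
			Stderr:  true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
	if err != nil {
		return err
	}
	var stdout, stderr bytes.Buffer
	return exec.Stream(remotecommand.StreamOptions{Stdin: bytes.NewReader(tarball), Stdout: &stdout, Stderr: &stderr})
}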
config/nginx/service.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: osbuilder-nginx
+spec:
+  type: NodePort
+  selector:
+    app.kubernetes.io/name: osbuilder-nginx
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 80
@@ -5,6 +5,31 @@ metadata:
   creationTimestamp: null
   name: manager-role
 rules:
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - "rbac.authorization.k8s.io"
+  resources:
+  - roles
+  - rolebindings
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - build.kairos.io
   resources:
@@ -40,17 +65,30 @@ rules:
 - apiGroups:
   - ""
   resources:
-  - services
   - configmaps
   verbs:
   - get
   - create
   - update
 - apiGroups:
-  - "apps"
+  - "batch"
   resources:
-  - deployments
+  - jobs
   verbs:
   - get
   - create
   - update
+# Temporary so that it can grant these permissions to the created role
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - list
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
@@ -1,338 +0,0 @@
/*
|
|
||||||
Copyright 2022.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package controllers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
|
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
func genDeploymentLabel(s string) map[string]string {
|
|
||||||
return map[string]string{
|
|
||||||
"osbuild": "workload" + s,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Handle registry auth
|
|
||||||
// TODO: This shells out, but needs ENV_VAR with key refs mapping
|
|
||||||
func unpackContainer(id, containerImage, pullImage string, pullOptions buildv1alpha1.Pull) v1.Container {
|
|
||||||
return v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
Name: fmt.Sprintf("pull-image-%s", id),
|
|
||||||
Image: containerImage,
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Args: []string{
|
|
||||||
fmt.Sprintf(
|
|
||||||
"luet util unpack %s %s",
|
|
||||||
pullImage,
|
|
||||||
"/rootfs",
|
|
||||||
),
|
|
||||||
},
|
|
||||||
VolumeMounts: []v1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: "rootfs",
|
|
||||||
MountPath: "/rootfs",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push) v1.Container {
|
|
||||||
return v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
Name: "create-image",
|
|
||||||
Image: containerImage,
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Args: []string{
|
|
||||||
fmt.Sprintf(
|
|
||||||
"tar -czvpf test.tar -C /rootfs . && luet util pack %s test.tar image.tar && mv image.tar /public",
|
|
||||||
pushOptions.ImageName,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
VolumeMounts: []v1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: "rootfs",
|
|
||||||
MountPath: "/rootfs",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "public",
|
|
||||||
MountPath: "/public",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func osReleaseContainer(containerImage string) v1.Container {
|
|
||||||
return v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
Name: "os-release",
|
|
||||||
Image: containerImage,
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Args: []string{
|
|
||||||
"cp -rfv /etc/os-release /rootfs/etc/os-release",
|
|
||||||
},
|
|
||||||
VolumeMounts: []v1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: "config",
|
|
||||||
MountPath: "/etc/os-release",
|
|
||||||
SubPath: "os-release",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "rootfs",
|
|
||||||
MountPath: "/rootfs",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OSArtifactReconciler) genDeployment(artifact buildv1alpha1.OSArtifact, svc *v1.Service) *appsv1.Deployment {
|
|
||||||
// TODO: svc is unused, but could be used in the future to generate the Netboot URL
|
|
||||||
objMeta := metav1.ObjectMeta{
|
|
||||||
Name: artifact.Name,
|
|
||||||
Namespace: artifact.Namespace,
|
|
||||||
OwnerReferences: genOwner(artifact),
|
|
||||||
}
|
|
||||||
|
|
||||||
pushImage := artifact.Spec.PushOptions.Push
|
|
||||||
|
|
||||||
privileged := false
|
|
||||||
serviceAccount := false
|
|
||||||
|
|
||||||
cmd := fmt.Sprintf(
|
|
||||||
"/entrypoint.sh --debug --name %s build-iso --date=false --output /public dir:/rootfs",
|
|
||||||
artifact.Name,
|
|
||||||
)
|
|
||||||
|
|
||||||
volumeMounts := []v1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: "public",
|
|
||||||
MountPath: "/public",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "rootfs",
|
|
||||||
MountPath: "/rootfs",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.GRUBConfig != "" {
|
|
||||||
volumeMounts = append(volumeMounts, v1.VolumeMount{
|
|
||||||
Name: "config",
|
|
||||||
MountPath: "/iso/iso-overlay/boot/grub2/grub.cfg",
|
|
||||||
SubPath: "grub.cfg",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
cloudImgCmd := fmt.Sprintf(
|
|
||||||
"/raw-images.sh /rootfs /public/%s.raw",
|
|
||||||
artifact.Name,
|
|
||||||
)
|
|
||||||
|
|
||||||
if artifact.Spec.CloudConfig != "" {
|
|
||||||
volumeMounts = append(volumeMounts, v1.VolumeMount{
|
|
||||||
Name: "config",
|
|
||||||
MountPath: "/iso/iso-overlay/cloud_config.yaml",
|
|
||||||
SubPath: "config",
|
|
||||||
})
|
|
||||||
|
|
||||||
cloudImgCmd += " /iso/iso-overlay/cloud_config.yaml"
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.CloudConfig != "" || artifact.Spec.GRUBConfig != "" {
|
|
||||||
cmd = fmt.Sprintf(
|
|
||||||
"/entrypoint.sh --debug --name %s build-iso --date=false --overlay-iso /iso/iso-overlay --output /public dir:/rootfs",
|
|
||||||
artifact.Name,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
buildIsoContainer := v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
|
||||||
Name: "build-iso",
|
|
||||||
Image: r.ToolImage,
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Args: []string{
|
|
||||||
cmd,
|
|
||||||
},
|
|
||||||
VolumeMounts: volumeMounts,
|
|
||||||
}
|
|
||||||
|
|
||||||
buildCloudImageContainer := v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
|
||||||
Name: "build-cloud-image",
|
|
||||||
Image: r.ToolImage,
|
|
||||||
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Args: []string{
|
|
||||||
cloudImgCmd,
|
|
||||||
},
|
|
||||||
VolumeMounts: volumeMounts,
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.DiskSize != "" {
|
|
||||||
buildCloudImageContainer.Env = []v1.EnvVar{{
|
|
||||||
Name: "EXTEND",
|
|
||||||
Value: artifact.Spec.DiskSize,
|
|
||||||
}}
|
|
||||||
}
|
|
||||||
|
|
||||||
extractNetboot := v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
|
||||||
Name: "build-netboot",
|
|
||||||
Image: r.ToolImage,
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Env: []v1.EnvVar{{
|
|
||||||
Name: "URL",
|
|
||||||
Value: artifact.Spec.NetbootURL,
|
|
||||||
}},
|
|
||||||
Args: []string{
|
|
||||||
fmt.Sprintf(
|
|
||||||
"/netboot.sh /public/%s.iso /public/%s",
|
|
||||||
artifact.Name,
|
|
||||||
artifact.Name,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
VolumeMounts: volumeMounts,
|
|
||||||
}
|
|
||||||
|
|
||||||
buildAzureCloudImageContainer := v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
|
||||||
Name: "build-azure-cloud-image",
|
|
||||||
Image: r.ToolImage,
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Args: []string{
|
|
||||||
fmt.Sprintf(
|
|
||||||
"/azure.sh /public/%s.raw /public/%s.vhd",
|
|
||||||
artifact.Name,
|
|
||||||
artifact.Name,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
VolumeMounts: volumeMounts,
|
|
||||||
}
|
|
||||||
|
|
||||||
buildGCECloudImageContainer := v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
|
||||||
Name: "build-gce-cloud-image",
|
|
||||||
Image: r.ToolImage,
|
|
||||||
Command: []string{"/bin/bash", "-cxe"},
|
|
||||||
Args: []string{
|
|
||||||
fmt.Sprintf(
|
|
||||||
"/gce.sh /public/%s.raw /public/%s.gce.raw",
|
|
||||||
artifact.Name,
|
|
||||||
artifact.Name,
|
|
||||||
),
|
|
||||||
},
|
|
||||||
VolumeMounts: volumeMounts,
|
|
||||||
}
|
|
||||||
|
|
||||||
servingContainer := v1.Container{
|
|
||||||
ImagePullPolicy: v1.PullAlways,
|
|
||||||
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
|
||||||
Name: "serve",
|
|
||||||
Ports: []v1.ContainerPort{v1.ContainerPort{Name: "http", ContainerPort: 80}},
|
|
||||||
Image: r.ServingImage,
|
|
||||||
VolumeMounts: []v1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: "public",
|
|
||||||
MountPath: "/usr/share/nginx/html",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
pod := v1.PodSpec{
|
|
||||||
AutomountServiceAccountToken: &serviceAccount,
|
|
||||||
Volumes: []v1.Volume{
|
|
||||||
{
|
|
||||||
Name: "public",
|
|
||||||
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "rootfs",
|
|
||||||
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "config",
|
|
||||||
VolumeSource: v1.VolumeSource{
|
|
||||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
|
||||||
LocalObjectReference: v1.LocalObjectReference{Name: artifact.Name}}},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
pod.InitContainers = []v1.Container{unpackContainer("baseimage", r.ToolImage, artifact.Spec.ImageName, artifact.Spec.PullOptions)}
|
|
||||||
|
|
||||||
for i, bundle := range artifact.Spec.Bundles {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, unpackContainer(fmt.Sprint(i), r.ToolImage, bundle, artifact.Spec.PullOptions))
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.OSRelease != "" {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, osReleaseContainer(r.ToolImage))
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.ISO || artifact.Spec.Netboot {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, buildIsoContainer)
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.Netboot {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, extractNetboot)
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.CloudImage || artifact.Spec.AzureImage || artifact.Spec.GCEImage {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, buildCloudImageContainer)
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.AzureImage {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, buildAzureCloudImageContainer)
|
|
||||||
}
|
|
||||||
|
|
||||||
if artifact.Spec.GCEImage {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
|
|
||||||
}
|
|
||||||
|
|
||||||
if pushImage {
|
|
||||||
pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact.Spec.PushOptions))
|
|
||||||
}
|
|
||||||
|
|
||||||
pod.Containers = []v1.Container{servingContainer}
|
|
||||||
|
|
||||||
deploymentLabels := genDeploymentLabel(artifact.Name)
|
|
||||||
replicas := int32(1)
|
|
||||||
|
|
||||||
return &appsv1.Deployment{
|
|
||||||
ObjectMeta: objMeta,
|
|
||||||
|
|
||||||
Spec: appsv1.DeploymentSpec{
|
|
||||||
Selector: &metav1.LabelSelector{MatchLabels: deploymentLabels},
|
|
||||||
Replicas: &replicas,
|
|
||||||
Template: v1.PodTemplateSpec{
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Labels: deploymentLabels,
|
|
||||||
},
|
|
||||||
Spec: pod,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
controllers/job.go (new file, 528 lines)
@@ -0,0 +1,528 @@
|
/*
|
||||||
|
Copyright 2022.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
batchv1 "k8s.io/api/batch/v1"
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
|
|
||||||
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
|
"k8s.io/client-go/tools/remotecommand"
|
||||||
|
)
|
||||||
|
|
||||||
|
func genJobLabel(s string) map[string]string {
|
||||||
|
return map[string]string{
|
||||||
|
"osbuild": "workload" + s,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Handle registry auth
|
||||||
|
// TODO: This shells out, but needs ENV_VAR with key refs mapping
|
||||||
|
// TODO: Cache downloaded images?
|
||||||
|
func unpackContainer(id, containerImage, pullImage string, pullOptions buildv1alpha1.Pull) v1.Container {
|
||||||
|
return v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
Name: fmt.Sprintf("pull-image-%s", id),
|
||||||
|
Image: containerImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{
|
||||||
|
fmt.Sprintf(
|
||||||
|
"luet util unpack %s %s",
|
||||||
|
pullImage,
|
||||||
|
"/rootfs",
|
||||||
|
),
|
||||||
|
},
|
||||||
|
VolumeMounts: []v1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: "rootfs",
|
||||||
|
MountPath: "/rootfs",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createImageContainer(containerImage string, pushOptions buildv1alpha1.Push) v1.Container {
|
||||||
|
return v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
Name: "create-image",
|
||||||
|
Image: containerImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{
|
||||||
|
fmt.Sprintf(
|
||||||
|
"tar -czvpf test.tar -C /rootfs . && luet util pack %s test.tar image.tar && mv image.tar /artifacts",
|
||||||
|
pushOptions.ImageName,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
VolumeMounts: []v1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: "rootfs",
|
||||||
|
MountPath: "/rootfs",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "artifacts",
|
||||||
|
MountPath: "/artifacts",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
|
||||||
|
command := fmt.Sprintf("tar cf - -C artifacts/ . | kubectl exec -i -n %s $(kubectl get pods -l %s -n %s --no-headers -o custom-columns=\":metadata.name\" | head -n1) -- tar xf - -C %s", artifactPodInfo.Namespace, artifactPodInfo.Label, artifactPodInfo.Namespace, artifactPodInfo.Path)
|
||||||
|
fmt.Printf("command = %+v\n", command)
|
||||||
|
|
||||||
|
return v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
Name: "push-to-server",
|
||||||
|
Image: containerImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{command},
|
||||||
|
VolumeMounts: []v1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: "rootfs",
|
||||||
|
MountPath: "/rootfs",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "artifacts",
|
||||||
|
MountPath: "/artifacts",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func osReleaseContainer(containerImage string) v1.Container {
|
||||||
|
return v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
Name: "os-release",
|
||||||
|
Image: containerImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{
|
||||||
|
"cp -rfv /etc/os-release /rootfs/etc/os-release",
|
||||||
|
},
|
||||||
|
VolumeMounts: []v1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: "config",
|
||||||
|
MountPath: "/etc/os-release",
|
||||||
|
SubPath: "os-release",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "rootfs",
|
||||||
|
MountPath: "/rootfs",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv1.Job {
|
||||||
|
objMeta := genObjectMeta(artifact)
|
||||||
|
|
||||||
|
pushImage := artifact.Spec.PushOptions.Push
|
||||||
|
|
||||||
|
privileged := false
|
||||||
|
serviceAccount := true
|
||||||
|
|
||||||
|
cmd := fmt.Sprintf(
|
||||||
|
"/entrypoint.sh --debug --name %s build-iso --date=false --output /artifacts dir:/rootfs",
|
||||||
|
artifact.Name,
|
||||||
|
)
|
||||||
|
|
||||||
|
volumeMounts := []v1.VolumeMount{
|
||||||
|
{
|
||||||
|
Name: "artifacts",
|
||||||
|
MountPath: "/artifacts",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "rootfs",
|
||||||
|
MountPath: "/rootfs",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.GRUBConfig != "" {
|
||||||
|
volumeMounts = append(volumeMounts, v1.VolumeMount{
|
||||||
|
Name: "config",
|
||||||
|
MountPath: "/iso/iso-overlay/boot/grub2/grub.cfg",
|
||||||
|
SubPath: "grub.cfg",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
cloudImgCmd := fmt.Sprintf(
|
||||||
|
"/raw-images.sh /rootfs /artifacts/%s.raw",
|
||||||
|
artifact.Name,
|
||||||
|
)
|
||||||
|
|
||||||
|
if artifact.Spec.CloudConfig != "" {
|
||||||
|
volumeMounts = append(volumeMounts, v1.VolumeMount{
|
||||||
|
Name: "config",
|
||||||
|
MountPath: "/iso/iso-overlay/cloud_config.yaml",
|
||||||
|
SubPath: "config",
|
||||||
|
})
|
||||||
|
|
||||||
|
cloudImgCmd += " /iso/iso-overlay/cloud_config.yaml"
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.CloudConfig != "" || artifact.Spec.GRUBConfig != "" {
|
||||||
|
cmd = fmt.Sprintf(
|
||||||
|
"/entrypoint.sh --debug --name %s build-iso --date=false --overlay-iso /iso/iso-overlay --output /artifacts dir:/rootfs",
|
||||||
|
artifact.Name,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
buildIsoContainer := v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
||||||
|
Name: "build-iso",
|
||||||
|
Image: r.ToolImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{
|
||||||
|
cmd,
|
||||||
|
},
|
||||||
|
VolumeMounts: volumeMounts,
|
||||||
|
}
|
||||||
|
|
||||||
|
buildCloudImageContainer := v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
||||||
|
Name: "build-cloud-image",
|
||||||
|
Image: r.ToolImage,
|
||||||
|
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{
|
||||||
|
cloudImgCmd,
|
||||||
|
},
|
||||||
|
VolumeMounts: volumeMounts,
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.DiskSize != "" {
|
||||||
|
buildCloudImageContainer.Env = []v1.EnvVar{{
|
||||||
|
Name: "EXTEND",
|
||||||
|
Value: artifact.Spec.DiskSize,
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
extractNetboot := v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
||||||
|
Name: "build-netboot",
|
||||||
|
Image: r.ToolImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Env: []v1.EnvVar{{
|
||||||
|
Name: "URL",
|
||||||
|
Value: artifact.Spec.NetbootURL,
|
||||||
|
}},
|
||||||
|
Args: []string{
|
||||||
|
fmt.Sprintf(
|
||||||
|
"/netboot.sh /artifacts/%s.iso /artifacts/%s",
|
||||||
|
artifact.Name,
|
||||||
|
artifact.Name,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
VolumeMounts: volumeMounts,
|
||||||
|
}
|
||||||
|
|
||||||
|
buildAzureCloudImageContainer := v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
||||||
|
Name: "build-azure-cloud-image",
|
||||||
|
Image: r.ToolImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{
|
||||||
|
fmt.Sprintf(
|
||||||
|
"/azure.sh /artifacts/%s.raw /artifacts/%s.vhd",
|
||||||
|
artifact.Name,
|
||||||
|
artifact.Name,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
VolumeMounts: volumeMounts,
|
||||||
|
}
|
||||||
|
|
||||||
|
buildGCECloudImageContainer := v1.Container{
|
||||||
|
ImagePullPolicy: v1.PullAlways,
|
||||||
|
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
|
||||||
|
Name: "build-gce-cloud-image",
|
||||||
|
Image: r.ToolImage,
|
||||||
|
Command: []string{"/bin/bash", "-cxe"},
|
||||||
|
Args: []string{
|
||||||
|
fmt.Sprintf(
|
||||||
|
"/gce.sh /artifacts/%s.raw /artifacts/%s.gce.raw",
|
||||||
|
artifact.Name,
|
||||||
|
artifact.Name,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
VolumeMounts: volumeMounts,
|
||||||
|
}
|
||||||
|
|
||||||
|
pod := v1.PodSpec{
|
||||||
|
AutomountServiceAccountToken: &serviceAccount,
|
||||||
|
ServiceAccountName: objMeta.Name,
|
||||||
|
RestartPolicy: v1.RestartPolicyNever,
|
||||||
|
Volumes: []v1.Volume{
|
||||||
|
{
|
||||||
|
Name: "artifacts",
|
||||||
|
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "rootfs",
|
||||||
|
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "config",
|
||||||
|
VolumeSource: v1.VolumeSource{
|
||||||
|
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||||
|
LocalObjectReference: v1.LocalObjectReference{Name: artifact.Name}}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pod.InitContainers = []v1.Container{unpackContainer("baseimage", r.ToolImage, artifact.Spec.ImageName, artifact.Spec.PullOptions)}
|
||||||
|
|
||||||
|
for i, bundle := range artifact.Spec.Bundles {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, unpackContainer(fmt.Sprint(i), r.ToolImage, bundle, artifact.Spec.PullOptions))
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.OSRelease != "" {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, osReleaseContainer(r.ToolImage))
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.ISO || artifact.Spec.Netboot {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, buildIsoContainer)
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.Netboot {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, extractNetboot)
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.CloudImage || artifact.Spec.AzureImage || artifact.Spec.GCEImage {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, buildCloudImageContainer)
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.AzureImage {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, buildAzureCloudImageContainer)
|
||||||
|
}
|
||||||
|
|
||||||
|
if artifact.Spec.GCEImage {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Does it make sense to build the image and not push it? Maybe remove
|
||||||
|
// this flag?
|
||||||
|
if pushImage {
|
||||||
|
pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact.Spec.PushOptions))
|
||||||
|
}
|
||||||
|
|
||||||
|
pod.Containers = []v1.Container{
|
||||||
|
createPushToServerImageContainer(r.CopierImage, r.ArtifactPodInfo),
|
||||||
|
}
|
||||||
|
|
||||||
|
jobLabels := genJobLabel(artifact.Name)
|
||||||
|
|
||||||
|
job := batchv1.Job{
|
||||||
|
ObjectMeta: objMeta,
|
||||||
|
Spec: batchv1.JobSpec{
|
||||||
|
Template: v1.PodTemplateSpec{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: jobLabels,
|
||||||
|
},
|
||||||
|
Spec: pod,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return &job
|
||||||
|
}
|
||||||
|
|
||||||
|
// createServiceAccount creates a service account that has the permissions to
|
||||||
|
// copy the artifacts to the http server Pod. This service account is used for
|
||||||
|
// the "push to server" container.
|
||||||
|
func (r *OSArtifactReconciler) createCopierServiceAccount(ctx context.Context, objMeta metav1.ObjectMeta) error {
|
||||||
|
sa, err := r.clientSet.CoreV1().
|
||||||
|
ServiceAccounts(objMeta.Namespace).Get(ctx, objMeta.Name, metav1.GetOptions{})
|
||||||
|
if sa == nil || apierrors.IsNotFound(err) {
|
||||||
|
t := true
|
||||||
|
_, err := r.clientSet.CoreV1().ServiceAccounts(objMeta.Namespace).Create(ctx,
|
||||||
|
&v1.ServiceAccount{
|
||||||
|
ObjectMeta: objMeta,
|
||||||
|
AutomountServiceAccountToken: &t,
|
||||||
|
}, metav1.CreateOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// func (r *OSArtifactReconciler) createCopierRole(ctx context.Context, objMeta metav1.ObjectMeta) error {
|
||||||
|
// role, err := r.clientSet.RbacV1().
|
||||||
|
// Roles(objMeta.Namespace).
|
||||||
|
// Get(ctx, objMeta.Name, metav1.GetOptions{})
|
||||||
|
// if role == nil || apierrors.IsNotFound(err) {
|
||||||
|
// _, err := r.clientSet.RbacV1().Roles(objMeta.Namespace).Create(ctx,
|
||||||
|
// &rbacv1.Role{
|
||||||
|
// ObjectMeta: objMeta,
|
||||||
|
// Rules: []rbacv1.PolicyRule{
|
||||||
|
// // TODO: The actual permissions we need is that to copy to a Pod.
|
||||||
|
// // The Pod is on another namespace, so we need a cluster wide permission.
|
||||||
|
// // This can get viral because the controller needs to have the permissions
|
||||||
|
// // if it is to grant them to the Job.
|
||||||
|
// {
|
||||||
|
// Verbs: []string{"list"},
|
||||||
|
// APIGroups: []string{""},
|
||||||
|
// Resources: []string{"pods"},
|
||||||
|
// },
|
||||||
|
// },
|
||||||
|
// },
|
||||||
|
// metav1.CreateOptions{},
|
||||||
|
// )
|
||||||
|
// if err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
|
||||||
|
func (r *OSArtifactReconciler) createCopierRoleBinding(ctx context.Context, objMeta metav1.ObjectMeta) error {
|
||||||
|
newrb := &rbacv1.RoleBinding{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: objMeta.Name,
|
||||||
|
Namespace: r.ArtifactPodInfo.Namespace,
|
||||||
|
// TODO: We can't have cross-namespace owners. The role binding will have to deleted explicitly by the reconciler (finalizer?)
|
||||||
|
// OwnerReferences: objMeta.OwnerReferences,
|
||||||
|
},
|
||||||
|
RoleRef: rbacv1.RoleRef{
|
||||||
|
APIGroup: "rbac.authorization.k8s.io",
|
||||||
|
Kind: "Role",
|
||||||
|
Name: r.ArtifactPodInfo.Role,
|
||||||
|
},
|
||||||
|
Subjects: []rbacv1.Subject{
|
||||||
|
{
|
||||||
|
Kind: "ServiceAccount",
|
||||||
|
Name: objMeta.Name,
|
||||||
|
Namespace: objMeta.Namespace,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rb, err := r.clientSet.RbacV1().
|
||||||
|
RoleBindings(r.ArtifactPodInfo.Namespace).
|
||||||
|
Get(ctx, objMeta.Name, metav1.GetOptions{})
|
||||||
|
if rb == nil || apierrors.IsNotFound(err) {
|
||||||
|
_, err := r.clientSet.RbacV1().
|
||||||
|
RoleBindings(r.ArtifactPodInfo.Namespace).
|
||||||
|
Create(ctx, newrb, metav1.CreateOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// createRBAC creates a ServiceAccount, and a binding to the CopierRole so that
|
||||||
|
// the container that copies the artifacts to the http server Pod has the
|
||||||
|
// permissions to do so.
|
||||||
|
func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
|
||||||
|
objMeta := genObjectMeta(artifact)
|
||||||
|
|
||||||
|
err := r.createCopierServiceAccount(ctx, objMeta)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "creating a service account")
|
||||||
|
}
|
||||||
|
|
||||||
|
err = r.createCopierRoleBinding(ctx, objMeta)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "creating a role binding for the copy-role")
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeRBAC deletes the role binding between the service account of this artifact
|
||||||
|
// and the CopierRole. The ServiceAccount is removed automatically through the Owner
|
||||||
|
// relationship with the OSArtifact. The RoleBinding can't have it as an owner
|
||||||
|
// because it is in a different Namespace.
|
||||||
|
func (r *OSArtifactReconciler) removeRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
|
||||||
|
err := r.clientSet.RbacV1().RoleBindings(r.ArtifactPodInfo.Namespace).
|
||||||
|
Delete(ctx, artifact.Name, metav1.DeleteOptions{})
|
||||||
|
// Ignore not found. No need to do anything.
|
||||||
|
if err != nil && apierrors.IsNotFound(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *OSArtifactReconciler) removeArtifacts(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
|
||||||
|
//Finding Pods using labels
|
||||||
|
fmt.Printf("r.ArtifactPodInfo = %+v\n", r.ArtifactPodInfo.Label)
|
||||||
|
pods, err := r.clientSet.CoreV1().Pods(r.ArtifactPodInfo.Namespace).
|
||||||
|
List(ctx, metav1.ListOptions{LabelSelector: r.ArtifactPodInfo.Label})
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, fmt.Sprintf("listing pods with label %s in namespace %s", r.ArtifactPodInfo.Label, r.ArtifactPodInfo.Namespace))
|
||||||
|
}
|
||||||
|
if len(pods.Items) < 1 {
|
||||||
|
return errors.New("No artifact pod found")
|
||||||
|
}
|
||||||
|
pod := pods.Items[0]
|
||||||
|
|
||||||
|
stdout, stderr, err := r.executeRemoteCommand(r.ArtifactPodInfo.Namespace, pod.Name, fmt.Sprintf("rm -rf %s/%s.*", r.ArtifactPodInfo.Path, artifact.Name))
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, fmt.Sprintf("%s\n%s", stdout, stderr))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *OSArtifactReconciler) executeRemoteCommand(namespace, podName, command string) (string, string, error) {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
errBuf := &bytes.Buffer{}
|
||||||
|
request := r.clientSet.CoreV1().RESTClient().
|
||||||
|
Post().
|
||||||
|
Namespace(namespace).
|
||||||
|
Resource("pods").
|
||||||
|
Name(podName).
|
||||||
|
SubResource("exec").
|
||||||
|
VersionedParams(&v1.PodExecOptions{
|
||||||
|
Command: []string{"/bin/sh", "-c", command},
|
||||||
|
Stdin: false,
|
||||||
|
Stdout: true,
|
||||||
|
Stderr: true,
|
||||||
|
TTY: true,
|
||||||
|
}, scheme.ParameterCodec)
|
||||||
|
|
||||||
|
exec, err := remotecommand.NewSPDYExecutor(r.restConfig, "POST", request.URL())
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
err = exec.Stream(remotecommand.StreamOptions{
|
||||||
|
Stdout: buf,
|
||||||
|
Stderr: errBuf,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return "", "", fmt.Errorf("%w Failed executing command %s on %v/%v", err, command, namespace, podName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.String(), errBuf.String(), nil
|
||||||
|
}
|
@ -25,22 +25,42 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
"sigs.k8s.io/cluster-api/util/patch"
|
"sigs.k8s.io/cluster-api/util/patch"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const FinalizerName = "build.kairos.io/osbuilder-finalizer"
|
||||||
|
|
||||||
|
type ArtifactPodInfo struct {
|
||||||
|
Label string
|
||||||
|
Namespace string
|
||||||
|
Path string
|
||||||
|
Role string
|
||||||
|
}
|
||||||
|
|
||||||
// OSArtifactReconciler reconciles a OSArtifact object
|
// OSArtifactReconciler reconciles a OSArtifact object
|
||||||
type OSArtifactReconciler struct {
|
type OSArtifactReconciler struct {
|
||||||
client.Client
|
client.Client
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
clientSet *kubernetes.Clientset
|
restConfig *rest.Config
|
||||||
ServingImage, ToolImage string
|
clientSet *kubernetes.Clientset
|
||||||
|
ServingImage, ToolImage, CopierImage string
|
||||||
|
ArtifactPodInfo ArtifactPodInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func genObjectMeta(artifact buildv1alpha1.OSArtifact) metav1.ObjectMeta {
|
||||||
|
return metav1.ObjectMeta{
|
||||||
|
Name: artifact.Name,
|
||||||
|
Namespace: artifact.Namespace,
|
||||||
|
OwnerReferences: genOwner(artifact),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
|
func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
|
||||||
@ -57,6 +77,12 @@ func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
|
|||||||
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/status,verbs=get;update;patch
|
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/status,verbs=get;update;patch
|
||||||
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/finalizers,verbs=update
|
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/finalizers,verbs=update
|
||||||
|
|
||||||
|
// TODO: Is this ^ how I should have created rbac permissions for the controller?
|
||||||
|
// - git commit all changes
|
||||||
|
// - generate code with kubebuilder
|
||||||
|
// - check if my permissions were removed
|
||||||
|
// - do it properly
|
||||||
|
|
||||||
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
// Reconcile is part of the main kubernetes reconciliation loop which aims to
|
||||||
// move the current state of the cluster closer to the desired state.
|
// move the current state of the cluster closer to the desired state.
|
||||||
// TODO(user): Modify the Reconcile function to compare the state specified by
|
// TODO(user): Modify the Reconcile function to compare the state specified by
|
||||||
@ -79,17 +105,22 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
|||||||
|
|
||||||
logger.Info(fmt.Sprintf("Reconciling %v", osbuild))
|
logger.Info(fmt.Sprintf("Reconciling %v", osbuild))
|
||||||
|
|
||||||
|
stop, err := r.handleFinalizer(ctx, &osbuild)
|
||||||
|
if err != nil || stop {
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
// generate configmap required for building a custom image
|
// generate configmap required for building a custom image
|
||||||
desiredConfigMap := r.genConfigMap(osbuild)
|
desiredConfigMap := r.genConfigMap(osbuild)
|
||||||
logger.Info(fmt.Sprintf("Checking configmap %v", osbuild))
|
logger.Info(fmt.Sprintf("Checking configmap %v", osbuild))
|
||||||
|
|
||||||
cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, v1.GetOptions{})
|
cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, metav1.GetOptions{})
|
||||||
if cfgMap == nil || apierrors.IsNotFound(err) {
|
if cfgMap == nil || apierrors.IsNotFound(err) {
|
||||||
logger.Info(fmt.Sprintf("Creating service %v", desiredConfigMap))
|
logger.Info(fmt.Sprintf("Creating config map %v", desiredConfigMap))
|
||||||
|
|
||||||
cfgMap, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, v1.CreateOptions{})
|
_, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(err, "Failed while creating svc")
|
logger.Error(err, "Failed while creating config map")
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
return ctrl.Result{Requeue: true}, err
|
return ctrl.Result{Requeue: true}, err
|
||||||
@ -98,34 +129,21 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
|||||||
return ctrl.Result{Requeue: true}, err
|
return ctrl.Result{Requeue: true}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
desiredService := genService(osbuild)
|
|
||||||
logger.Info(fmt.Sprintf("Checking service %v", osbuild))
|
|
||||||
|
|
||||||
svc, err := r.clientSet.CoreV1().Services(req.Namespace).Get(ctx, desiredService.Name, v1.GetOptions{})
|
|
||||||
if svc == nil || apierrors.IsNotFound(err) {
|
|
||||||
logger.Info(fmt.Sprintf("Creating service %v", desiredService))
|
|
||||||
|
|
||||||
svc, err = r.clientSet.CoreV1().Services(req.Namespace).Create(ctx, desiredService, v1.CreateOptions{})
|
|
||||||
if err != nil {
|
|
||||||
logger.Error(err, "Failed while creating svc")
|
|
||||||
return ctrl.Result{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctrl.Result{Requeue: true}, err
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return ctrl.Result{Requeue: true}, err
|
|
||||||
}
|
|
||||||
logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))
|
logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))
|
||||||
|
|
||||||
desiredDeployment := r.genDeployment(osbuild, svc)
|
err = r.createRBAC(ctx, osbuild)
|
||||||
deployment, err := r.clientSet.AppsV1().Deployments(req.Namespace).Get(ctx, desiredDeployment.Name, v1.GetOptions{})
|
if err != nil {
|
||||||
if deployment == nil || apierrors.IsNotFound(err) {
|
return ctrl.Result{Requeue: true}, err
|
||||||
logger.Info(fmt.Sprintf("Creating Deployment %v", deployment))
|
}
|
||||||
|
|
||||||
deployment, err = r.clientSet.AppsV1().Deployments(req.Namespace).Create(ctx, desiredDeployment, v1.CreateOptions{})
|
desiredJob := r.genJob(osbuild)
|
||||||
|
job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, metav1.GetOptions{})
|
||||||
|
if job == nil || apierrors.IsNotFound(err) {
|
||||||
|
logger.Info(fmt.Sprintf("Creating Job %v", job))
|
||||||
|
|
||||||
|
_, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(err, "Failed while creating deployment")
|
logger.Error(err, "Failed while creating job")
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -143,7 +161,7 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
if deployment.Status.ReadyReplicas == deployment.Status.Replicas {
|
if job.Status.Succeeded > 0 {
|
||||||
copy.Status.Phase = "Ready"
|
copy.Status.Phase = "Ready"
|
||||||
} else if copy.Status.Phase != "Building" {
|
} else if copy.Status.Phase != "Building" {
|
||||||
copy.Status.Phase = "Building"
|
copy.Status.Phase = "Building"
|
||||||
@ -166,13 +184,66 @@ func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request)
|
|||||||
// SetupWithManager sets up the controller with the Manager.
|
// SetupWithManager sets up the controller with the Manager.
|
||||||
func (r *OSArtifactReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
func (r *OSArtifactReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
|
|
||||||
clientset, err := kubernetes.NewForConfig(mgr.GetConfig())
|
cfg := mgr.GetConfig()
|
||||||
|
clientset, err := kubernetes.NewForConfig(cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
r.restConfig = cfg
|
||||||
r.clientSet = clientset
|
r.clientSet = clientset
|
||||||
|
|
||||||
return ctrl.NewControllerManagedBy(mgr).
|
return ctrl.NewControllerManagedBy(mgr).
|
||||||
For(&buildv1alpha1.OSArtifact{}).
|
For(&buildv1alpha1.OSArtifact{}).
|
||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Returns true if reconciliation should stop or false otherwise
|
||||||
|
func (r *OSArtifactReconciler) handleFinalizer(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) (bool, error) {
|
||||||
|
// examine DeletionTimestamp to determine if object is under deletion
|
||||||
|
if osbuild.DeletionTimestamp.IsZero() {
|
||||||
|
// The object is not being deleted, so if it does not have our finalizer,
|
||||||
|
// then lets add the finalizer and update the object. This is equivalent
|
||||||
|
// registering our finalizer.
|
||||||
|
if !controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
|
||||||
|
controllerutil.AddFinalizer(osbuild, FinalizerName)
|
||||||
|
if err := r.Update(ctx, osbuild); err != nil {
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// The object is being deleted
|
||||||
|
if controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
|
||||||
|
// our finalizer is present, so lets handle any external dependency
|
||||||
|
if err := r.finalize(ctx, osbuild); err != nil {
|
||||||
|
// if fail to delete the external dependency here, return with error
|
||||||
|
// so that it can be retried
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove our finalizer from the list and update it.
|
||||||
|
controllerutil.RemoveFinalizer(osbuild, FinalizerName)
|
||||||
|
if err := r.Update(ctx, osbuild); err != nil {
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop reconciliation as the item is being deleted
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
+
+// - Remove artifacts from the server Pod
+// - Delete role-binding (because it doesn't have the OSArtifact as an owner and won't be deleted automatically)
+func (r *OSArtifactReconciler) finalize(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) error {
+	if err := r.removeRBAC(ctx, *osbuild); err != nil {
+		return err
+	}
+
+	if err := r.removeArtifacts(ctx, *osbuild); err != nil {
+		return err
+	}
+
+	return nil
+}
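
Note: removeRBAC and removeArtifacts are not part of this excerpt. Purely as an illustration of the kind of cleanup the comment above describes, deleting an unowned RoleBinding with a client-go clientset could look like this sketch; the names are assumptions and this is not necessarily how removeRBAC is implemented.

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// deleteRoleBinding removes a RoleBinding that has no owner reference to the
// OSArtifact and therefore would not be garbage-collected with it.
func deleteRoleBinding(ctx context.Context, cs kubernetes.Interface, namespace, name string) error {
    return cs.RbacV1().RoleBindings(namespace).Delete(ctx, name, metav1.DeleteOptions{})
}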

@ -1,39 +0,0 @@
-/*
-Copyright 2022.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
-	buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func genService(artifact buildv1alpha1.OSArtifact) *v1.Service {
-	objMeta := metav1.ObjectMeta{
-		Name:            artifact.Name,
-		Namespace:       artifact.Namespace,
-		OwnerReferences: genOwner(artifact),
-	}
-	return &v1.Service{
-		ObjectMeta: objMeta,
-		Spec: v1.ServiceSpec{
-			Type:     v1.ServiceTypeNodePort,
-			Ports:    []v1.ServicePort{{Name: "http", Port: int32(80)}},
-			Selector: genDeploymentLabel(artifact.Name),
-		},
-	}
-}
1
go.mod
@ -51,6 +51,7 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
3
go.sum
@ -82,6 +82,7 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@ -136,6 +137,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
@ -375,6 +377,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
21
main.go
@ -52,12 +52,22 @@ func main() {
 	var metricsAddr string
 	var enableLeaderElection bool
 	var probeAddr string
-	var serveImage, toolImage string
+	var serveImage, toolImage, copierImage string
+	var copyToPodLabel, copyToNamespace, copyToPath, copierRole string
 
 	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
+
+	flag.StringVar(&copierImage, "copier-image", "quay.io/kairos/kubectl", "The image that is used to copy artifacts to the server pod.")
 	flag.StringVar(&serveImage, "serve-image", "nginx", "Serve image.")
 	// It needs luet inside
 	flag.StringVar(&toolImage, "tool-image", "quay.io/kairos/osbuilder-tools:latest", "Tool image.")
+
+	// Information on where to copy the artifacts
+	flag.StringVar(&copyToPodLabel, "copy-to-pod-label", "", "The label of the Pod to which artifacts should be copied.")
+	flag.StringVar(&copyToNamespace, "copy-to-namespace", "", "The namespace of the copy-to-pod-label Pod.")
+	flag.StringVar(&copyToPath, "copy-to-path", "", "The path under which to copy artifacts in the copy-to-pod-label Pod.")
+	flag.StringVar(&copierRole, "copy-role", "", "The name of the Kubernetes Role that has the permissions to copy artifacts to the copy-to-pod-label Pod")
+
 	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
 	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
 		"Enable leader election for controller manager. "+
@ -98,7 +108,14 @@ func main() {
 		Client:       mgr.GetClient(),
 		ServingImage: serveImage,
 		ToolImage:    toolImage,
-		Scheme:       mgr.GetScheme(),
+		CopierImage:  copierImage,
+		ArtifactPodInfo: controllers.ArtifactPodInfo{
+			Label:     copyToPodLabel,
+			Namespace: copyToNamespace,
+			Path:      copyToPath,
+			Role:      copierRole,
+		},
+		Scheme: mgr.GetScheme(),
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "OSArtifact")
 		os.Exit(1)

@ -1,6 +1,10 @@
 package e2e_test
 
 import (
+	"bytes"
+	"fmt"
+	"os/exec"
+	"strings"
 	"time"
 
 	. "github.com/onsi/ginkgo/v2"
@ -20,20 +24,91 @@ var _ = Describe("ISO build test", func() {
 			err := kubectl.Apply("", "../../tests/fixtures/simple.yaml")
 			Expect(err).ToNot(HaveOccurred())
 
-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "osartifacts", "hello-kairos", "jsonpath={.spec.imageName}")
-				return string(b)
-			}, 2*time.Minute, 2*time.Second).Should(Equal("quay.io/kairos/core-opensuse:latest"))
-
-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "deployments", "hello-kairos", "jsonpath={.spec.template.metadata.labels.osbuild}")
-				return string(b)
-			}, 2*time.Minute, 2*time.Second).Should(Equal("workloadhello-kairos"))
-			Eventually(func() string {
-				b, _ := kubectl.GetData("default", "deployments", "hello-kairos", "jsonpath={.spec.status.unavailableReplicas}")
-				return string(b)
-			}, 15*time.Minute, 2*time.Second).ShouldNot(Equal("1"))
+			itHasTheCorrectImage()
+			itHasTheCorrectLabels()
+			itCopiesTheArtifacts()
+
+			By("deleting the custom resource", func() {
+				err = kubectl.New().Delete("osartifacts", "-n", "default", "hello-kairos")
+				Expect(err).ToNot(HaveOccurred())
+			})
+
+			itCleansUpRoleBindings()
+			itDeletesTheArtifacts()
 		})
 	})
 
 })
+
+func itHasTheCorrectImage() {
+	Eventually(func() string {
+		b, _ := kubectl.GetData("default", "osartifacts", "hello-kairos", "jsonpath={.spec.imageName}")
+		fmt.Printf("looking for image core-opensuse:latest = %+v\n", string(b))
+		return string(b)
+	}, 2*time.Minute, 2*time.Second).Should(Equal("quay.io/kairos/core-opensuse:latest"))
+}
+
+func itHasTheCorrectLabels() {
+	Eventually(func() string {
+		b, _ := kubectl.GetData("default", "jobs", "hello-kairos", "jsonpath={.spec.template.metadata.labels.osbuild}")
+		fmt.Printf("looking for label workloadhello-kairos = %+v\n", string(b))
+		return string(b)
+	}, 2*time.Minute, 2*time.Second).Should(Equal("workloadhello-kairos"))
+}
+
+func itCopiesTheArtifacts() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		podName := strings.TrimSpace(findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx"))
+
+		out, _ := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
+
+		return out
+	}, 15*time.Minute, 2*time.Second).Should(MatchRegexp("hello-kairos.iso"))
+}
+
+func itCleansUpRoleBindings() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		rb := findRoleBindings(nginxNamespace)
+
+		return rb
+	}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos"))
+}
+
+func itDeletesTheArtifacts() {
+	nginxNamespace := "osartifactbuilder-operator-system"
+	Eventually(func() string {
+		podName := findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx")
+
+		out, err := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
+		Expect(err).ToNot(HaveOccurred(), out)
+
+		return out
+	}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos.iso"))
+}
+
+func findPodsWithLabel(namespace, label string) string {
+	kubectlCommand := fmt.Sprintf("kubectl get pods -n %s -l %s --no-headers -o custom-columns=\":metadata.name\" | head -n1", namespace, label)
+	cmd := exec.Command("bash", "-c", kubectlCommand)
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	Expect(err).ToNot(HaveOccurred(), stderr.String())
+
+	return strings.TrimSpace(out.String())
+}
+
+func findRoleBindings(namespace string) string {
+	kubectlCommand := fmt.Sprintf("kubectl get rolebindings -n %s --no-headers -o custom-columns=\":metadata.name\"", namespace)
+	cmd := exec.Command("bash", "-c", kubectlCommand)
+	var out bytes.Buffer
+	var stderr bytes.Buffer
+	cmd.Stdout = &out
+	cmd.Stderr = &stderr
+	err := cmd.Run()
+	Expect(err).ToNot(HaveOccurred(), stderr.String())
+
+	return strings.TrimSpace(out.String())
+}
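
Note: the helpers above shell out to kubectl via bash. An equivalent in-process lookup with client-go would avoid the bash/head pipeline; a minimal sketch follows (not part of this PR; how the clientset is constructed from the kubeconfig is assumed to be handled elsewhere).

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// firstPodWithLabel returns the name of the first Pod in the namespace matching the
// label selector, mirroring what findPodsWithLabel does with kubectl and head -n1.
func firstPodWithLabel(ctx context.Context, cs kubernetes.Interface, namespace, selector string) (string, error) {
    pods, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
    if err != nil {
        return "", err
    }
    if len(pods.Items) == 0 {
        return "", nil
    }
    return pods.Items[0].Name, nil
}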
4
tests/fixtures/simple.yaml
vendored
@ -6,7 +6,7 @@ spec:
   imageName: "quay.io/kairos/core-opensuse:latest"
   iso: true
   bundles:
-  - quay.io/kairos/packages:goreleaser-utils-1.11.1
+  - quay.io/kairos/packages:goreleaser-utils-1.13.1
   grubConfig: |
     search --file --set=root /boot/kernel.xz
     set default=0
@ -49,4 +49,4 @@ spec:
       device: "/dev/sda"
       reboot: true
       poweroff: true
-      auto: true # Required, for automated installations
+      auto: true # Required, for automated installations