Compare commits


1 Commit

Author: Mauro Morales
SHA1: 6751d7421f
Message: Add dmsetup remove before kpartx -d
  Signed-off-by: Mauro Morales <mauro.morales@spectrocloud.com>
Date: 2023-05-24 14:48:44 +02:00
50 changed files with 1087 additions and 9381 deletions
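The commit itself, per the message, adds a dmsetup remove call before kpartx -d during image teardown; the corresponding hunk is not readable in this compare (one file's diff below is suppressed as too large). A rough sketch of the ordering the message implies, with purely hypothetical device and image names:

# Illustrative only: the mapping and image names are not taken from the repository.
# Drop leftover device-mapper nodes for the mapped partitions first ...
sudo dmsetup remove /dev/mapper/loop0p1 || true
sudo dmsetup remove /dev/mapper/loop0p2 || true
# ... then let kpartx delete its partition mappings and detach the loop device.
sudo kpartx -dv disk.img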

View File

@@ -7,7 +7,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install earthly
uses: Luet-lab/luet-install-action@v1
with:

View File

@@ -8,16 +8,12 @@ on:
tags:
- '*'
concurrency:
group: image-${{ github.ref || github.head_ref }}
cancel-in-progress: true
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Prepare
id: prep
@@ -50,14 +46,14 @@ jobs:
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_PASSWORD }}
- name: Build
uses: docker/build-push-action@v5
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: .

View File

@@ -1,21 +0,0 @@
name: Lint
on:
push:
branches:
- master
pull_request:
paths:
- '**'
concurrency:
group: lint-${{ github.ref || github.head_ref }}
cancel-in-progress: true
env:
FORCE_COLOR: 1
jobs:
call-workflow:
uses: kairos-io/linting-composite-action/.github/workflows/reusable-linting.yaml@v0.0.8
with:
yamldirs: ".github/workflows/ config/ tools-image/"
is-go: true

View File

@@ -9,24 +9,12 @@ on:
- '*'
pull_request:
concurrency:
group: test-${{ github.ref || github.head_ref }}
cancel-in-progress: true
jobs:
e2e-tests:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Test
run: |
make kind-e2e-tests
controller-tests:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Test
run: |
make controller-tests
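Both CI jobs above shell out to Makefile targets that also appear in the Makefile hunks further down; run locally they would look roughly like this (assuming docker and kind are available):

# Spins up a kind cluster and runs the end-to-end suite via script/test.sh.
make kind-e2e-tests
# Sets up kind, deploys the controller, then runs the controller suite
# with USE_EXISTING_CLUSTER=true.
make controller-tests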

View File

@@ -7,16 +7,13 @@ on:
- master
tags:
- '*'
concurrency:
group: tool-image-${{ github.ref || github.head_ref }}
cancel-in-progress: true
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Prepare
id: prep
@@ -49,14 +46,14 @@ jobs:
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_PASSWORD }}
- name: Build
uses: docker/build-push-action@v5
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: ./tools-image

View File

@@ -1,5 +0,0 @@
disable=SC2086
disable=SC2034
disable=SC2046
disable=SC2068
disable=SC2154

View File

@@ -1,21 +0,0 @@
extends: default
rules:
# 80 chars should be enough, but don't fail if a line is longer
line-length:
max: 150
level: warning
# accept both key:
# - item
#
# and key:
# - item
indentation:
indent-sequences: whatever
truthy:
check-keys: false
document-start:
present: false

View File

@@ -16,10 +16,7 @@ bump-repositories:
COPY +last-commit-packages/REPO_ARM64 REPO_ARM64
ARG REPO_AMD64=$(cat REPO_AMD64)
ARG REPO_ARM64=$(cat REPO_ARM64)
COPY tools-image/luet-amd64.yaml luet-amd64.yaml
COPY tools-image/luet-arm64.yaml luet-arm64.yaml
RUN yq eval ".repositories[0] |= . * { \"reference\": \"${REPO_AMD64}\" }" -i luet-amd64.yaml
RUN yq eval ".repositories[0] |= . * { \"reference\": \"${REPO_ARM64}\" }" -i luet-arm64.yaml
SAVE ARTIFACT luet-arm64.yaml AS LOCAL tools-image/luet-arm64.yaml
SAVE ARTIFACT luet-amd64.yaml AS LOCAL tools-image/luet-amd64.yaml
COPY tools-image/luet.yaml luet.yaml
RUN yq eval ".repositories[0] |= . * { \"reference\": \"${REPO_AMD64}\" }" -i luet.yaml
RUN yq eval ".repositories[1] |= . * { \"reference\": \"${REPO_ARM64}\" }" -i luet.yaml
SAVE ARTIFACT luet.yaml AS LOCAL tools-image/luet.yaml
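The yq invocations above use the update operator together with a map merge (expr |= . * {...}) to patch a single repository entry in place; a standalone sketch of the same idiom against a throwaway file:

# Hypothetical file, not part of the repository.
cat > /tmp/luet.yaml <<'EOF'
repositories:
  - name: kairos-amd64
    reference: ""
  - name: kairos-arm64
    reference: ""
EOF
# Merge a new reference into the first repository entry only (yq v4 syntax).
yq eval '.repositories[0] |= . * {"reference": "v1.2.3"}' -i /tmp/luet.yaml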

View File

@@ -263,9 +263,6 @@ kind-setup:
kind-setup-image: docker-build
kind load docker-image --name $(CLUSTER_NAME) ${IMG}
kind-teardown:
kind delete cluster --name ${CLUSTER_NAME} || true
.PHONY: test_deps
test_deps:
go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo
@@ -278,11 +275,9 @@ unit-tests: test_deps
e2e-tests:
GINKGO=$(GINKGO) KUBE_VERSION=${KUBE_VERSION} $(ROOT_DIR)/script/test.sh
controller-tests: ginkgo kind-setup install undeploy-dev deploy-dev
USE_EXISTING_CLUSTER=true go run github.com/onsi/ginkgo/v2/ginkgo -v run controllers/.
kind-e2e-tests: ginkgo kind-setup install undeploy-dev deploy-dev e2e-tests
kubesplit: manifests kustomize
rm -rf helm-chart
mkdir helm-chart

View File

@@ -14,10 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha2 contains API Schema definitions for the build v1alpha2 API group
// Package v1alpha1 contains API Schema definitions for the build v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=build.kairos.io
package v1alpha2
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,7 +26,7 @@ import (
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "build.kairos.io", Version: "v1alpha2"}
GroupVersion = schema.GroupVersion{Group: "build.kairos.io", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

View File

@@ -14,27 +14,23 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
package v1alpha1
import (
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// OSArtifactSpec defines the desired state of OSArtifact
type OSArtifactSpec struct {
// There are 3 ways to specify a Kairos image:
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Points to a prepared kairos image (e.g. a released one)
// Foo is an example field of OSArtifact. Edit osartifact_types.go to remove/update
ImageName string `json:"imageName,omitempty"`
// Points to a vanilla (non-Kairos) image. osbuilder will try to convert this to a Kairos image
BaseImageName string `json:"baseImageName,omitempty"`
// Points to a Secret that contains a Dockerfile. osbuilder will build the image using that Dockerfile and will try to create a Kairos image from it.
BaseImageDockerfile *SecretKeySelector `json:"baseImageDockerfile,omitempty"`
// This needs to be revisited
ISO bool `json:"iso,omitempty"`
//Disk-only stuff
@@ -46,42 +42,47 @@ type OSArtifactSpec struct {
Netboot bool `json:"netboot,omitempty"`
NetbootURL string `json:"netbootURL,omitempty"`
CloudConfigRef *SecretKeySelector `json:"cloudConfigRef,omitempty"`
GRUBConfig string `json:"grubConfig,omitempty"`
// TODO: treat cloudconfig as a secret, and take a secretRef where to store it (optionally)
CloudConfig string `json:"cloudConfig,omitempty"`
GRUBConfig string `json:"grubConfig,omitempty"`
Bundles []string `json:"bundles,omitempty"`
OSRelease string `json:"osRelease,omitempty"`
Bundles []string `json:"bundles,omitempty"`
PullOptions Pull `json:"pull,omitempty"`
OSRelease string `json:"osRelease,omitempty"`
// TODO: Currently not used. Reserved to be used when we have a way to push to registries.
PushOptions Push `json:"push,omitempty"`
}
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
Exporters []batchv1.JobSpec `json:"exporters,omitempty"`
Volume *corev1.PersistentVolumeClaimSpec `json:"volume,omitempty"`
type Push struct {
Push bool `json:"push,omitempty"`
ImageName string `json:"imageName,omitempty"`
ContainerRegistryCredentials *SecretKeySelector `json:"containerRegistryCredentials,omitempty"`
}
type Pull struct {
ContainerRegistryCredentials *SecretKeySelector `json:"containerRegistryCredentials,omitempty"`
}
type LocalObjectReference struct {
Name string `json:"name"`
}
type SecretKeySelector struct {
Name string `json:"name"`
LocalObjectReference `json:",inline"`
// +optional
Namespace string `json:"namespace,omitempty"`
// +optional
Key string `json:"key,omitempty"`
}
type ArtifactPhase string
const (
Pending = "Pending"
Building = "Building"
Exporting = "Exporting"
Ready = "Ready"
Error = "Error"
)
// OSArtifactStatus defines the observed state of OSArtifact
type OSArtifactStatus struct {
// +kubebuilder:default=Pending
Phase ArtifactPhase `json:"phase,omitempty"`
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
Phase string `json:"phase,omitempty"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// OSArtifact is the Schema for the osartifacts API
type OSArtifact struct {
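The v1alpha1 spec above maps one-to-one onto the custom resource fields; a hedged example manifest that uses only fields visible in these hunks (every name and value below is made up):

# Minimal OSArtifact; assumes the CRD and controller are already installed.
cat <<'EOF' | kubectl apply -f -
apiVersion: build.kairos.io/v1alpha1
kind: OSArtifact
metadata:
  name: hello-kairos
spec:
  imageName: quay.io/kairos/core-opensuse:latest   # assumed prebuilt Kairos image
  iso: true
  bundles:
    - quay.io/kairos/packages:example-bundle       # hypothetical bundle reference
  grubConfig: |
    set timeout=5
  cloudConfig: |
    #cloud-config
    users:
      - name: kairos
        passwd: kairos
EOF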

View File

@@ -19,14 +19,27 @@ limitations under the License.
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha2
package v1alpha1
import (
batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
if in == nil {
return nil
}
out := new(LocalObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSArtifact) DeepCopyInto(out *OSArtifact) {
*out = *in
@@ -89,38 +102,13 @@ func (in *OSArtifactList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSArtifactSpec) DeepCopyInto(out *OSArtifactSpec) {
*out = *in
if in.BaseImageDockerfile != nil {
in, out := &in.BaseImageDockerfile, &out.BaseImageDockerfile
*out = new(SecretKeySelector)
**out = **in
}
if in.CloudConfigRef != nil {
in, out := &in.CloudConfigRef, &out.CloudConfigRef
*out = new(SecretKeySelector)
**out = **in
}
if in.Bundles != nil {
in, out := &in.Bundles, &out.Bundles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ImagePullSecrets != nil {
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
*out = make([]v1.LocalObjectReference, len(*in))
copy(*out, *in)
}
if in.Exporters != nil {
in, out := &in.Exporters, &out.Exporters
*out = make([]batchv1.JobSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Volume != nil {
in, out := &in.Volume, &out.Volume
*out = new(v1.PersistentVolumeClaimSpec)
(*in).DeepCopyInto(*out)
}
in.PullOptions.DeepCopyInto(&out.PullOptions)
in.PushOptions.DeepCopyInto(&out.PushOptions)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSArtifactSpec.
@@ -148,9 +136,50 @@ func (in *OSArtifactStatus) DeepCopy() *OSArtifactStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Pull) DeepCopyInto(out *Pull) {
*out = *in
if in.ContainerRegistryCredentials != nil {
in, out := &in.ContainerRegistryCredentials, &out.ContainerRegistryCredentials
*out = new(SecretKeySelector)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pull.
func (in *Pull) DeepCopy() *Pull {
if in == nil {
return nil
}
out := new(Pull)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Push) DeepCopyInto(out *Push) {
*out = *in
if in.ContainerRegistryCredentials != nil {
in, out := &in.ContainerRegistryCredentials, &out.ContainerRegistryCredentials
*out = new(SecretKeySelector)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Push.
func (in *Push) DeepCopy() *Push {
if in == nil {
return nil
}
out := new(Push)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
*out = *in
out.LocalObjectReference = in.LocalObjectReference
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.

File diff suppressed because it is too large

View File

@@ -73,11 +73,13 @@ vars:
# kind: Service
# version: v1
# name: webhook-service
vars:
- name: NGINX_NAMESPACE
objref:
kind: Namespace
name: system
apiVersion: v1
- name: ARTIFACT_COPIER_ROLE
objref:

View File

@@ -38,3 +38,7 @@ spec:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"
- "--copy-to-namespace=$(NGINX_NAMESPACE)"
- "--copy-role=$(ARTIFACT_COPIER_ROLE)"
- --copy-to-pod-label=app.kubernetes.io/name=osbuilder-nginx
- --copy-to-path="/usr/share/nginx/html"

View File

@@ -9,13 +9,13 @@ webhook:
leaderElection:
leaderElect: true
resourceName: 98ca89ca.kairos.io
# leaderElectionReleaseOnCancel defines if the leader should step down volume
# when the Manager ends. This requires the binary to immediately end when the
# Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
# speeds up voluntary leader transitions as the new leader don't have to wait
# LeaseDuration time first.
# In the default scaffold provided, the program ends immediately after
# the manager stops, so would be fine to enable this option. However,
# if you are doing or is intended to do any operation such as perform cleanups
# after the manager stops then its usage might be unsafe.
# leaderElectionReleaseOnCancel: true

View File

@@ -1,3 +1,4 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor

View File

@@ -16,3 +16,6 @@ resources:
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml
patchesStrategicMerge:
- role_custom.yaml

View File

@@ -5,49 +5,6 @@ metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- build.kairos.io
resources:

View File

@@ -0,0 +1,94 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- roles
- rolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- build.kairos.io
resources:
- osartifacts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- build.kairos.io
resources:
- osartifacts/finalizers
verbs:
- update
- apiGroups:
- build.kairos.io
resources:
- osartifacts/status
verbs:
- get
- patch
- update
- apiGroups:
- build.kairos.io
resources:
- osartifacts/finalizers
verbs:
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
- update
- apiGroups:
- "batch"
resources:
- jobs
verbs:
- get
- create
- update
# Temporary so that it can grant these permissions to the created role
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- get
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
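Because this ClusterRole now carries exec and RBAC-granting verbs, a quick sanity check that the manager's service account actually holds them could look like the following (the namespace and service-account names follow the default kubebuilder layout and are assumptions):

# Replace the namespace/service-account with the values actually deployed.
SA=system:serviceaccount:osbuilder-system:osbuilder-controller-manager
kubectl auth can-i create pods/exec --as="$SA"
kubectl auth can-i create rolebindings.rbac.authorization.k8s.io --as="$SA"
kubectl auth can-i list pods --as="$SA"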

View File

@@ -17,19 +17,22 @@ limitations under the License.
package controllers
import (
osbuilder "github.com/kairos-io/osbuilder/api/v1alpha2"
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (r *OSArtifactReconciler) genConfigMap(artifact *osbuilder.OSArtifact) *v1.ConfigMap {
func (r *OSArtifactReconciler) genConfigMap(artifact buildv1alpha1.OSArtifact) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: artifact.Name,
Namespace: artifact.Namespace,
OwnerReferences: genOwner(artifact),
},
Data: map[string]string{
"config": artifact.Spec.CloudConfig,
"grub.cfg": artifact.Spec.GRUBConfig,
"os-release": artifact.Spec.OSRelease,
}}

View File

@@ -17,18 +17,35 @@ limitations under the License.
package controllers
import (
"bytes"
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
"github.com/pkg/errors"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
osbuilder "github.com/kairos-io/osbuilder/api/v1alpha2"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/remotecommand"
)
func unpackContainer(id, containerImage, pullImage string) corev1.Container {
return corev1.Container{
ImagePullPolicy: corev1.PullAlways,
func genJobLabel(s string) map[string]string {
return map[string]string{
"osbuild": "workload" + s,
}
}
// TODO: Handle registry auth
// TODO: This shells out, but needs ENV_VAR with key refs mapping
// TODO: Cache downloaded images?
func unpackContainer(id, containerImage, pullImage string, pullOptions buildv1alpha1.Pull) v1.Container {
return v1.Container{
ImagePullPolicy: v1.PullAlways,
Name: fmt.Sprintf("pull-image-%s", id),
Image: containerImage,
Command: []string{"/bin/bash", "-cxe"},
@@ -39,7 +56,7 @@ func unpackContainer(id, containerImage, pullImage string) corev1.Container {
"/rootfs",
),
},
VolumeMounts: []corev1.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
@@ -48,19 +65,19 @@ func unpackContainer(id, containerImage, pullImage string) corev1.Container {
}
}
func pushImageName(artifact *osbuilder.OSArtifact) string {
pushName := artifact.Spec.ImageName
func pushImageName(artifact buildv1alpha1.OSArtifact) string {
pushName := artifact.Spec.PushOptions.ImageName
if pushName != "" {
return pushName
}
return artifact.Name
}
func createImageContainer(containerImage string, artifact *osbuilder.OSArtifact) corev1.Container {
func createImageContainer(containerImage string, artifact buildv1alpha1.OSArtifact) v1.Container {
imageName := pushImageName(artifact)
return corev1.Container{
ImagePullPolicy: corev1.PullAlways,
return v1.Container{
ImagePullPolicy: v1.PullAlways,
Name: "create-image",
Image: containerImage,
Command: []string{"/bin/bash", "-cxe"},
@@ -71,7 +88,7 @@ func createImageContainer(containerImage string, artifact *osbuilder.OSArtifact)
artifact.Name,
),
},
VolumeMounts: []corev1.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
@@ -84,16 +101,39 @@ func createImageContainer(containerImage string, artifact *osbuilder.OSArtifact)
}
}
func osReleaseContainer(containerImage string) corev1.Container {
return corev1.Container{
ImagePullPolicy: corev1.PullAlways,
func createPushToServerImageContainer(containerImage string, artifactPodInfo ArtifactPodInfo) v1.Container {
command := fmt.Sprintf("tar cf - -C artifacts/ . | kubectl exec -i -n %s $(kubectl get pods -l %s -n %s --no-headers -o custom-columns=\":metadata.name\" | head -n1) -- tar xf - -C %s", artifactPodInfo.Namespace, artifactPodInfo.Label, artifactPodInfo.Namespace, artifactPodInfo.Path)
fmt.Printf("command = %+v\n", command)
return v1.Container{
ImagePullPolicy: v1.PullAlways,
Name: "push-to-server",
Image: containerImage,
Command: []string{"/bin/bash", "-cxe"},
Args: []string{command},
VolumeMounts: []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
},
{
Name: "artifacts",
MountPath: "/artifacts",
},
},
}
}
func osReleaseContainer(containerImage string) v1.Container {
return v1.Container{
ImagePullPolicy: v1.PullAlways,
Name: "os-release",
Image: containerImage,
Command: []string{"/bin/bash", "-cxe"},
Args: []string{
"cp -rfv /etc/os-release /rootfs/etc/os-release",
},
VolumeMounts: []corev1.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
MountPath: "/etc/os-release",
@@ -107,38 +147,18 @@ func osReleaseContainer(containerImage string) corev1.Container {
}
}
func (r *OSArtifactReconciler) newArtifactPVC(artifact *osbuilder.OSArtifact) *corev1.PersistentVolumeClaim {
if artifact.Spec.Volume == nil {
artifact.Spec.Volume = &corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: map[corev1.ResourceName]resource.Quantity{
"storage": resource.MustParse("10Gi"),
},
},
}
}
func (r *OSArtifactReconciler) genJob(artifact buildv1alpha1.OSArtifact) *batchv1.Job {
objMeta := genObjectMeta(artifact)
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: artifact.Name + "-artifacts",
Namespace: artifact.Namespace,
},
Spec: *artifact.Spec.Volume,
}
privileged := false
serviceAccount := true
return pvc
}
func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder.OSArtifact) *corev1.Pod {
cmd := fmt.Sprintf(
"/entrypoint.sh --debug --name %s build-iso --date=false --output /artifacts dir:/rootfs",
artifact.Name,
)
volumeMounts := []corev1.VolumeMount{
volumeMounts := []v1.VolumeMount{
{
Name: "artifacts",
MountPath: "/artifacts",
@@ -150,7 +170,7 @@ func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder
}
if artifact.Spec.GRUBConfig != "" {
volumeMounts = append(volumeMounts, corev1.VolumeMount{
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: "config",
MountPath: "/iso/iso-overlay/boot/grub2/grub.cfg",
SubPath: "grub.cfg",
@@ -162,26 +182,26 @@ func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder
artifact.Name,
)
if artifact.Spec.CloudConfigRef != nil {
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: "cloudconfig",
if artifact.Spec.CloudConfig != "" {
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: "config",
MountPath: "/iso/iso-overlay/cloud_config.yaml",
SubPath: artifact.Spec.CloudConfigRef.Key,
SubPath: "config",
})
cloudImgCmd += " /iso/iso-overlay/cloud_config.yaml"
}
if artifact.Spec.CloudConfigRef != nil || artifact.Spec.GRUBConfig != "" {
if artifact.Spec.CloudConfig != "" || artifact.Spec.GRUBConfig != "" {
cmd = fmt.Sprintf(
"/entrypoint.sh --debug --name %s build-iso --date=false --overlay-iso /iso/iso-overlay --output /artifacts dir:/rootfs",
artifact.Name,
)
}
buildIsoContainer := corev1.Container{
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{Privileged: ptr(true)},
buildIsoContainer := v1.Container{
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
Name: "build-iso",
Image: r.ToolImage,
Command: []string{"/bin/bash", "-cxe"},
@@ -191,9 +211,9 @@ func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder
VolumeMounts: volumeMounts,
}
buildCloudImageContainer := corev1.Container{
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{Privileged: ptr(true)},
buildCloudImageContainer := v1.Container{
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
Name: "build-cloud-image",
Image: r.ToolImage,
@@ -205,19 +225,19 @@ func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder
}
if artifact.Spec.DiskSize != "" {
buildCloudImageContainer.Env = []corev1.EnvVar{{
buildCloudImageContainer.Env = []v1.EnvVar{{
Name: "EXTEND",
Value: artifact.Spec.DiskSize,
}}
}
extractNetboot := corev1.Container{
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{Privileged: ptr(true)},
extractNetboot := v1.Container{
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
Name: "build-netboot",
Image: r.ToolImage,
Command: []string{"/bin/bash", "-cxe"},
Env: []corev1.EnvVar{{
Env: []v1.EnvVar{{
Name: "URL",
Value: artifact.Spec.NetbootURL,
}},
@@ -231,9 +251,9 @@ func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder
VolumeMounts: volumeMounts,
}
buildAzureCloudImageContainer := corev1.Container{
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{Privileged: ptr(true)},
buildAzureCloudImageContainer := v1.Container{
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
Name: "build-azure-cloud-image",
Image: r.ToolImage,
Command: []string{"/bin/bash", "-cxe"},
@@ -247,9 +267,9 @@ func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder
VolumeMounts: volumeMounts,
}
buildGCECloudImageContainer := corev1.Container{
ImagePullPolicy: corev1.PullAlways,
SecurityContext: &corev1.SecurityContext{Privileged: ptr(true)},
buildGCECloudImageContainer := v1.Container{
ImagePullPolicy: v1.PullAlways,
SecurityContext: &v1.SecurityContext{Privileged: &privileged},
Name: "build-gce-cloud-image",
Image: r.ToolImage,
Command: []string{"/bin/bash", "-cxe"},
@@ -263,189 +283,251 @@ func (r *OSArtifactReconciler) newBuilderPod(pvcName string, artifact *osbuilder
VolumeMounts: volumeMounts,
}
podSpec := corev1.PodSpec{
AutomountServiceAccountToken: ptr(false),
RestartPolicy: corev1.RestartPolicyNever,
Volumes: []corev1.Volume{
pod := v1.PodSpec{
AutomountServiceAccountToken: &serviceAccount,
ServiceAccountName: objMeta.Name,
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "artifacts",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: false,
},
},
Name: "artifacts",
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
},
{
Name: "rootfs",
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
},
{
Name: "config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: artifact.Name,
},
},
},
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{Name: artifact.Name}}},
},
},
}
if artifact.Spec.BaseImageDockerfile != nil {
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
Name: "dockerfile",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: artifact.Spec.BaseImageDockerfile.Name,
},
},
})
}
if artifact.Spec.CloudConfigRef != nil {
podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
Name: "cloudconfig",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: artifact.Spec.CloudConfigRef.Name,
Optional: ptr(true),
},
},
})
}
podSpec.ImagePullSecrets = append(podSpec.ImagePullSecrets, artifact.Spec.ImagePullSecrets...)
podSpec.InitContainers = []corev1.Container{}
// Base image can be:
// - built from a dockerfile and converted to a kairos one
// - built by converting an existing image to a kairos one
// - a prebuilt kairos image
if artifact.Spec.BaseImageDockerfile != nil {
podSpec.InitContainers = append(podSpec.InitContainers, baseImageBuildContainers()...)
} else if artifact.Spec.BaseImageName != "" { // Existing base image - non kairos
podSpec.InitContainers = append(podSpec.InitContainers,
unpackContainer("baseimage-non-kairos", r.ToolImage, artifact.Spec.BaseImageName))
} else { // Existing Kairos base image
podSpec.InitContainers = append(podSpec.InitContainers, unpackContainer("baseimage", r.ToolImage, artifact.Spec.ImageName))
}
// If base image was a non kairos one, either one we built with kaniko or prebuilt,
// convert it to a Kairos one, in a best effort manner.
if artifact.Spec.BaseImageDockerfile != nil || artifact.Spec.BaseImageName != "" {
podSpec.InitContainers = append(podSpec.InitContainers,
corev1.Container{
ImagePullPolicy: corev1.PullAlways,
Name: "convert-to-kairos",
Image: "busybox",
Command: []string{"/bin/echo"},
Args: []string{"TODO"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
},
},
})
}
pod.InitContainers = []v1.Container{unpackContainer("baseimage", r.ToolImage, artifact.Spec.ImageName, artifact.Spec.PullOptions)}
for i, bundle := range artifact.Spec.Bundles {
podSpec.InitContainers = append(podSpec.InitContainers, unpackContainer(fmt.Sprint(i), r.ToolImage, bundle))
pod.InitContainers = append(pod.InitContainers, unpackContainer(fmt.Sprint(i), r.ToolImage, bundle, artifact.Spec.PullOptions))
}
if artifact.Spec.OSRelease != "" {
podSpec.InitContainers = append(podSpec.InitContainers, osReleaseContainer(r.ToolImage))
pod.InitContainers = append(pod.InitContainers, osReleaseContainer(r.ToolImage))
}
if artifact.Spec.ISO || artifact.Spec.Netboot {
podSpec.Containers = append(podSpec.Containers, buildIsoContainer)
pod.InitContainers = append(pod.InitContainers, buildIsoContainer)
}
if artifact.Spec.Netboot {
podSpec.Containers = append(podSpec.Containers, extractNetboot)
pod.InitContainers = append(pod.InitContainers, extractNetboot)
}
if artifact.Spec.CloudImage {
podSpec.Containers = append(podSpec.Containers, buildCloudImageContainer)
if artifact.Spec.CloudImage || artifact.Spec.AzureImage || artifact.Spec.GCEImage {
pod.InitContainers = append(pod.InitContainers, buildCloudImageContainer)
}
if artifact.Spec.AzureImage {
podSpec.Containers = append(podSpec.Containers, buildAzureCloudImageContainer)
pod.InitContainers = append(pod.InitContainers, buildAzureCloudImageContainer)
}
if artifact.Spec.GCEImage {
podSpec.Containers = append(podSpec.Containers, buildGCECloudImageContainer)
pod.InitContainers = append(pod.InitContainers, buildGCECloudImageContainer)
}
podSpec.Containers = append(podSpec.Containers, createImageContainer(r.ToolImage, artifact))
pod.InitContainers = append(pod.InitContainers, createImageContainer(r.ToolImage, artifact))
return &corev1.Pod{
pod.Containers = []v1.Container{
createPushToServerImageContainer(r.CopierImage, r.ArtifactPodInfo),
}
jobLabels := genJobLabel(artifact.Name)
job := batchv1.Job{
ObjectMeta: objMeta,
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: jobLabels,
},
Spec: pod,
},
},
}
return &job
}
// createCopierServiceAccount creates a service account that has the permissions to
// copy the artifacts to the http server Pod. This service account is used for
// the "push to server" container.
func (r *OSArtifactReconciler) createCopierServiceAccount(ctx context.Context, objMeta metav1.ObjectMeta) error {
sa, err := r.clientSet.CoreV1().
ServiceAccounts(objMeta.Namespace).Get(ctx, objMeta.Name, metav1.GetOptions{})
if sa == nil || apierrors.IsNotFound(err) {
t := true
_, err := r.clientSet.CoreV1().ServiceAccounts(objMeta.Namespace).Create(ctx,
&v1.ServiceAccount{
ObjectMeta: objMeta,
AutomountServiceAccountToken: &t,
}, metav1.CreateOptions{})
if err != nil {
return err
}
}
return err
}
// func (r *OSArtifactReconciler) createCopierRole(ctx context.Context, objMeta metav1.ObjectMeta) error {
// role, err := r.clientSet.RbacV1().
// Roles(objMeta.Namespace).
// Get(ctx, objMeta.Name, metav1.GetOptions{})
// if role == nil || apierrors.IsNotFound(err) {
// _, err := r.clientSet.RbacV1().Roles(objMeta.Namespace).Create(ctx,
// &rbacv1.Role{
// ObjectMeta: objMeta,
// Rules: []rbacv1.PolicyRule{
// // TODO: The actual permissions we need is that to copy to a Pod.
// // The Pod is on another namespace, so we need a cluster wide permission.
// // This can get viral because the controller needs to have the permissions
// // if it is to grant them to the Job.
// {
// Verbs: []string{"list"},
// APIGroups: []string{""},
// Resources: []string{"pods"},
// },
// },
// },
// metav1.CreateOptions{},
// )
// if err != nil {
// return err
// }
// }
// return err
// }
func (r *OSArtifactReconciler) createCopierRoleBinding(ctx context.Context, objMeta metav1.ObjectMeta) error {
newrb := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
GenerateName: artifact.Name + "-",
Namespace: artifact.Namespace,
Name: objMeta.Name,
Namespace: r.ArtifactPodInfo.Namespace,
// TODO: We can't have cross-namespace owners. The role binding will have to be deleted explicitly by the reconciler (finalizer?)
// OwnerReferences: objMeta.OwnerReferences,
},
Spec: podSpec,
}
}
func ptr[T any](val T) *T {
return &val
}
func baseImageBuildContainers() []corev1.Container {
return []corev1.Container{
corev1.Container{
ImagePullPolicy: corev1.PullAlways,
Name: "kaniko-build",
Image: "gcr.io/kaniko-project/executor:latest",
Args: []string{
"--dockerfile", "dockerfile/Dockerfile",
"--context", "dir://workspace",
"--destination", "whatever", // We don't push, but it needs this
"--tar-path", "/rootfs/image.tar",
"--no-push",
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
},
{
Name: "dockerfile",
MountPath: "/workspace/dockerfile",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: r.ArtifactPodInfo.Role,
},
corev1.Container{
ImagePullPolicy: corev1.PullAlways,
Name: "image-extractor",
Image: "quay.io/luet/base",
Args: []string{
"util", "unpack", "--local", "file:////rootfs/image.tar", "/rootfs",
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
},
},
},
corev1.Container{
ImagePullPolicy: corev1.PullAlways,
Name: "cleanup",
Image: "busybox",
Command: []string{"/bin/rm"},
Args: []string{
"/rootfs/image.tar",
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: objMeta.Name,
Namespace: objMeta.Namespace,
},
},
}
rb, err := r.clientSet.RbacV1().
RoleBindings(r.ArtifactPodInfo.Namespace).
Get(ctx, objMeta.Name, metav1.GetOptions{})
if rb == nil || apierrors.IsNotFound(err) {
_, err := r.clientSet.RbacV1().
RoleBindings(r.ArtifactPodInfo.Namespace).
Create(ctx, newrb, metav1.CreateOptions{})
if err != nil {
return err
}
}
return err
}
// createRBAC creates a ServiceAccount, and a binding to the CopierRole so that
// the container that copies the artifacts to the http server Pod has the
// permissions to do so.
func (r *OSArtifactReconciler) createRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
objMeta := genObjectMeta(artifact)
err := r.createCopierServiceAccount(ctx, objMeta)
if err != nil {
return errors.Wrap(err, "creating a service account")
}
err = r.createCopierRoleBinding(ctx, objMeta)
if err != nil {
return errors.Wrap(err, "creating a role binding for the copy-role")
}
return err
}
// removeRBAC deletes the role binding between the service account of this artifact
// and the CopierRole. The ServiceAccount is removed automatically through the Owner
// relationship with the OSArtifact. The RoleBinding can't have it as an owner
// because it is in a different Namespace.
func (r *OSArtifactReconciler) removeRBAC(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
err := r.clientSet.RbacV1().RoleBindings(r.ArtifactPodInfo.Namespace).
Delete(ctx, artifact.Name, metav1.DeleteOptions{})
// Ignore not found. No need to do anything.
if err != nil && apierrors.IsNotFound(err) {
return nil
}
return err
}
func (r *OSArtifactReconciler) removeArtifacts(ctx context.Context, artifact buildv1alpha1.OSArtifact) error {
//Finding Pods using labels
fmt.Printf("r.ArtifactPodInfo = %+v\n", r.ArtifactPodInfo.Label)
pods, err := r.clientSet.CoreV1().Pods(r.ArtifactPodInfo.Namespace).
List(ctx, metav1.ListOptions{LabelSelector: r.ArtifactPodInfo.Label})
if err != nil {
return errors.Wrap(err, fmt.Sprintf("listing pods with label %s in namespace %s", r.ArtifactPodInfo.Label, r.ArtifactPodInfo.Namespace))
}
if len(pods.Items) < 1 {
return errors.New("No artifact pod found")
}
pod := pods.Items[0]
stdout, stderr, err := r.executeRemoteCommand(r.ArtifactPodInfo.Namespace, pod.Name, fmt.Sprintf("rm -rf %s/%s.*", r.ArtifactPodInfo.Path, artifact.Name))
if err != nil {
return errors.Wrap(err, fmt.Sprintf("%s\n%s", stdout, stderr))
}
return nil
}
func (r *OSArtifactReconciler) executeRemoteCommand(namespace, podName, command string) (string, string, error) {
buf := &bytes.Buffer{}
errBuf := &bytes.Buffer{}
request := r.clientSet.CoreV1().RESTClient().
Post().
Namespace(namespace).
Resource("pods").
Name(podName).
SubResource("exec").
VersionedParams(&v1.PodExecOptions{
Command: []string{"/bin/sh", "-c", command},
Stdin: false,
Stdout: true,
Stderr: true,
TTY: true,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(r.restConfig, "POST", request.URL())
if err != nil {
return "", "", err
}
err = exec.Stream(remotecommand.StreamOptions{
Stdout: buf,
Stderr: errBuf,
})
if err != nil {
return "", "", fmt.Errorf("%w Failed executing command %s on %v/%v", err, command, namespace, podName)
}
return buf.String(), errBuf.String(), nil
}
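The push-to-server container and executeRemoteCommand above are plain exec plumbing against the nginx pod; stripped of the Go wrapping, the copy step amounts to the following (the namespace is an assumption, while the label and path mirror the copy-to-pod-label and copy-to-path flags in the manager patch):

# Find the serving pod by label, then stream the artifacts into it over exec.
NS=osbuilder-nginx-namespace   # hypothetical; the controller reads this from ArtifactPodInfo
POD=$(kubectl get pods -n "$NS" -l app.kubernetes.io/name=osbuilder-nginx \
      --no-headers -o custom-columns=":metadata.name" | head -n1)
tar cf - -C artifacts/ . | kubectl exec -i -n "$NS" "$POD" -- tar xf - -C /usr/share/nginx/html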

View File

@@ -19,315 +19,230 @@ package controllers
import (
"context"
"fmt"
"time"
osbuilder "github.com/kairos-io/osbuilder/api/v1alpha2"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
FinalizerName = "build.kairos.io/osbuilder-finalizer"
artifactLabel = "build.kairos.io/artifact"
artifactExporterIndexAnnotation = "build.kairos.io/export-index"
)
const FinalizerName = "build.kairos.io/osbuilder-finalizer"
type ArtifactPodInfo struct {
Label string
Namespace string
Path string
Role string
}
// OSArtifactReconciler reconciles a OSArtifact object
type OSArtifactReconciler struct {
client.Client
Scheme *runtime.Scheme
restConfig *rest.Config
clientSet *kubernetes.Clientset
ServingImage, ToolImage, CopierImage string
ArtifactPodInfo ArtifactPodInfo
}
func (r *OSArtifactReconciler) InjectClient(c client.Client) error {
r.Client = c
func genObjectMeta(artifact buildv1alpha1.OSArtifact) metav1.ObjectMeta {
return metav1.ObjectMeta{
Name: artifact.Name,
Namespace: artifact.Namespace,
OwnerReferences: genOwner(artifact),
}
}
return nil
func genOwner(artifact buildv1alpha1.OSArtifact) []metav1.OwnerReference {
return []metav1.OwnerReference{
*metav1.NewControllerRef(&artifact.ObjectMeta, schema.GroupVersionKind{
Group: buildv1alpha1.GroupVersion.Group,
Version: buildv1alpha1.GroupVersion.Version,
Kind: "OSArtifact",
}),
}
}
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=build.kairos.io,resources=osartifacts/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;delete
//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;create;delete;watch
//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;create;
//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;delete
// TODO: Is this ^ how I should have created rbac permissions for the controller?
// - git commit all changes
// - generate code with kubebuilder
// - check if my permissions were removed
// - do it properly
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the OSArtifact object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile
func (r *OSArtifactReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
var artifact osbuilder.OSArtifact
if err := r.Get(ctx, req.NamespacedName, &artifact); err != nil {
var osbuild buildv1alpha1.OSArtifact
if err := r.Get(ctx, req.NamespacedName, &osbuild); err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
return ctrl.Result{Requeue: true}, err
return ctrl.Result{}, err
}
if artifact.DeletionTimestamp != nil {
controllerutil.RemoveFinalizer(&artifact, FinalizerName)
return ctrl.Result{}, r.Update(ctx, &artifact)
logger.Info(fmt.Sprintf("Reconciling %v", osbuild))
stop, err := r.handleFinalizer(ctx, &osbuild)
if err != nil || stop {
return ctrl.Result{}, err
}
if !controllerutil.ContainsFinalizer(&artifact, FinalizerName) {
controllerutil.AddFinalizer(&artifact, FinalizerName)
if err := r.Update(ctx, &artifact); err != nil {
return ctrl.Result{Requeue: true}, err
// generate configmap required for building a custom image
desiredConfigMap := r.genConfigMap(osbuild)
logger.Info(fmt.Sprintf("Checking configmap %v", osbuild))
cfgMap, err := r.clientSet.CoreV1().ConfigMaps(req.Namespace).Get(ctx, desiredConfigMap.Name, metav1.GetOptions{})
if cfgMap == nil || apierrors.IsNotFound(err) {
logger.Info(fmt.Sprintf("Creating config map %v", desiredConfigMap))
_, err = r.clientSet.CoreV1().ConfigMaps(req.Namespace).Create(ctx, desiredConfigMap, metav1.CreateOptions{})
if err != nil {
logger.Error(err, "Failed while creating config map")
return ctrl.Result{}, err
}
return ctrl.Result{Requeue: true}, err
}
logger.Info(fmt.Sprintf("Reconciling %s/%s", artifact.Namespace, artifact.Name))
switch artifact.Status.Phase {
case osbuilder.Exporting:
return r.checkExport(ctx, &artifact)
case osbuilder.Ready, osbuilder.Error:
return ctrl.Result{}, nil
default:
return r.checkBuild(ctx, &artifact)
}
}
// CreateConfigMap generates a configmap required for building a custom image
func (r *OSArtifactReconciler) CreateConfigMap(ctx context.Context, artifact *osbuilder.OSArtifact) error {
cm := r.genConfigMap(artifact)
if cm.Labels == nil {
cm.Labels = map[string]string{}
}
cm.Labels[artifactLabel] = artifact.Name
if err := controllerutil.SetOwnerReference(artifact, cm, r.Scheme()); err != nil {
return err
}
if err := r.Create(ctx, cm); err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return nil
}
func (r *OSArtifactReconciler) createPVC(ctx context.Context, artifact *osbuilder.OSArtifact) (*corev1.PersistentVolumeClaim, error) {
pvc := r.newArtifactPVC(artifact)
if pvc.Labels == nil {
pvc.Labels = map[string]string{}
}
pvc.Labels[artifactLabel] = artifact.Name
if err := controllerutil.SetOwnerReference(artifact, pvc, r.Scheme()); err != nil {
return pvc, err
}
if err := r.Create(ctx, pvc); err != nil {
return pvc, err
}
return pvc, nil
}
func (r *OSArtifactReconciler) createBuilderPod(ctx context.Context, artifact *osbuilder.OSArtifact, pvc *corev1.PersistentVolumeClaim) (*corev1.Pod, error) {
pod := r.newBuilderPod(pvc.Name, artifact)
if pod.Labels == nil {
pod.Labels = map[string]string{}
}
pod.Labels[artifactLabel] = artifact.Name
if err := controllerutil.SetOwnerReference(artifact, pod, r.Scheme()); err != nil {
return pod, err
}
if err := r.Create(ctx, pod); err != nil {
return pod, err
}
return pod, nil
}
func (r *OSArtifactReconciler) startBuild(ctx context.Context, artifact *osbuilder.OSArtifact) (ctrl.Result, error) {
err := r.CreateConfigMap(ctx, artifact)
if err != nil {
return ctrl.Result{Requeue: true}, err
}
pvc, err := r.createPVC(ctx, artifact)
logger.Info(fmt.Sprintf("Checking deployment %v", osbuild))
err = r.createRBAC(ctx, osbuild)
if err != nil {
return ctrl.Result{Requeue: true}, err
}
_, err = r.createBuilderPod(ctx, artifact, pvc)
if err != nil {
return ctrl.Result{Requeue: true}, err
}
desiredJob := r.genJob(osbuild)
job, err := r.clientSet.BatchV1().Jobs(req.Namespace).Get(ctx, desiredJob.Name, metav1.GetOptions{})
if job == nil || apierrors.IsNotFound(err) {
logger.Info(fmt.Sprintf("Creating Job %v", job))
artifact.Status.Phase = osbuilder.Building
if err := r.Status().Update(ctx, artifact); err != nil {
return ctrl.Result{Requeue: true}, err
}
return ctrl.Result{}, nil
}
func (r *OSArtifactReconciler) checkBuild(ctx context.Context, artifact *osbuilder.OSArtifact) (ctrl.Result, error) {
var pods corev1.PodList
if err := r.List(ctx, &pods, &client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{
artifactLabel: artifact.Name,
}),
}); err != nil {
return ctrl.Result{Requeue: true}, err
}
for _, pod := range pods.Items {
switch pod.Status.Phase {
case corev1.PodSucceeded:
artifact.Status.Phase = osbuilder.Exporting
return ctrl.Result{Requeue: true}, r.Status().Update(ctx, artifact)
case corev1.PodFailed:
artifact.Status.Phase = osbuilder.Error
return ctrl.Result{Requeue: true}, r.Status().Update(ctx, artifact)
case corev1.PodPending, corev1.PodRunning:
_, err = r.clientSet.BatchV1().Jobs(req.Namespace).Create(ctx, desiredJob, metav1.CreateOptions{})
if err != nil {
logger.Error(err, "Failed while creating job")
return ctrl.Result{}, nil
}
return ctrl.Result{Requeue: true}, nil
}
return r.startBuild(ctx, artifact)
}
func (r *OSArtifactReconciler) checkExport(ctx context.Context, artifact *osbuilder.OSArtifact) (ctrl.Result, error) {
var jobs batchv1.JobList
if err := r.List(ctx, &jobs, &client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{
artifactLabel: artifact.Name,
}),
}); err != nil {
if err != nil {
return ctrl.Result{Requeue: true}, err
}
indexedJobs := make(map[string]*batchv1.Job, len(artifact.Spec.Exporters))
for _, job := range jobs.Items {
if job.GetAnnotations() != nil {
if idx, ok := job.GetAnnotations()[artifactExporterIndexAnnotation]; ok {
indexedJobs[idx] = &job
}
}
logger.Info(fmt.Sprintf("Updating state %v", osbuild))
copy := osbuild.DeepCopy()
helper, err := patch.NewHelper(&osbuild, r.Client)
if err != nil {
return ctrl.Result{}, err
}
if job.Status.Succeeded > 0 {
copy.Status.Phase = "Ready"
} else if copy.Status.Phase != "Building" {
copy.Status.Phase = "Building"
}
ctx, cancel := context.WithTimeout(context.Background(), 180*time.Second)
defer cancel()
if err := helper.Patch(ctx, copy); err != nil {
return ctrl.Result{}, errors.Wrapf(err, "couldn't patch osbuild %q", copy.Name)
}
var pvcs corev1.PersistentVolumeClaimList
var pvc *corev1.PersistentVolumeClaim
if err := r.List(ctx, &pvcs, &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{artifactLabel: artifact.Name})}); err != nil {
return ctrl.Result{Requeue: true}, err
}
for _, item := range pvcs.Items {
pvc = &item
break
}
if pvc == nil {
log.FromContext(ctx).Error(nil, "failed to locate pvc for artifact, this should not happen")
return ctrl.Result{}, fmt.Errorf("failed to locate artifact pvc")
}
var succeeded int
for i := range artifact.Spec.Exporters {
idx := fmt.Sprintf("%d", i)
job := indexedJobs[idx]
if job == nil {
job = &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-export-%s", artifact.Name, idx),
Namespace: artifact.Namespace,
Annotations: map[string]string{
artifactExporterIndexAnnotation: idx,
},
Labels: map[string]string{
artifactLabel: artifact.Name,
},
},
Spec: artifact.Spec.Exporters[i],
}
job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, corev1.Volume{
Name: "artifacts",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
ReadOnly: true,
},
},
})
if err := controllerutil.SetOwnerReference(artifact, job, r.Scheme()); err != nil {
return ctrl.Result{Requeue: true}, err
}
if err := r.Create(ctx, job); err != nil {
return ctrl.Result{Requeue: true}, err
}
} else if job.Spec.Completions == nil || *job.Spec.Completions == 1 {
if job.Status.Succeeded > 0 {
succeeded++
}
} else if *job.Spec.BackoffLimit <= job.Status.Failed {
artifact.Status.Phase = osbuilder.Error
if err := r.Status().Update(ctx, artifact); err != nil {
return ctrl.Result{Requeue: true}, err
}
break
}
}
if succeeded == len(artifact.Spec.Exporters) {
artifact.Status.Phase = osbuilder.Ready
if err := r.Status().Update(ctx, artifact); err != nil {
return ctrl.Result{Requeue: true}, err
}
}
// for _, c := range append(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses...) {
// if c.State.Terminated != nil && c.State.Terminated.ExitCode != 0 {
// packageBuildCopy.Status.State = "Failed"
// }
// }
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *OSArtifactReconciler) SetupWithManager(mgr ctrl.Manager) error {
cfg := mgr.GetConfig()
clientset, err := kubernetes.NewForConfig(cfg)
if err != nil {
return err
}
r.restConfig = cfg
r.clientSet = clientset
return ctrl.NewControllerManagedBy(mgr).
For(&osbuilder.OSArtifact{}).
Owns(&osbuilder.OSArtifact{}).
Watches(
&source.Kind{Type: &corev1.Pod{}},
handler.EnqueueRequestsFromMapFunc(r.findOwningArtifact),
builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
).
Watches(
&source.Kind{Type: &batchv1.Job{}},
handler.EnqueueRequestsFromMapFunc(r.findOwningArtifact),
builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}),
).
For(&buildv1alpha1.OSArtifact{}).
Complete(r)
}
func (r *OSArtifactReconciler) findOwningArtifact(obj client.Object) []reconcile.Request {
if obj.GetLabels() == nil {
return nil
// Returns true if reconciliation should stop or false otherwise
func (r *OSArtifactReconciler) handleFinalizer(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) (bool, error) {
// examine DeletionTimestamp to determine if object is under deletion
if osbuild.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// to registering our finalizer.
if !controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
controllerutil.AddFinalizer(osbuild, FinalizerName)
if err := r.Update(ctx, osbuild); err != nil {
return true, err
}
}
} else {
// The object is being deleted
if controllerutil.ContainsFinalizer(osbuild, FinalizerName) {
// our finalizer is present, so lets handle any external dependency
if err := r.finalize(ctx, osbuild); err != nil {
// if fail to delete the external dependency here, return with error
// so that it can be retried
return true, err
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(osbuild, FinalizerName)
if err := r.Update(ctx, osbuild); err != nil {
return true, err
}
}
// Stop reconciliation as the item is being deleted
return true, nil
}
if artifactName, ok := obj.GetLabels()[artifactLabel]; ok {
return []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Name: artifactName,
Namespace: obj.GetNamespace(),
},
},
}
return false, nil
}
// - Remove artifacts from the server Pod
// - Delete role-binding (because it doesn't have the OSArtifact as an owner and won't be deleted automatically)
func (r *OSArtifactReconciler) finalize(ctx context.Context, osbuild *buildv1alpha1.OSArtifact) error {
if err := r.removeRBAC(ctx, *osbuild); err != nil {
return err
}
if err := r.removeArtifacts(ctx, *osbuild); err != nil {
return err
}
return nil

View File

@@ -1,173 +0,0 @@
package controllers
import (
"context"
"fmt"
"time"
osbuilder "github.com/kairos-io/osbuilder/api/v1alpha2"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/phayes/freeport"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
)
var _ = Describe("OSArtifactReconciler", func() {
var r *OSArtifactReconciler
var artifact *osbuilder.OSArtifact
var namespace string
var restConfig *rest.Config
var clientset *kubernetes.Clientset
var err error
BeforeEach(func() {
restConfig = ctrl.GetConfigOrDie()
clientset, err = kubernetes.NewForConfig(restConfig)
Expect(err).ToNot(HaveOccurred())
namespace = createRandomNamespace(clientset)
artifact = &osbuilder.OSArtifact{
TypeMeta: metav1.TypeMeta{
Kind: "OSArtifact",
APIVersion: osbuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: randStringRunes(10),
},
}
scheme := runtime.NewScheme()
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(osbuilder.AddToScheme(scheme))
metricsPort, err := freeport.GetFreePort()
Expect(err).ToNot(HaveOccurred())
fmt.Printf("metricsPort = %+v\n", metricsPort)
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: fmt.Sprintf("127.0.0.1:%d", metricsPort),
})
Expect(err).ToNot(HaveOccurred())
r = &OSArtifactReconciler{
ToolImage: "quay.io/kairos/osbuilder-tools:latest",
}
err = (r).SetupWithManager(mgr)
Expect(err).ToNot(HaveOccurred())
})
JustBeforeEach(func() {
k8s := dynamic.NewForConfigOrDie(restConfig)
artifacts := k8s.Resource(
schema.GroupVersionResource{
Group: osbuilder.GroupVersion.Group,
Version: osbuilder.GroupVersion.Version,
Resource: "osartifacts"}).Namespace(namespace)
uArtifact := unstructured.Unstructured{}
uArtifact.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(artifact)
resp, err := artifacts.Create(context.TODO(), &uArtifact, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
// Update the local object with the one fetched from k8s
err = runtime.DefaultUnstructuredConverter.FromUnstructured(resp.Object, artifact)
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
deleteNamepace(clientset, namespace)
})
Describe("CreateConfigMap", func() {
It("creates a ConfigMap with no error", func() {
ctx := context.Background()
err := r.CreateConfigMap(ctx, artifact)
Expect(err).ToNot(HaveOccurred())
c, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), artifact.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(c).ToNot(BeNil())
})
})
Describe("CreateBuilderPod", func() {
When("BaseImageDockerfile is set", func() {
BeforeEach(func() {
secretName := artifact.Name + "-dockerfile"
_, err := clientset.CoreV1().Secrets(namespace).Create(context.TODO(),
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
StringData: map[string]string{
"Dockerfile": "FROM ubuntu",
},
Type: "Opaque",
}, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
artifact.Spec.BaseImageDockerfile = &osbuilder.SecretKeySelector{
Name: secretName,
Key: "Dockerfile",
}
// Whatever, just to let it work
artifact.Spec.ImageName = "quay.io/kairos-ci/" + artifact.Name + ":latest"
})
It("creates an Init Container to build the image", func() {
pvc, err := r.createPVC(context.TODO(), artifact)
Expect(err).ToNot(HaveOccurred())
pod, err := r.createBuilderPod(context.TODO(), artifact, pvc)
Expect(err).ToNot(HaveOccurred())
By("checking if an init container was created")
initContainerNames := []string{}
for _, c := range pod.Spec.InitContainers {
initContainerNames = append(initContainerNames, c.Name)
}
Expect(initContainerNames).To(ContainElement("kaniko-build"))
By("checking if init containers complete successfully")
Eventually(func() bool {
p, err := clientset.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
var allReady = false
if len(p.Status.InitContainerStatuses) > 0 {
allReady = true
}
for _, c := range p.Status.InitContainerStatuses {
allReady = allReady && c.Ready
}
return allReady
}, 2*time.Minute, 5*time.Second).Should(BeTrue())
// req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{})
// podLogs, err := req.Stream(context.TODO())
// Expect(err).ToNot(HaveOccurred())
// defer podLogs.Close()
// buf := new(bytes.Buffer)
// _, err = io.Copy(buf, podLogs)
// Expect(err).ToNot(HaveOccurred())
// str := buf.String()
// fmt.Printf("str = %+v\n", str)
})
})
})
})

View File

@@ -17,24 +17,20 @@ limitations under the License.
package controllers
import (
"context"
"math/rand"
"path/filepath"
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
buildv1alpha2 "github.com/kairos-io/osbuilder/api/v1alpha2"
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
//+kubebuilder:scaffold:imports
)
@@ -47,7 +43,10 @@ var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func() {
@@ -65,7 +64,7 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = buildv1alpha2.AddToScheme(scheme.Scheme)
err = buildv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
@@ -74,36 +73,10 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
})
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})
func randStringRunes(n int) string {
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}
func createRandomNamespace(clientset *kubernetes.Clientset) string {
name := randStringRunes(10)
_, err := clientset.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
return name
}
func deleteNamespace(clientset *kubernetes.Clientset, name string) {
err := clientset.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
}

go.mod
View File

@@ -3,12 +3,15 @@ module github.com/kairos-io/osbuilder
go 1.18
require (
github.com/onsi/ginkgo/v2 v2.14.0
github.com/onsi/gomega v1.30.0
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.9.5
github.com/onsi/gomega v1.27.7
github.com/pkg/errors v0.9.1
github.com/rancher-sandbox/ele-testhelpers v0.0.0-20220614101555-2eddf3b113e2
k8s.io/api v0.24.0
k8s.io/apimachinery v0.24.0
k8s.io/client-go v0.24.0
sigs.k8s.io/cluster-api v1.1.4
sigs.k8s.io/controller-runtime v0.12.1
)
@@ -23,23 +26,25 @@ require (
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gobuffalo/flect v0.2.4 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
github.com/google/uuid v1.1.2 // indirect
@@ -48,10 +53,11 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
@@ -60,18 +66,19 @@ require (
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.16.1 // indirect
golang.org/x/tools v0.9.1 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.24.0 // indirect

go.sum
View File

@@ -60,6 +60,10 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -73,11 +77,14 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e h1:GCzyKMDDjSGnlpl3clrdAK7I1AaVoaiKDOYkUzChZzg=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -88,6 +95,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -107,6 +116,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
github.com/coredns/corefile-migration v1.0.14 h1:Tz3WZhoj2NdP8drrQH86NgnCng+VrPjNeg2Oe1ALKag=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
@@ -123,8 +134,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
@@ -140,6 +153,7 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -163,11 +177,10 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -180,8 +193,12 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gobuffalo/flect v0.2.4 h1:BSYA8+T60cdyq+vynaSUjqSVI9mDEg9ZfQUXKmfjo4I=
github.com/gobuffalo/flect v0.2.4/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -221,6 +238,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
@@ -228,6 +246,7 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.10.1 h1:MQBGSZGnDwh7T/un+mzGKOMz3x+4E/GDPprWjDL+1Jg=
github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
@@ -243,11 +262,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -267,6 +285,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
@@ -306,6 +325,7 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -354,6 +374,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
@@ -362,6 +383,9 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -381,37 +405,35 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY=
github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -448,6 +470,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rancher-sandbox/ele-testhelpers v0.0.0-20220614101555-2eddf3b113e2 h1:amWBHqvIh/rmaDTuHfoGVAjmnQoZHq+jGuLAmHQhpyM=
github.com/rancher-sandbox/ele-testhelpers v0.0.0-20220614101555-2eddf3b113e2/go.mod h1:rZj2a+V44LvtFVH/vsFXtHYIWMP1Q9aSrl6RGLbk49A=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -455,6 +479,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -470,6 +495,7 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -477,6 +503,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -554,12 +581,6 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -596,7 +617,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -640,20 +660,16 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -727,6 +743,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -748,27 +765,15 @@ golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -777,15 +782,10 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -844,6 +844,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@@ -852,20 +853,14 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 h1:hI3jKY4Hpf63ns040onEbB3dAkR/H/P83hw1TG8dD3Y=
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
@@ -957,6 +952,7 @@ google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKr
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -996,6 +992,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
@@ -1027,6 +1024,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1045,9 +1043,11 @@ k8s.io/apiextensions-apiserver v0.24.0 h1:JfgFqbA8gKJ/uDT++feAqk9jBIwNnL9YGdQvaI
k8s.io/apiextensions-apiserver v0.24.0/go.mod h1:iuVe4aEpe6827lvO6yWQVxiPSpPoSKVjkq+MIdg84cM=
k8s.io/apimachinery v0.24.0 h1:ydFCyC/DjCvFCHK5OPMKBlxayQytB8pxy8YQInd5UyQ=
k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apiserver v0.24.0 h1:GR7kGsjOMfilRvlG3Stxv/3uz/ryvJ/aZXc5pqdsNV0=
k8s.io/apiserver v0.24.0/go.mod h1:WFx2yiOMawnogNToVvUYT9nn1jaIkMKj41ZYCVycsBA=
k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U=
k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw=
k8s.io/cluster-bootstrap v0.23.0 h1:8pZuuAWPoygewSNB4IddX3HBwXcQkPDXL/ca7GtGf4o=
k8s.io/code-generator v0.24.0/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.0 h1:h5jieHZQoHrY/lHG+HyrSbJeyfuitheBvqvKwKHVC0g=
k8s.io/component-base v0.24.0/go.mod h1:Dgazgon0i7KYUsS8krG8muGiMVtUZxG037l1MKyXgrA=
@@ -1066,6 +1066,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/cluster-api v1.1.4 h1:mD44GELeiMfQKxyBmsxdYn/FmNAIkFWr2auM++qxx1A=
sigs.k8s.io/cluster-api v1.1.4/go.mod h1:luiK6hLylbm4Kc7dkRw6KPAvNum9sAqpz5DfkdCaSW8=
sigs.k8s.io/controller-runtime v0.12.1 h1:4BJY01xe9zKQti8oRjj/NeHKRXthf1YkYJAgLONFFoI=
sigs.k8s.io/controller-runtime v0.12.1/go.mod h1:BKhxlA4l7FPK4AQcsuL4X6vZeWnKDXez/vp1Y8dxTU0=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=

main.go
View File

@@ -31,7 +31,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
buildv1alpha2 "github.com/kairos-io/osbuilder/api/v1alpha2"
buildv1alpha1 "github.com/kairos-io/osbuilder/api/v1alpha1"
"github.com/kairos-io/osbuilder/controllers"
//+kubebuilder:scaffold:imports
)
@@ -44,7 +44,7 @@ var (
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(buildv1alpha2.AddToScheme(scheme))
utilruntime.Must(buildv1alpha1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
@@ -53,12 +53,21 @@ func main() {
var enableLeaderElection bool
var probeAddr string
var serveImage, toolImage, copierImage string
var copyToPodLabel, copyToNamespace, copyToPath, copierRole string
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&copierImage, "copier-image", "quay.io/kairos/kubectl", "The image that is used to copy artifacts to the server pod.")
flag.StringVar(&serveImage, "serve-image", "nginx", "Serve image.")
// The tool image must have luet installed
flag.StringVar(&toolImage, "tool-image", "quay.io/kairos/osbuilder-tools:latest", "Tool image.")
// Information on where to copy the artifacts
flag.StringVar(&copyToPodLabel, "copy-to-pod-label", "", "The label of the Pod to which artifacts should be copied.")
flag.StringVar(&copyToNamespace, "copy-to-namespace", "", "The namespace of the copy-to-pod-label Pod.")
flag.StringVar(&copyToPath, "copy-to-path", "", "The path under which to copy artifacts in the copy-to-pod-label Pod.")
flag.StringVar(&copierRole, "copy-role", "", "The name or the Kubernetes Role that has the permissions to copy artifacts to the copy-to-pod-label Pod")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
@@ -96,9 +105,17 @@ func main() {
}
if err = (&controllers.OSArtifactReconciler{
Client: mgr.GetClient(),
ServingImage: serveImage,
ToolImage: toolImage,
CopierImage: copierImage,
ArtifactPodInfo: controllers.ArtifactPodInfo{
Label: copyToPodLabel,
Namespace: copyToNamespace,
Path: copyToPath,
Role: copierRole,
},
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OSArtifact")
os.Exit(1)

View File

@@ -2,17 +2,5 @@
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base"
],
"schedule": [
"after 11pm every weekday",
"before 7am every weekday",
"every weekend"
],
"timezone": "Europe/Brussels",
"packageRules": [
{
"matchUpdateTypes": ["patch"],
"automerge": true
}
]
}

View File

@@ -26,8 +26,7 @@ set -e
kubectl cluster-info --context kind-$CLUSTER_NAME
echo "Sleep to give times to node to populate with all info"
kubectl wait --for=condition=Ready node/$CLUSTER_NAME-control-plane
EXTERNAL_IP=$(kubectl get nodes -o jsonpath='{.items[].status.addresses[?(@.type == "InternalIP")].address}')
export EXTERNAL_IP
export EXTERNAL_IP=$(kubectl get nodes -o jsonpath='{.items[].status.addresses[?(@.type == "InternalIP")].address}')
export BRIDGE_IP="172.18.0.1"
kubectl get nodes -o wide
cd $ROOT_DIR/tests && $GINKGO -r -v ./e2e

View File

@@ -1,162 +1,114 @@
package e2e_test
import (
"context"
"bytes"
"fmt"
"os/exec"
"strings"
"time"
osbuilder "github.com/kairos-io/osbuilder/api/v1alpha2"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
ctrl "sigs.k8s.io/controller-runtime"
kubectl "github.com/rancher-sandbox/ele-testhelpers/kubectl"
)
var _ = Describe("ISO build test", func() {
var artifactName string
var artifacts, pods, pvcs, jobs dynamic.ResourceInterface
var scheme *runtime.Scheme
var artifactLabelSelector labels.Selector
//k := kubectl.New()
Context("registration", func() {
BeforeEach(func() {
k8s := dynamic.NewForConfigOrDie(ctrl.GetConfigOrDie())
scheme = runtime.NewScheme()
err := osbuilder.AddToScheme(scheme)
Expect(err).ToNot(HaveOccurred())
AfterEach(func() {
kubectl.New().Delete("osartifacts", "-n", "default", "hello-kairos")
})
artifacts = k8s.Resource(schema.GroupVersionResource{Group: osbuilder.GroupVersion.Group, Version: osbuilder.GroupVersion.Version, Resource: "osartifacts"}).Namespace("default")
pods = k8s.Resource(schema.GroupVersionResource{Group: corev1.GroupName, Version: corev1.SchemeGroupVersion.Version, Resource: "pods"}).Namespace("default")
pvcs = k8s.Resource(schema.GroupVersionResource{Group: corev1.GroupName, Version: corev1.SchemeGroupVersion.Version, Resource: "persistentvolumeclaims"}).Namespace("default")
jobs = k8s.Resource(schema.GroupVersionResource{Group: batchv1.GroupName, Version: batchv1.SchemeGroupVersion.Version, Resource: "jobs"}).Namespace("default")
artifact := &osbuilder.OSArtifact{
TypeMeta: metav1.TypeMeta{
Kind: "OSArtifact",
APIVersion: osbuilder.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "simple-",
},
Spec: osbuilder.OSArtifactSpec{
ImageName: "quay.io/kairos/core-opensuse:latest",
ISO: true,
DiskSize: "",
Exporters: []batchv1.JobSpec{
{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
{
Name: "test",
Image: "debian:latest",
Command: []string{"bash"},
Args: []string{"-xec", "[ -f /artifacts/*.iso ]"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "artifacts",
ReadOnly: true,
MountPath: "/artifacts",
},
},
},
},
},
},
},
},
},
}
uArtifact := unstructured.Unstructured{}
uArtifact.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(artifact)
resp, err := artifacts.Create(context.TODO(), &uArtifact, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
artifactName = resp.GetName()
artifactLabelSelectorReq, err := labels.NewRequirement("build.kairos.io/artifact", selection.Equals, []string{artifactName})
Expect(err).ToNot(HaveOccurred())
artifactLabelSelector = labels.NewSelector().Add(*artifactLabelSelectorReq)
})
It("works", func() {
By("starting the build")
Eventually(func(g Gomega) {
w, err := pods.Watch(context.TODO(), metav1.ListOptions{LabelSelector: artifactLabelSelector.String()})
It("creates a simple iso", func() {
err := kubectl.Apply("", "../../tests/fixtures/simple.yaml")
Expect(err).ToNot(HaveOccurred())
var stopped bool
for !stopped {
event, ok := <-w.ResultChan()
itHasTheCorrectImage()
itHasTheCorrectLabels()
itCopiesTheArtifacts()
stopped = event.Type != watch.Deleted && event.Type != watch.Error || !ok
}
}).WithTimeout(time.Hour).Should(Succeed())
By("deleting the custom resource", func() {
err = kubectl.New().Delete("osartifacts", "-n", "default", "hello-kairos")
Expect(err).ToNot(HaveOccurred())
})
By("exporting the artifacts")
Eventually(func(g Gomega) {
w, err := jobs.Watch(context.TODO(), metav1.ListOptions{LabelSelector: artifactLabelSelector.String()})
Expect(err).ToNot(HaveOccurred())
var stopped bool
for !stopped {
event, ok := <-w.ResultChan()
stopped = event.Type != watch.Deleted && event.Type != watch.Error || !ok
}
}).WithTimeout(time.Hour).Should(Succeed())
By("building the artifacts successfully")
Eventually(func(g Gomega) {
w, err := artifacts.Watch(context.TODO(), metav1.ListOptions{})
Expect(err).ToNot(HaveOccurred())
var artifact osbuilder.OSArtifact
var stopped bool
for !stopped {
event, ok := <-w.ResultChan()
stopped = !ok
if event.Type == watch.Modified && event.Object.(*unstructured.Unstructured).GetName() == artifactName {
err := scheme.Convert(event.Object, &artifact, nil)
Expect(err).ToNot(HaveOccurred())
stopped = artifact.Status.Phase == osbuilder.Ready
}
}
}).WithTimeout(time.Hour).Should(Succeed())
By("cleaning up resources on deletion")
err := artifacts.Delete(context.TODO(), artifactName, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
Eventually(func(g Gomega) int {
res, err := artifacts.List(context.TODO(), metav1.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(res.Items)
}).WithTimeout(time.Minute).Should(Equal(0))
Eventually(func(g Gomega) int {
res, err := pods.List(context.TODO(), metav1.ListOptions{LabelSelector: artifactLabelSelector.String()})
Expect(err).ToNot(HaveOccurred())
return len(res.Items)
}).WithTimeout(time.Minute).Should(Equal(0))
Eventually(func(g Gomega) int {
res, err := pvcs.List(context.TODO(), metav1.ListOptions{LabelSelector: artifactLabelSelector.String()})
Expect(err).ToNot(HaveOccurred())
return len(res.Items)
}).WithTimeout(time.Minute).Should(Equal(0))
Eventually(func(g Gomega) int {
res, err := jobs.List(context.TODO(), metav1.ListOptions{LabelSelector: artifactLabelSelector.String()})
Expect(err).ToNot(HaveOccurred())
return len(res.Items)
}).WithTimeout(time.Minute).Should(Equal(0))
itCleansUpRoleBindings()
itDeletesTheArtifacts()
})
})
})
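// The helpers below poll cluster state with kubectl until the expected
// condition is observed or the Eventually timeout expires.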
func itHasTheCorrectImage() {
Eventually(func() string {
b, _ := kubectl.GetData("default", "osartifacts", "hello-kairos", "jsonpath={.spec.imageName}")
fmt.Printf("looking for image core-opensuse:latest = %+v\n", string(b))
return string(b)
}, 2*time.Minute, 2*time.Second).Should(Equal("quay.io/kairos/core-opensuse:latest"))
}
func itHasTheCorrectLabels() {
Eventually(func() string {
b, _ := kubectl.GetData("default", "jobs", "hello-kairos", "jsonpath={.spec.template.metadata.labels.osbuild}")
fmt.Printf("looking for label workloadhello-kairos = %+v\n", string(b))
return string(b)
}, 2*time.Minute, 2*time.Second).Should(Equal("workloadhello-kairos"))
}
func itCopiesTheArtifacts() {
nginxNamespace := "osartifactbuilder-operator-system"
Eventually(func() string {
podName := strings.TrimSpace(findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx"))
out, _ := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
return out
}, 15*time.Minute, 2*time.Second).Should(MatchRegexp("hello-kairos.iso"))
}
func itCleansUpRoleBindings() {
nginxNamespace := "osartifactbuilder-operator-system"
Eventually(func() string {
rb := findRoleBindings(nginxNamespace)
return rb
}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos"))
}
func itDeletesTheArtifacts() {
nginxNamespace := "osartifactbuilder-operator-system"
Eventually(func() string {
podName := findPodsWithLabel(nginxNamespace, "app.kubernetes.io/name=osbuilder-nginx")
out, err := kubectl.RunCommandWithOutput(nginxNamespace, podName, "ls /usr/share/nginx/html")
Expect(err).ToNot(HaveOccurred(), out)
return out
}, 3*time.Minute, 2*time.Second).ShouldNot(MatchRegexp("hello-kairos.iso"))
}
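// findPodsWithLabel shells out to kubectl and returns the name of the first pod
// in the namespace that matches the given label selector.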
func findPodsWithLabel(namespace, label string) string {
kubectlCommand := fmt.Sprintf("kubectl get pods -n %s -l %s --no-headers -o custom-columns=\":metadata.name\" | head -n1", namespace, label)
cmd := exec.Command("bash", "-c", kubectlCommand)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
Expect(err).ToNot(HaveOccurred(), stderr.String())
return strings.TrimSpace(out.String())
}
func findRoleBindings(namespace string) string {
kubectlCommand := fmt.Sprintf("kubectl get rolebindings -n %s --no-headers -o custom-columns=\":metadata.name\"", namespace)
cmd := exec.Command("bash", "-c", kubectlCommand)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
Expect(err).ToNot(HaveOccurred(), stderr.String())
return strings.TrimSpace(out.String())
}

View File

@@ -31,7 +31,7 @@ spec:
fi
menuentry "install" --class os --unrestricted {
echo Loading kernel...
$linux ($root)/boot/kernel.xz cdroot root=live:CDLABEL=COS_LIVE rd.live.dir=/ rd.live.squashimg=rootfs.squashfs console=tty1 console=ttyS0 rd.cos.disable vga=795 nomodeset install-mode
$linux ($root)/boot/kernel.xz cdroot root=live:CDLABEL=COS_LIVE rd.live.dir=/ rd.live.squashimg=rootfs.squashfs console=tty1 console=ttyS0 rd.cos.disable vga=795 nomodeset nodepair.enable
echo Loading initrd...
$initrd ($root)/boot/rootfs.xz
}

View File

@@ -1,90 +1,90 @@
# https://quay.io/repository/kairos/packages?tab=tags&tag=latest
ARG LEAP_VERSION=15.5
ARG LUET_VERSION=0.35.0
ARG ENKI_VERSION=v0.0.16
ARG LEAP_VERSION=15.4
ARG LUET_VERSION=0.33.0
FROM quay.io/luet/base:$LUET_VERSION AS luet
FROM quay.io/kairos/enki:${ENKI_VERSION} as enki
FROM opensuse/leap:$LEAP_VERSION as default
RUN zypper ref && zypper dup -y
## ISO + ARM image + Netboot + cloud image build dependencies
RUN zypper ref && zypper in -y bc qemu-tools jq cdrtools docker git curl gptfdisk kpartx sudo xfsprogs parted util-linux-systemd e2fsprogs curl util-linux udev rsync grub2 dosfstools grub2-x86_64-efi squashfs mtools xorriso lvm2 zstd
FROM opensuse/leap:$LEAP_VERSION as luet-install
COPY --from=luet /usr/bin/luet /usr/bin/luet
ENV LUET_NOLOCK=true
ENV TMPDIR=/tmp
ARG TARGETARCH
# copy both arches
COPY luet-arm64.yaml /tmp/luet-arm64.yaml
COPY luet-amd64.yaml /tmp/luet-amd64.yaml
# Set the default luet config to the current build arch
RUN mkdir -p /etc/luet/
RUN cp /tmp/luet-${TARGETARCH}.yaml /etc/luet/luet.yaml
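# A minimal build sketch (assuming Docker Buildx, which sets TARGETARCH for the requested
# platform automatically; the image tags below are illustrative):
#   docker buildx build --platform linux/arm64 -t osbuilder-tools:arm64-test tools-image/
#   docker buildx build --platform linux/amd64 -t osbuilder-tools:amd64-test tools-image/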
## Live CD artifacts
COPY luet.yaml /etc/luet/luet.yaml
RUN luet install -y system/elemental-cli
RUN luet install -y livecd/grub2 --system-target /grub2
RUN luet install -y livecd/grub2-efi-image --system-target /efi
# Remove luet tmp files. A side effect of setting the system-target is that luet treats it as a root fs
RUN rm -Rf /grub2/var
RUN rm -Rf /efi/var
## amd64 Live CD artifacts
FROM quay.io/kairos/packages:grub2-livecd-0.0.6 AS grub2
FROM quay.io/kairos/packages:grub2-efi-image-livecd-0.0.6 AS efi
## RPI64
RUN luet install -y firmware/u-boot-rpi64 firmware/raspberrypi-firmware firmware/raspberrypi-firmware-config firmware/raspberrypi-firmware-dt --system-target /rpi/
## Firmware is in the amd64 repo (noarch)
FROM quay.io/kairos/packages:u-boot-rpi64-firmware-2021.01-5.1 AS rpi-u-boot
FROM quay.io/kairos/packages:raspberrypi-firmware-firmware-2021.03.10-2.1 AS rpi-firmware
FROM quay.io/kairos/packages:raspberrypi-firmware-config-firmware-2021.03.10-2.1 AS rpi-firmware-config
FROM quay.io/kairos/packages:raspberrypi-firmware-dt-firmware-2021.03.15-2.1 AS rpi-firmware-dt
## PineBook64 Pro
RUN luet install -y arm-vendor-blob/u-boot-rockchip --system-target /pinebookpro/u-boot
FROM quay.io/kairos/packages:u-boot-rockchip-arm-vendor-blob-0.1 AS pinebook-u-boot
## Odroid fw
RUN luet install -y firmware/odroid-c2 --system-target /firmware/odroid-c2
## Generic ARM artifacts
FROM quay.io/kairos/packages-arm64:grub-efi-static-0.1 AS grub-efi
FROM quay.io/kairos/packages-arm64:grub-config-static-0.1 AS grub-config
FROM quay.io/kairos/packages-arm64:grub-artifacts-static-0.1 AS grub-artifacts
## RAW images for current arch
RUN luet install -y static/grub-efi --system-target /raw/grub
RUN luet install -y static/grub-config --system-target /raw/grubconfig
RUN luet install -y static/grub-artifacts --system-target /raw/grubartifacts
## RAW images
FROM quay.io/kairos/packages:grub-efi-static-0.1 AS grub-raw-efi
FROM quay.io/kairos/packages:grub-config-static-0.1 AS grub-raw-config
FROM quay.io/kairos/packages:grub-artifacts-static-0.1 AS grub-raw-artifacts
## RAW images for arm64
# Luet installs these artifacts from the repo matching the current arch, so on x86 it
# would pull them from the x86 repo. We always want the arm64 versions, even on x86,
# so we point luet at the arm64 config to install them.
# These artifacts are used by the prepare_arm_images.sh and build-arch-image.sh scripts.
RUN luet install --config /tmp/luet-arm64.yaml -y static/grub-efi --system-target /arm/raw/grubefi
RUN luet install --config /tmp/luet-arm64.yaml -y static/grub-config --system-target /arm/raw/grubconfig
RUN luet install --config /tmp/luet-arm64.yaml -y static/grub-artifacts --system-target /arm/raw/grubartifacts
FROM opensuse/leap:$LEAP_VERSION
COPY --from=luet-install /usr/bin/elemental /usr/bin/elemental
COPY --from=luet /usr/bin/luet /usr/bin/luet
# kairos-agent so we can use its pull-image command
RUN luet install -y system/kairos-agent
# ISO files
COPY --from=luet-install /grub2 /grub2
COPY --from=luet-install /efi /efi
# Remove luet tmp files. A side effect of setting the system-target is that luet treats it as a root fs,
# so temporary files are stored in each dir.
RUN rm -Rf /grub2/var/tmp
RUN rm -Rf /grub2/var/cache
RUN rm -Rf /efi/var/tmp
RUN rm -Rf /efi/var/cache
RUN rm -Rf /rpi/var/tmp
RUN rm -Rf /rpi/var/cache
RUN rm -Rf /pinebookpro/u-boot/var/tmp
RUN rm -Rf /pinebookpro/u-boot/var/cache
RUN rm -Rf /firmware/odroid-c2/var/tmp
RUN rm -Rf /firmware/odroid-c2/var/cache
RUN rm -Rf /raw/grub/var/tmp
RUN rm -Rf /raw/grub/var/cache
RUN rm -Rf /raw/grubconfig/var/tmp
RUN rm -Rf /raw/grubconfig/var/cache
RUN rm -Rf /raw/grubartifacts/var/tmp
RUN rm -Rf /raw/grubartifacts/var/cache
RUN rm -Rf /arm/raw/grubefi/var/tmp
RUN rm -Rf /arm/raw/grubefi/var/cache
RUN rm -Rf /arm/raw/grubconfig/var/tmp
RUN rm -Rf /arm/raw/grubconfig/var/cache
RUN rm -Rf /arm/raw/grubartifacts/var/tmp
RUN rm -Rf /arm/raw/grubartifacts/var/cache
# RAW images
COPY --from=grub-raw-efi / /raw/grub
COPY --from=grub-raw-config / /raw/grubconfig
COPY --from=grub-raw-artifacts / /raw/grubartifacts
# RPI64
COPY --from=rpi-u-boot / /rpi/u-boot
COPY --from=rpi-firmware / /rpi/rpi-firmware
COPY --from=rpi-firmware-config / /rpi/rpi-firmware-config
COPY --from=rpi-firmware-dt / /rpi/rpi-firmware-dt
# Pinebook
COPY --from=pinebook-u-boot / /pinebookpro/u-boot
# Generic
COPY --from=grub-efi / /arm/grub/efi
COPY --from=grub-config / /arm/grub/config
COPY --from=grub-artifacts / /arm/grub/artifacts
RUN zypper ref && zypper dup -y
## ISO build dependencies
RUN zypper ref && zypper in -y xfsprogs parted util-linux-systemd e2fsprogs curl util-linux udev rsync grub2 dosfstools grub2-x86_64-efi squashfs mtools xorriso lvm2
RUN mkdir /config
# Arm image build deps
RUN zypper in -y jq docker git curl gptfdisk kpartx sudo
# Netboot
RUN zypper in -y cdrtools
# cloud images
RUN zypper in -y bc qemu-tools
# ISO build config
COPY ./config.yaml /config/manifest.yaml
COPY ./entrypoint.sh /entrypoint.sh
COPY ./add-cloud-init.sh /add-cloud-init.sh
COPY ./os-release.tmpl /os-release.tmpl
COPY ./ipxe.tmpl /ipxe.tmpl
COPY ./update-os-release.sh /update-os-release.sh
# ARM helpers
COPY ./build-arm-image.sh /build-arm-image.sh
@@ -99,6 +99,5 @@ COPY ./netboot.sh /netboot.sh
COPY defaults.yaml /defaults.yaml
COPY --from=enki /enki /usr/bin/enki
ENTRYPOINT [ "/entrypoint.sh" ]

View File

@@ -7,7 +7,15 @@ if [ -z "$image" ]; then
exit 1
fi
if [ ! -e "$WORKDIR/luet.yaml" ]; then
ls -liah $WORKDIR
echo "No valid config file"
cat "$WORKDIR/luet.yaml"
exit 1
fi
sudo luet install --config $WORKDIR/luet.yaml -y --system-target $WORKDIR firmware/odroid-c2
# conv=notrunc ?
dd if=/firmware/odroid-c2/bl1.bin.hardkernel of=$image conv=fsync bs=1 count=442
dd if=/firmware/odroid-c2/bl1.bin.hardkernel of=$image conv=fsync bs=512 skip=1 seek=1
dd if=/firmware/odroid-c2/u-boot.odroidc2 of=$image conv=fsync bs=512 seek=97
dd if=$WORKDIR/bl1.bin.hardkernel of=$image conv=fsync bs=1 count=442
dd if=$WORKDIR/bl1.bin.hardkernel of=$image conv=fsync bs=512 skip=1 seek=1
dd if=$WORKDIR/u-boot.odroidc2 of=$image conv=fsync bs=512 seek=97
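# Offset note (hedged, based on Hardkernel's published Odroid C2 flashing layout): the first
# dd writes only 442 bytes so the MBR partition entries at byte 446 and the boot signature
# survive, the second continues BL1 from sector 1, and u-boot is placed at sector 97.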

View File

@@ -1,22 +0,0 @@
#!/bin/bash
partprobe
kpartx -va $DRIVE
image=$1
if [ -z "$image" ]; then
echo "No image specified"
exit 1
fi
set -ax
TEMPDIR="$(mktemp -d)"
echo $TEMPDIR
mount "${device}p1" "${TEMPDIR}"
# Copy all rpi files
cp -rfv /rpi/* $TEMPDIR
umount "${TEMPDIR}"

View File

@@ -16,7 +16,9 @@ TEMPDIR="$(mktemp -d)"
echo $TEMPDIR
mount "${device}p1" "${TEMPDIR}"
# Copy all rpi files
cp -rfv /rpi/* $TEMPDIR
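# Each firmware package is copied into its own /rpi/<name> directory in the tools image
# (u-boot, rpi-firmware, rpi-firmware-config, rpi-firmware-dt), so copy them one by one: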
for dir in /rpi/u-boot /rpi/rpi-firmware /rpi/rpi-firmware-config /rpi/rpi-firmware-dt
do
cp -rfv ${dir}/* $TEMPDIR
done
umount "${TEMPDIR}"

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# Generates raw bootable images with qemu
set -ex
CLOUD_INIT=${1:-cloud_init.yaml}
QEMU=${QEMU:-qemu-system-x86_64}
ISO=${2:-iso.iso}
mkdir -p build
pushd build
touch meta-data
cp -rfv $CLOUD_INIT user-data
mkisofs -output ci.iso -volid cidata -joliet -rock user-data meta-data
truncate -s "+$((20000*1024*1024))" disk.raw
${QEMU} -m 8096 -smp cores=2 \
-nographic -cpu host \
-serial mon:stdio \
-rtc base=utc,clock=rt \
-chardev socket,path=qga.sock,server,nowait,id=qga0 \
-device virtio-serial \
-device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 \
-drive if=virtio,media=disk,file=disk.raw \
-drive format=raw,media=cdrom,readonly=on,file=$ISO \
-drive format=raw,media=cdrom,readonly=on,file=ci.iso \
-boot d \
-enable-kvm

View File

@@ -16,7 +16,6 @@ load_vars() {
oem_size="${OEM_SIZE:-64}"
recovery_size="${RECOVERY_SIZE:-2192}"
default_active_size="${DEFAULT_ACTIVE_SIZE:-2400}"
menu_entry="${DEFAULT_MENU_ENTRY:-Kairos}"
## Repositories
final_repo="${FINAL_REPO:-quay.io/costoolkit/releases-teal-arm64}"
@@ -71,10 +70,7 @@ cleanup() {
if [ -n "$oem" ]; then
umount $oem || true
fi
losetup -D "${LOOP}" || true;
dmsetup remove KairosVG-oem || true;
dmsetup remove KairosVG-recovery || true;
losetup -D || true
}
ensure_dir_structure() {
@@ -133,7 +129,7 @@ get_url()
esac
}
trap "cleanup" 1 2 3 6 14 15 EXIT
trap "cleanup" 1 2 3 6 9 14 15 EXIT
load_vars
@@ -217,12 +213,7 @@ while [ "$#" -gt 0 ]; do
shift 1
done
if [ "$model" == "rpi64" ];then
echo "rpi64 model not supported anymore, please select either rpi3 or rpi4"
exit 1
fi
if [ "$model" == "rpi3" ] || [ "$model" == "rpi4" ]; then
if [ "$model" == "rpi64" ]; then
container_image=${CONTAINER_IMAGE:-quay.io/costoolkit/examples:rpi-latest}
else
# Odroid C2 image contains kernel-default-extra, might have broader support
@@ -230,7 +221,6 @@ else
fi
if [ -n "$cos_config" ] && [ -e "$cos_config" ]; then
# shellcheck source=/dev/null
source "$cos_config"
fi
@@ -291,18 +281,12 @@ ensure_dir_structure $TARGET
# Download the container image
if [ -z "$directory" ]; then
echo ">>> Downloading container image"
kairos-agent pull-image $container_image $TARGET
elemental pull-image $( (( $local_build == 'true')) && printf %s '--local' ) $container_image $TARGET
else
echo ">>> Copying files from $directory"
rsync -axq --exclude='host' --exclude='mnt' --exclude='proc' --exclude='sys' --exclude='dev' --exclude='tmp' ${directory}/ $TARGET
fi
# We copy grubmenu.cfg to a temporary location so it can be copied into the state partition later
# https://github.com/kairos-io/kairos/blob/62c67e3e61d49435c362014522e5c6696335376f/overlay/files/system/oem/08_grub.yaml#L105
# This is a hack and we need a better way: https://github.com/kairos-io/kairos/issues/1427
tmpgrubconfig=$(mktemp /tmp/grubmeny.cfg.XXXXXX)
cp -rfv $TARGET/etc/kairos/branding/grubmenu.cfg "${tmpgrubconfig}"
umount $TARGET
sync
@@ -328,10 +312,9 @@ cp -rfv ${STATEDIR}/cOS/active.img ${RECOVERY}/cOS/recovery.img
tune2fs -L ${SYSTEM_LABEL} ${RECOVERY}/cOS/recovery.img
# Install real grub config to recovery
cp -rfv /arm/raw/grubconfig/* $RECOVERY
mkdir -p $RECOVERY/grub2/fonts
cp -rfv /arm/raw/grubartifacts/* $RECOVERY/grub2
mv $RECOVERY/grub2/*pf2 $RECOVERY/grub2/fonts
cp -rfv /arm/grub/config/* $RECOVERY
mkdir -p $RECOVERY/grub2
cp -rfv /arm/grub/artifacts/* $RECOVERY/grub2
sync
@@ -343,47 +326,36 @@ if [ -z "$EFI" ]; then
exit 1
fi
cp -rfv /arm/raw/grubefi/* $EFI
cp -rfv /arm/grub/efi/* $EFI
if [ -n "$EFI" ] && [ -n "$efi_dir" ]; then
echo "Copy $efi_dir to EFI directory"
cp -rfv $efi_dir/* $EFI
fi
partprobe
echo ">> Writing image and partition table"
dd if=/dev/zero of="${output_image}" bs=1024000 count="${size}" || exit 1
# Image partitions
# only rpi4 supports gpt
if [ "$model" == "rpi3" ]; then
if [ "$model" == "rpi64" ]; then
sgdisk -n 1:8192:+96M -c 1:EFI -t 1:0c00 ${output_image}
sgdisk -n 2:0:+${state_size}M -c 2:state -t 2:8300 ${output_image}
sgdisk -n 3:0:+$(( recovery_size + oem_size ))M -c 3:lvm -t 3:8e00 ${output_image}
sgdisk -n 4:0:+64M -c 4:persistent -t 4:8300 ${output_image}
sgdisk -m 1:2:3:4 ${output_image}
sfdisk --part-type ${output_image} 1 c
elif [ "$model" == "rpi4" ]; then
echo "label: gpt" | sfdisk "${output_image}"
sgdisk -n 1:8192:+96M -c 1:EFI -t 1:0c00 ${output_image}
sgdisk -n 2:0:+${state_size}M -c 2:state -t 2:8300 ${output_image}
sgdisk -n 3:0:+${recovery_size}M -c 3:recovery -t 3:8300 ${output_image}
sgdisk -n 4:0:+${oem_size}M -c 4:oem -t 4:8300 ${output_image}
sgdisk -n 5:0:+64M -c 5:persistent -t 5:8300 ${output_image}
sgdisk -g ${output_image}
sgdisk -m 1:2:3:4:5 ${output_image}
else
sgdisk -n 1:8192:+16M -c 1:EFI -t 1:0700 ${output_image}
sgdisk -n 2:0:+${state_size}M -c 2:state -t 2:8300 ${output_image}
sgdisk -n 3:0:+$(( recovery_size + oem_size ))M -c 3:lvm -t 3:8e00 ${output_image}
sgdisk -n 4:0:+64M -c 4:persistent -t 4:8300 ${output_image}
sgdisk -m 1:2:3:4 ${output_image}
fi
sgdisk -n 2:0:+${state_size}M -c 2:state -t 2:8300 ${output_image}
if [ "$disable_lvm" == 'true' ]; then
sgdisk -n 3:0:+${recovery_size}M -c 3:recovery -t 3:8300 ${output_image}
else
sgdisk -n 3:0:+$(( ${recovery_size} + ${oem_size} ))M -c 3:lvm -t 3:8e00 ${output_image}
fi
sgdisk -n 4:0:+64M -c 4:persistent -t 4:8300 ${output_image}
sgdisk -m 1:2:3:4 ${output_image}
if [ "$model" == "rpi64" ]; then
sfdisk --part-type ${output_image} 1 c
fi
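# Optional sanity check (illustrative, not part of the original flow): print the resulting
# partition table before mapping the image:
#   sgdisk -p "${output_image}"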
# Prepare the image and copy over the files
DRIVE=$(losetup -f "${output_image}" --show)
export DRIVE
export DRIVE=$(losetup -f "${output_image}" --show)
if [ -z "${DRIVE}" ]; then
echo "Cannot execute losetup for $output_image"
exit 1
@@ -400,74 +372,61 @@ export device="/dev/mapper/${device}"
partprobe
if [ "$model" == 'rpi4' ]; then
kpartx -vag $DRIVE
else
kpartx -va $DRIVE
fi
kpartx -va $DRIVE
echo ">> Populating partitions"
efi=${device}p1
state=${device}p2
recovery=${device}p3
if [ "$model" == 'rpi4' ]; then
oem=${device}p4
persistent=${device}p5
else
persistent=${device}p4
oem_lv=/dev/mapper/KairosVG-oem
recovery_lv=/dev/mapper/KairosVG-recovery
fi
persistent=${device}p4
oem_lv=/dev/mapper/KairosVG-oem
recovery_lv=/dev/mapper/KairosVG-recovery
# Create the filesystems (EFI, STATE, RECOVERY, OEM, COS_PERSISTENT)
mkfs.vfat -F 32 ${efi}
fatlabel ${efi} COS_GRUB
if [ "$disable_lvm" == 'true' ]; then
mkfs.ext4 -F -L ${RECOVERY_LABEL} $recovery
else
pvcreate $recovery
vgcreate KairosVG $recovery
lvcreate -Z n -n oem -L ${oem_size} KairosVG
lvcreate -Z n -n recovery -l 100%FREE KairosVG
vgchange -ay
vgmknodes
mkfs.ext4 -F -L ${OEM_LABEL} $oem_lv
mkfs.ext4 -F -L ${RECOVERY_LABEL} $recovery_lv
fi
mkfs.ext4 -F -L ${STATE_LABEL} $state
mkfs.ext4 -F -L ${PERSISTENT_LABEL} $persistent
if [ "$model" == 'rpi4' ]; then
mkfs.ext4 -F -L ${RECOVERY_LABEL} $recovery
mkfs.ext4 -F -L ${OEM_LABEL} $oem
else
pvcreate $recovery
vgcreate KairosVG $recovery
lvcreate -Z n -n oem -L ${oem_size} KairosVG
lvcreate -Z n -n recovery -l 100%FREE KairosVG
vgchange -ay
vgmknodes
mkfs.ext4 -F -L ${OEM_LABEL} $oem_lv
mkfs.ext4 -F -L ${RECOVERY_LABEL} $recovery_lv
fi
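# When the LVM path is taken, the layout can be inspected with the standard LVM tools
# (illustrative only):
#   pvs "$recovery" && vgs KairosVG && lvs KairosVG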
mkdir $WORKDIR/state
mkdir $WORKDIR/recovery
mkdir $WORKDIR/efi
mkdir $WORKDIR/oem
if [ "$disable_lvm" == 'true' ]; then
mount $recovery $WORKDIR/recovery
else
mount $recovery_lv $WORKDIR/recovery
fi
mount $state $WORKDIR/state
mount $efi $WORKDIR/efi
if [ "$model" == 'rpi4' ]; then
mount $recovery $WORKDIR/recovery
mount $oem $WORKDIR/oem
else
mount $recovery_lv $WORKDIR/recovery
mount $oem_lv $WORKDIR/oem
fi
mkdir $WORKDIR/persistent
mount $persistent $WORKDIR/persistent
mkdir -p $WORKDIR/persistent/cloud-config
cp -rfv /defaults.yaml $WORKDIR/oem/01_defaults.yaml
cp -rfv /defaults.yaml $WORKDIR/persistent/cloud-config/01_defaults.yaml
grub2-editenv $WORKDIR/state/grub_oem_env set "default_menu_entry=Kairos"
# Set an OEM config file if specified
if [ -n "$config" ]; then
echo ">> Copying $config OEM config file"
get_url $config $WORKDIR/oem/99_custom.yaml
get_url $config $WORKDIR/persistent/cloud-config/99_custom.yaml
fi
grub2-editenv $WORKDIR/state/grub_oem_env set "default_menu_entry=$menu_entry"
# We copy the file we saved earlier to the STATE partition
cp -rfv "${tmpgrubconfig}" $WORKDIR/state/grubmenu
umount $WORKDIR/persistent
# Copy over content
cp -arf $EFI/* $WORKDIR/efi
@@ -477,28 +436,23 @@ cp -arf $STATEDIR/* $WORKDIR/state
umount $WORKDIR/recovery
umount $WORKDIR/state
umount $WORKDIR/efi
umount $WORKDIR/oem
if [ "$model" != 'rpi4' ]; then
vgchange -an
if [ "$disable_lvm" == 'false' ]; then
vgchange -an
fi
sync
# Flash uboot and vendor-specific bits
echo ">> Performing $model specific bits.."
/arm/boards/$model.sh ${DRIVE}
dmsetup remove $DRIVE
sync
sleep 5
sync
if [ "$model" == 'rpi4' ]; then
kpartx -dvg $DRIVE
else
kpartx -dv $DRIVE || true
fi
kpartx -dv $DRIVE || true
umount $DRIVE || true

View File

@@ -1,3 +1,6 @@
iso:
uefi:
- dir:/efi
image:
- dir:/grub2
- dir:/efi
- dir:/grub2

View File

@@ -1,8 +1,8 @@
#cloud-config
name: "Default user"
stages:
initramfs:
- name: "Set default user/pass"
users:
initramfs:
- name: "Set default user/pass"
users:
kairos:
passwd: "kairos"
passwd: "kairos"

View File

@@ -2,4 +2,4 @@
set -ex
enki --config-dir /config $@
elemental --config-dir /config $@

View File

@@ -1,7 +0,0 @@
#!ipxe
set dns 8.8.8.8
ifconf
kernel ${RELEASE_URL}/${VERSION}/${ISO_NAME}-kernel root=live:${RELEASE_URL}/${VERSION}/${ISO_NAME}.squashfs initrd=${ISO_NAME}-initrd rd.neednet=1 ip=dhcp rd.cos.disable netboot install-mode config_url=${config} console=tty1 console=ttyS0 rd.live.overlay.overlayfs ${cmdline}
initrd ${RELEASE_URL}/${VERSION}/${ISO_NAME}-initrd
boot

View File

@@ -1,15 +0,0 @@
general:
debug: false
spinner_charset: 9
logging:
enable_emoji: false
repositories:
- name: "kairos"
description: "kairos repository"
type: "docker"
cached: true
enable: true
priority: 2
urls:
- "quay.io/kairos/packages"
reference: 20240205172246-repository.yaml

View File

@@ -1,15 +0,0 @@
general:
debug: false
spinner_charset: 9
logging:
enable_emoji: false
repositories:
- name: "kairos-arm64"
description: "kairos repository arm64"
type: "docker"
cached: true
enable: true
priority: 2
urls:
- "quay.io/kairos/packages-arm64"
reference: 20240205172848-repository.yaml

24
tools-image/luet.yaml Normal file
View File

@@ -0,0 +1,24 @@
general:
debug: false
spinner_charset: 9
logging:
enable_emoji: false
repositories:
- name: "kairos"
description: "kairos repository"
type: "docker"
arch: amd64
cached: true
priority: 2
urls:
- "quay.io/kairos/packages"
reference: 20230512113938-repository.yaml
- name: "kairos-arm64"
description: "kairos repository arm64"
type: "docker"
arch: arm64
cached: true
priority: 2
urls:
- "quay.io/kairos/packages-arm64"
reference: 20230512115044-repository.yaml

View File

@@ -19,7 +19,7 @@ set initrd $ARTIFACT_NAME-initrd
set rootfs $ARTIFACT_NAME.squashfs
# set config https://example.com/machine-config
# set cmdline extra.values=1
kernel \${url}/\${kernel} initrd=\${initrd} ip=dhcp rd.cos.disable root=live:\${url}/\${rootfs} netboot install-mode config_url=\${config} console=tty1 console=ttyS0 \${cmdline}
kernel \${url}/\${kernel} initrd=\${initrd} ip=dhcp rd.cos.disable root=live:\${url}/\${rootfs} netboot nodepair.enable config_url=\${config} console=tty1 console=ttyS0 \${cmdline}
initrd \${url}/\${initrd}
boot
EOF

View File

@@ -1,13 +0,0 @@
KAIROS_NAME="${OS_NAME}"
KAIROS_VERSION="${OS_VERSION}"
KAIROS_ID="${OS_ID}"
KAIROS_ID_LIKE="${OS_NAME}"
KAIROS_VERSION_ID="${OS_VERSION}"
KAIROS_PRETTY_NAME="${OS_NAME} ${OS_VERSION}"
KAIROS_BUG_REPORT_URL="${BUG_REPORT_URL}"
KAIROS_HOME_URL="${HOME_URL}"
KAIROS_IMAGE_REPO="${OS_REPO}"
KAIROS_IMAGE_LABEL="${OS_LABEL}"
KAIROS_GITHUB_REPO="${GITHUB_REPO}"
KAIROS_VARIANT="${VARIANT}"
KAIROS_FLAVOR="${FLAVOR}"

View File

@@ -20,7 +20,6 @@ size="${SIZE:-7544}"
state_size="${STATE_SIZE:-4992}"
recovery_size="${RECOVERY_SIZE:-2192}"
default_active_size="${DEFAULT_ACTIVE_SIZE:-2400}"
menu_entry="${DEFAULT_MENU_ENTRY:-Kairos}"
container_image="${container_image:-quay.io/kairos/kairos-opensuse-leap-arm-rpi:v1.5.1-k3sv1.25.6-k3s1}"
@@ -38,7 +37,7 @@ mkdir -p $WORKDIR/tmpefi
# Create the FAT16 EFI partition and include the EFI image and a basic grub.cfg
truncate -s $((20*1024*1024)) bootloader/efi.img
cp -rfv /arm/raw/grubefi/* $WORKDIR/tmpefi
cp -rfv /arm/grub/efi/* $WORKDIR/tmpefi
mkfs.fat -F16 -n COS_GRUB bootloader/efi.img
mcopy -s -i bootloader/efi.img $WORKDIR/tmpefi/EFI ::EFI
@@ -68,12 +67,6 @@ else
rsync -axq --exclude='host' --exclude='mnt' --exclude='proc' --exclude='sys' --exclude='dev' --exclude='tmp' ${directory}/ $TARGET
fi
# We copy grubmenu.cfg to a temporary location so it can be copied into the state partition later
# https://github.com/kairos-io/kairos/blob/62c67e3e61d49435c362014522e5c6696335376f/overlay/files/system/oem/08_grub.yaml#L105
# This is a hack and we need a better way: https://github.com/kairos-io/kairos/issues/1427
tmpgrubconfig=$(mktemp /tmp/grubmeny.cfg.XXXXXX)
cp -rfv $TARGET/etc/kairos/branding/grubmenu.cfg "${tmpgrubconfig}"
umount $TARGET
sync
@@ -93,10 +86,9 @@ cp -rfv ${STATEDIR}/cOS/active.img ${RECOVERY}/cOS/recovery.img
tune2fs -L ${SYSTEM_LABEL} ${RECOVERY}/cOS/recovery.img
# Install real grub config to recovery
cp -rfv /arm/raw/grubconfig/* $RECOVERY
mkdir -p $RECOVERY/grub2/fonts
cp -rfv /arm/raw/grubartifacts/* $RECOVERY/grub2
mv $RECOVERY/grub2/*pf2 $RECOVERY/grub2/fonts
cp -rfv /arm/grub/config/* $RECOVERY
mkdir -p $RECOVERY/grub2
cp -rfv /arm/grub/artifacts/* $RECOVERY/grub2
dd if=/dev/zero of=recovery_partition.img bs=1M count=$recovery_size
dd if=/dev/zero of=state_partition.img bs=1M count=$state_size
@@ -114,11 +106,7 @@ LOOP=$(losetup --show -f state_partition.img)
mkdir -p $WORKDIR/state
mount $LOOP $WORKDIR/state
cp -arf $STATEDIR/* $WORKDIR/state
grub2-editenv $WORKDIR/state/grub_oem_env set "default_menu_entry=$menu_entry"
# We copy the file we saved earlier to the STATE partition
cp -rfv "${tmpgrubconfig}" $WORKDIR/state/grubmenu
grub2-editenv $WORKDIR/state/grub_oem_env set "default_menu_entry=Kairos"
umount $WORKDIR/state
losetup -d $LOOP

View File

@@ -71,7 +71,7 @@ truncate -s "+$((1024*1024))" $OUT
if [ -n "$EXTEND" ]; then
echo "Extending image of $EXTEND MB"
truncate -s "+$((EXTEND*1024*1024))" $OUT
truncate -s "+$(($EXTEND*1024*1024))" $OUT
fi
# Create the partition table in $OUT (assumes sectors of 512 bytes)

View File

@@ -1,23 +0,0 @@
#!/bin/bash
# usage:
# docker run --rm -ti --entrypoint /update-os-release.sh \
# -v /etc:/workspace \ # mount the directory containing your os-release; it is /etc by default, but you can mount a different dir for testing
# -e OS_NAME=kairos-core-opensuse-leap \
# -e OS_VERSION=v2.2.0 \
# -e OS_ID="kairos" \
# -e OS_NAME=kairos-core-opensuse-leap \
# -e BUG_REPORT_URL="https://github.com/kairos-io/kairos/issues" \
# -e HOME_URL="https://github.com/kairos-io/kairos" \
# -e OS_REPO="quay.io/kairos/core-opensuse-leap" \
# -e OS_LABEL="latest" \
# -e GITHUB_REPO="kairos-io/kairos" \
# -e VARIANT="core" \
# -e FLAVOR="opensuse-leap"
# quay.io/kairos/osbuilder-tools:latest
set -ex
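# Drop any existing KAIROS_* lines from the mounted os-release, then render the template
# with the environment variables from the usage block above and append the result: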
sed -i -n '/KAIROS_/!p' /workspace/os-release
envsubst >>/workspace/os-release < /os-release.tmpl
cat /workspace/os-release