Merge pull request #9880 from zvonkok/helm-chart

kata-deploy: Add Helm Chart
This commit is contained in:
Fabiano Fidêncio
2024-08-06 22:55:31 +02:00
committed by GitHub
11 changed files with 300 additions and 111 deletions

View File

@@ -529,6 +529,29 @@ function ensure_yq() {
hash -d yq 2> /dev/null || true # yq is preinstalled on GHA Ubuntu 22.04 runners so we clear Bash's PATH cache.
}
# Ensure the `helm` binary is available in PATH, installing it if needed.
# Reads the pinned version from versions.yaml (via get_from_kata_deps) and
# exports it as DESIRED_VERSION, which the upstream get-helm-3 installer
# script honors.  Exits 1 if installation fails.
function ensure_helm() {
	ensure_yq
	# The get-helm-3 script will take care of downloading and installing Helm
	# properly on the system respecting ARCH, OS and other configurations.
	DESIRED_VERSION=$(get_from_kata_deps ".externals.helm.version")
	export DESIRED_VERSION

	# Check if helm is available in the system's PATH
	if ! command -v helm &> /dev/null; then
		echo "Helm is not installed. Installing Helm..."
		curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash

		# Verify the installation actually produced a usable binary; a failed
		# curl piped into bash can otherwise succeed silently.
		if command -v helm &> /dev/null; then
			echo "Helm installed successfully."
		else
			echo "Failed to install Helm." >&2
			exit 1
		fi
	else
		echo "Helm is already installed."
	fi
}
# dependency: What we want to get the version from the versions.yaml file
function get_from_kata_deps() {
versions_file="${repo_root_dir}/versions.yaml"

View File

@@ -17,6 +17,7 @@ source "${kubernetes_dir}/../../gha-run-k8s-common.sh"
source "${kubernetes_dir}/confidential_kbs.sh"
# shellcheck disable=2154
tools_dir="${repo_root_dir}/tools"
helm_chart_dir="${tools_dir}/packaging/kata-deploy/helm-chart/kata-deploy"
kata_tarball_dir="${2:-kata-artifacts}"
DOCKER_REGISTRY=${DOCKER_REGISTRY:-quay.io}
@@ -146,6 +147,7 @@ function deploy_coco_kbs() {
function deploy_kata() {
platform="${1}"
ensure_helm
ensure_yq
[ "$platform" = "kcli" ] && \
@@ -157,86 +159,59 @@ function deploy_kata() {
set_default_cluster_namespace
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
local values_yaml
values_yaml=$(mktemp /tmp/values_yaml.XXXXXX)
# Enable debug for Kata Containers
yq -i \
'.spec.template.spec.containers[0].env[1].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Create the runtime class only for the shim that's being tested
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Set the tested hypervisor as the default `kata` shim
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable 'default_vcpus' hypervisor annotation
yq -i \
'.spec.template.spec.containers[0].env[6].value = "default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i ".k8sDistribution = \"${KUBERNETES}\"" "${values_yaml}"
yq -i ".image.reference = \"${DOCKER_REGISTRY}/${DOCKER_REPO}\"" "${values_yaml}"
yq -i ".image.tag = \"${DOCKER_TAG}\"" "${values_yaml}"
yq -i ".env.debug = \"true\"" "${values_yaml}"
yq -i ".env.shims = \"${KATA_HYPERVISOR}\"" "${values_yaml}"
yq -i ".env.defaultShim = \"${KATA_HYPERVISOR}\"" "${values_yaml}"
yq -i ".env.createRuntimeClasses = \"true\"" "${values_yaml}"
yq -i ".env.createDefaultRuntimeClass = \"true\"" "${values_yaml}"
yq -i ".env.allowedHypervisorAnnotations = \"default_vcpus\"" "${values_yaml}"
yq -i ".env.snapshotterHandlerMapping = \"\"" "${values_yaml}"
yq -i ".env.agentHttpsProxy = \"\"" "${values_yaml}"
yq -i ".env.agentNoProxy = \"\"" "${values_yaml}"
yq -i ".env.pullTypeMapping = \"\"" "${values_yaml}"
yq -i ".env.hostOS = \"\"" "${values_yaml}"
if [ -n "${SNAPSHOTTER}" ]; then
yq -i \
".spec.template.spec.containers[0].env[7].value = \"${KATA_HYPERVISOR}:${SNAPSHOTTER}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i ".env.snapshotterHandlerMapping = \"${KATA_HYPERVISOR}:${SNAPSHOTTER}\"" "${values_yaml}"
fi
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq -i \
'.spec.template.spec.containers[0].env[6].value = "initrd kernel default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i \
".spec.template.spec.containers[0].env += [{\"name\": \"HOST_OS\", \"value\": \"${KATA_HOST_OS}\"}]" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i ".env.allowedHypervisorAnnotations = \"initrd kernel default_vcpus\"" "${values_yaml}"
yq -i ".env.hostOS = \"${KATA_HOST_OS}\"" "${values_yaml}"
fi
if [ "${KATA_HYPERVISOR}" = "qemu" ]; then
yq -i \
'.spec.template.spec.containers[0].env[6].value = "image initrd kernel default_vcpus"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i ".env.allowedHypervisorAnnotations = \"image initrd kernel default_vcpus\"" "${values_yaml}"
fi
if [ "${KATA_HYPERVISOR}" = "qemu-tdx" ]; then
yq -i \
".spec.template.spec.containers[0].env[8].value = \"${HTTPS_PROXY}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i \
".spec.template.spec.containers[0].env[9].value = \"${NO_PROXY}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i ".env.agentHttpsProxy = \"${HTTPS_PROXY}\"" "${values_yaml}"
yq -i ".env.agentNoProxy = \"${NO_PROXY}\"" "${values_yaml}"
fi
# Set the PULL_TYPE_MAPPING
if [ "${PULL_TYPE}" != "default" ]; then
yq -i \
".spec.template.spec.containers[0].env[10].value = \"${KATA_HYPERVISOR}:${PULL_TYPE}\"" \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
yq -i ".env.pullTypeMapping = \"${KATA_HYPERVISOR}:${PULL_TYPE}\"" "${values_yaml}"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" || die "Failed to setup the tests image"
echo "::group::Final kata-deploy manifests used in the test"
cat "${values_yaml}"
helm template "${helm_chart_dir}" --values "${values_yaml}" --namespace kube-system
[ "$(yq .image.reference ${values_yaml})" = "${DOCKER_REGISTRY}/${DOCKER_REPO}" ] || die "Failed to set image reference"
[ "$(yq .image.tag ${values_yaml})" = "${DOCKER_TAG}" ] || die "Failed to set image tag"
echo "::endgroup::"
kubectl_retry apply -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
case "${KUBERNETES}" in
k0s) kubectl_retry apply -k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/k0s" ;;
k3s) kubectl_retry apply -k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/k3s" ;;
rke2) kubectl_retry apply -k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/rke2" ;;
*) kubectl_retry apply -f "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
esac
local cmd="kubectl -n kube-system get -l name=kata-deploy pod 2>/dev/null | grep '\<Running\>'"
waitForProcess "${KATA_DEPLOY_WAIT_TIMEOUT}" 10 "$cmd"
# will wait until all Pods, PVCs, Services, and minimum number of Pods
# of a Deployment, StatefulSet, or ReplicaSet are in a ready state
# before marking the release as successful. It will wait for as long
# as --timeout -- Ready >> Running
helm install --wait --timeout 10m kata-deploy "${helm_chart_dir}" --values "${values_yaml}" --namespace kube-system
# This is needed as the kata-deploy pod will be set to "Ready" when it starts running,
# which may cause issues like not having the node properly labeled or the artefacts
@@ -406,58 +381,11 @@ function collect_artifacts() {
}
# Tear down the kata-deploy installation from the test cluster.
# NOTE(review): this view is a diff rendering — the body below interleaves
# the legacy kubectl/kustomize/yq cleanup path with the new helm-based
# uninstall; confirm against the merged file which path survives.
function cleanup_kata_deploy() {
ensure_yq
ensure_helm
# Select the deploy/cleanup manifests matching the Kubernetes distribution
# under test: kustomize overlays for k0s/k3s/rke2, plain manifests otherwise.
case "${KUBERNETES}" in
k0s)
deploy_spec="-k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/k0s""
cleanup_spec="-k "${tools_dir}/packaging/kata-deploy/kata-cleanup/overlays/k0s""
;;
k3s)
deploy_spec="-k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/k3s""
cleanup_spec="-k "${tools_dir}/packaging/kata-deploy/kata-cleanup/overlays/k3s""
;;
rke2)
deploy_spec="-k "${tools_dir}/packaging/kata-deploy/kata-deploy/overlays/rke2""
cleanup_spec="-k "${tools_dir}/packaging/kata-deploy/kata-cleanup/overlays/rke2""
;;
*)
deploy_spec="-f "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml""
cleanup_spec="-f "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml""
;;
esac
# Remove the kata-deploy DaemonSet and wait until all its pods are gone
# before running the cleanup DaemonSet.
# shellcheck disable=2086
kubectl_retry delete --ignore-not-found ${deploy_spec}
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq -i \
'.spec.template.spec.containers[0].env[4].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Create the runtime class only for the shim that's being tested
yq -i \
".spec.template.spec.containers[0].env[2].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Set the tested hypervisor as the default `kata` shim
yq -i \
".spec.template.spec.containers[0].env[3].value = \"${KATA_HYPERVISOR}\"" \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
# Let the `kata-deploy` create the default `kata` runtime class
# NOTE(review): this edit targets kata-deploy.yaml while every sibling edit
# above targets kata-cleanup.yaml — looks like a copy/paste slip; confirm.
yq -i \
'.spec.template.spec.containers[0].env[5].value = "true"' \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Point the cleanup manifest at the image under test and fail fast if the
# substitution did not take effect.
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" || die "Failed to setup the tests image"
# Run the cleanup DaemonSet, give it a fixed window to undo the host-side
# changes on every node, then remove it and the RBAC objects.
# shellcheck disable=2086
kubectl_retry apply ${cleanup_spec}
sleep 180s
# shellcheck disable=2086
kubectl_retry delete --ignore-not-found ${cleanup_spec}
kubectl_retry delete --ignore-not-found -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
# Do not return after deleting only the parent object cascade=foreground
# means also wait for child/dependent object deletion
helm uninstall kata-deploy --ignore-not-found --wait --cascade foreground --timeout 10m --namespace kube-system
}
function cleanup() {

View File

@@ -0,0 +1,18 @@
# Copyright (c) 2024 NVIDIA Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# Top-level repository VERSION file, relative to this Makefile.
VERSION_FILE := ../../../../VERSION

# Sync the chart's appVersion with the repository VERSION file.
release:
	sed -i 's/appVersion: .*/appVersion: $(shell cat $(VERSION_FILE))/g' kata-deploy/Chart.yaml

all: package

# NOTE(review): the 'helm' prerequisite target is not defined in this
# Makefile — confirm it is provided elsewhere (e.g. ensures $(HELM) exists).
package: helm release
	$(HELM) package ./kata-deploy

.PHONY: all release package clean
clean:
	rm -f kata-deploy-*.tgz

View File

@@ -0,0 +1,28 @@
# Copyright (c) 2024 NVIDIA Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: kata-deploy
description: A Helm chart for deploying Kata Containers
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: "3.6.0"
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "3.6.0"

View File

@@ -0,0 +1,20 @@
# Copyright (c) 2024 NVIDIA Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
{{/*
containerdConfPath: return the host directory holding containerd's
configuration for the given Kubernetes distribution.

Context: expects .k8sDistribution (the chart passes .Values); recognized
values are "rke2", "k3s" and "k0s" — anything else falls through to the
stock /etc/containerd/ path.  All actions use "-" trimming so the emitted
path carries no surrounding whitespace.
*/}}
{{- define "containerdConfPath" -}}
{{- if eq .k8sDistribution "rke2" -}}
/var/lib/rancher/rke2/agent/etc/containerd/
{{- else if eq .k8sDistribution "k3s" -}}
/var/lib/rancher/k3s/agent/etc/containerd/
{{- else if eq .k8sDistribution "k0s" -}}
/etc/k0s/containerd.d/
{{- else -}}
/etc/containerd/
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,92 @@
# kata-deploy DaemonSet: one privileged pod per node that installs the Kata
# Containers artifacts onto the host and configures the container runtime.
# (Indentation restored to conventional YAML — the diff rendering stripped it.)
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: {{ .Chart.Name }}
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
      name: {{ .Chart.Name }}
  template:
    metadata:
      labels:
        name: {{ .Chart.Name }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
      {{- toYaml . | nindent 6 }}
      {{- end }}
      serviceAccountName: {{ .Chart.Name }}-sa
      # hostPID lets the pod's scripts interact with host processes
      # (needed to restart the container runtime on the node).
      hostPID: true
      containers:
        - name: kube-kata
          # Falls back to the chart's appVersion when .Values.image.tag is empty.
          image: {{ .Values.image.reference }}:{{ default .Chart.AppVersion .Values.image.tag }}
          imagePullPolicy: {{ .Values.imagePullPolicy }}
          lifecycle:
            preStop:
              exec:
                # Undo the host-side changes when the pod is terminated.
                command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh cleanup"]
          command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install"]
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The env.* values below are plumbed straight from values.yaml to
            # the kata-deploy.sh script's environment.
            - name: DEBUG
              value: {{ .Values.env.debug | quote }}
            - name: SHIMS
              value: {{ .Values.env.shims | quote }}
            - name: DEFAULT_SHIM
              value: {{ .Values.env.defaultShim | quote }}
            - name: CREATE_RUNTIMECLASSES
              value: {{ .Values.env.createRuntimeClasses | quote }}
            - name: CREATE_DEFAULT_RUNTIMECLASS
              value: {{ .Values.env.createDefaultRuntimeClass | quote }}
            - name: ALLOWED_HYPERVISOR_ANNOTATIONS
              value: {{ .Values.env.allowedHypervisorAnnotations | quote }}
            - name: SNAPSHOTTER_HANDLER_MAPPING
              value: {{ .Values.env.snapshotterHandlerMapping | quote }}
            - name: AGENT_HTTPS_PROXY
              value: {{ .Values.env.agentHttpsProxy | quote }}
            - name: AGENT_NO_PROXY
              value: {{ .Values.env.agentNoProxy | quote }}
            - name: PULL_TYPE_MAPPING
              value: {{ .Values.env.pullTypeMapping | quote }}
            # HOST_OS is only set when env.hostOS is non-empty.
            {{- with .Values.env.hostOS }}
            - name: HOST_OS
              value: {{ . | quote }}
            {{- end }}
          securityContext:
            # Required: the script writes runtime configs and binaries on the host.
            privileged: true
          volumeMounts:
            - name: crio-conf
              mountPath: /etc/crio/
            # NOTE(review): always mounted at /etc/containerd/ inside the
            # container even though the host path varies per distribution
            # (see containerdConfPath) — confirm kata-deploy.sh expects this.
            - name: containerd-conf
              mountPath: /etc/containerd/
            - name: kata-artifacts
              mountPath: /opt/kata/
            - name: local-bin
              mountPath: /usr/local/bin/
            - name: host
              mountPath: /host/
      volumes:
        - name: crio-conf
          hostPath:
            path: /etc/crio/
        - name: containerd-conf
          hostPath:
            # Distribution-dependent host path, resolved by the helper template.
            path: '{{- template "containerdConfPath" .Values }}'
        - name: kata-artifacts
          hostPath:
            path: /opt/kata/
            type: DirectoryOrCreate
        - name: local-bin
          hostPath:
            path: /usr/local/bin/
        - name: host
          hostPath:
            path: /
  updateStrategy:
    rollingUpdate:
      # Roll out node by node to avoid disrupting the whole cluster at once.
      maxUnavailable: 1
    type: RollingUpdate

View File

@@ -0,0 +1,31 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Chart.Name }}-sa
namespace: {{ .Release.Namespace }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Chart.Name }}-role
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "patch"]
- apiGroups: ["node.k8s.io"]
resources: ["runtimeclasses"]
verbs: ["create", "delete", "get", "list", "patch", "update", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Chart.Name }}-rb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Chart.Name }}-role
subjects:
- kind: ServiceAccount
name: {{ .Chart.Name }}-sa
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,19 @@
imagePullPolicy: Always
imagePullSecrets: []
image:
reference: quay.io/kata-containers/kata-deploy
tag: ""
# k8sDistribution can be one of: k8s, k3s, rke2, k0s
k8sDistribution: "k8s"
env:
debug: "false"
shims: "clh cloud-hypervisor dragonball fc qemu qemu-coco-dev qemu-runtime-rs qemu-sev qemu-snp qemu-tdx stratovirt qemu-nvidia-gpu qemu-nvidia-gpu-snp qemu-nvidia-gpu-tdx"
defaultShim: "qemu"
createRuntimeClasses: "false"
createDefaultRuntimeClass: "false"
allowedHypervisorAnnotations: ""
snapshotterHandlerMapping: ""
agentHttpsProxy: ""
agentNoProxy: ""
pullTypeMapping: ""
hostOS: ""

View File

@@ -706,6 +706,7 @@ function main() {
cleanup_cri_runtime "$runtime"
kubectl label node "$NODE_NAME" --overwrite katacontainers.io/kata-runtime=cleanup
remove_artifacts
reset_runtime "$runtime"
;;
reset)
reset_runtime $runtime

View File

@@ -218,6 +218,11 @@ externals:
version: "1.36.1"
url: "https://busybox.net/downloads"
helm:
description: "Kubernetes package manager"
url: "https://get.helm.sh/"
version: "v3.15.2"
cni-plugins:
description: "CNI network plugins"
url: "https://github.com/containernetworking/plugins"