Mirror of https://github.com/kata-containers/kata-containers.git
tests: Create a helm_helper() common function
Let's use what we already have in the k8s functional tests to create a common function that deploys Kata Containers using our Helm charts. This will help us immensely on the kata-deploy testing side in the near future.

Signed-off-by: Fabiano Fidêncio <fabiano@fidencio.org>
parent eb884d33a8
commit f7976a40e4
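The helper added below lives in the shared k8s CI helper file (the one that already defines the AZ_*, KUBERNETES and K8S_TEST_HOST_TYPE knobs) and is driven entirely by HELM_* environment variables. As a rough sketch of how a caller is expected to use it, assuming the common helpers are already sourced and using purely illustrative values (not defaults from this commit):

    # Hypothetical caller; the values below are examples only.
    export HELM_IMAGE_REFERENCE="quay.io/kata-containers/kata-deploy-ci"
    export HELM_IMAGE_TAG="kata-containers-latest"
    export HELM_K8S_DISTRIBUTION="k3s"   # optional; left to the chart default when empty
    export HELM_DEBUG="true"             # only applied when HELM_DEFAULT_INSTALLATION=false
    helm_helper                          # builds a values.yaml with yq and runs helm upgrade --install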
@@ -7,6 +7,7 @@
 tests_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "${tests_dir}/common.bash"
 kubernetes_dir="${tests_dir}/integration/kubernetes"
+helm_chart_dir="${repo_root_dir}/tools/packaging/kata-deploy/helm-chart/kata-deploy"

 AZ_APPID="${AZ_APPID:-}"
 AZ_PASSWORD="${AZ_PASSWORD:-}"
@@ -14,6 +15,22 @@ AZ_SUBSCRIPTION_ID="${AZ_SUBSCRIPTION_ID:-}"
 AZ_TENANT_ID="${AZ_TENANT_ID:-}"
 GENPOLICY_PULL_METHOD="${GENPOLICY_PULL_METHOD:-oci-distribution}"
 GH_PR_NUMBER="${GH_PR_NUMBER:-}"
+HELM_DEFAULT_INSTALLATION="${HELM_DEFAULT_INSTALLATION:-false}"
+HELM_AGENT_HTTPS_PROXY="${HELM_AGENT_HTTPS_PROXY:-}"
+HELM_AGENT_NO_PROXY="${HELM_AGENT_NO_PROXY:-}"
+HELM_ALLOWED_HYPERVISOR_ANNOTATIONS="${HELM_ALLOWED_HYPERVISOR_ANNOTATIONS:-}"
+HELM_CREATE_RUNTIME_CLASSES="${HELM_CREATE_RUNTIME_CLASSES:-}"
+HELM_CREATE_DEFAULT_RUNTIME_CLASS="${HELM_CREATE_DEFAULT_RUNTIME_CLASS:-}"
+HELM_DEBUG="${HELM_DEBUG:-}"
+HELM_DEFAULT_SHIM="${HELM_DEFAULT_SHIM:-}"
+HELM_HOST_OS="${HELM_HOST_OS:-}"
+HELM_IMAGE_REFERENCE="${HELM_IMAGE_REFERENCE:-}"
+HELM_IMAGE_TAG="${HELM_IMAGE_TAG:-}"
+HELM_K8S_DISTRIBUTION="${HELM_K8S_DISTRIBUTION:-}"
+HELM_PULL_TYPE_MAPPING="${HELM_PULL_TYPE_MAPPING:-}"
+HELM_SHIMS="${HELM_SHIMS:-}"
+HELM_SNAPSHOTTER_HANDLER_MAPPING="${HELM_SNAPSHOTTER_HANDLER_MAPPING:-}"
+KATA_DEPLOY_WAIT_TIMEOUT="${KATA_DEPLOY_WAIT_TIMEOUT:-600}"
 KATA_HOST_OS="${KATA_HOST_OS:-}"
 KUBERNETES="${KUBERNETES:-}"
 K8S_TEST_HOST_TYPE="${K8S_TEST_HOST_TYPE:-small}"
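Of these knobs, only HELM_IMAGE_REFERENCE and HELM_IMAGE_TAG are mandatory for the new helper; the rest are optional overrides, and the env.* ones are only applied when HELM_DEFAULT_INSTALLATION is "false" (the default), as the helm_helper() hunk below shows. A hedged sketch of the "default installation" mode, with illustrative values:

    # Hypothetical: exercise the chart's own defaults, pinning only the image.
    # The env.* overrides are skipped because HELM_DEFAULT_INSTALLATION=true.
    HELM_DEFAULT_INSTALLATION="true" \
    HELM_IMAGE_REFERENCE="quay.io/kata-containers/kata-deploy-ci" \
    HELM_IMAGE_TAG="kata-containers-latest" \
    helm_helper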
@@ -427,3 +444,95 @@ function delete_test_runners(){
        fi
    done
 }
+
+function helm_helper() {
+    local max_tries
+    local interval
+    local i
+    local values_yaml
+
+    ensure_yq
+    ensure_helm
+
+    values_yaml=$(mktemp -t values_yaml.XXXXXX)
+
+    if [[ -z "${HELM_IMAGE_REFERENCE}" ]]; then
+        die "HELM_IMAGE_REFERENCE environment variable cannot be empty."
+    fi
+    yq -i ".image.reference = \"${HELM_IMAGE_REFERENCE}\"" "${values_yaml}"
+
+    if [[ -z "${HELM_IMAGE_TAG}" ]]; then
+        die "HELM_IMAGE_TAG environment variable cannot be empty."
+    fi
+    yq -i ".image.tag = \"${HELM_IMAGE_TAG}\"" "${values_yaml}"
+
+    [[ -n "${HELM_K8S_DISTRIBUTION}" ]] && yq -i ".k8sDistribution = \"${HELM_K8S_DISTRIBUTION}\"" "${values_yaml}"
+
+    if [[ "${HELM_DEFAULT_INSTALLATION}" = "false" ]]; then
+        [[ -n "${HELM_DEBUG}" ]] && yq -i ".env.debug = \"${HELM_DEBUG}\"" "${values_yaml}"
+        [[ -n "${HELM_SHIMS}" ]] && yq -i ".env.shims = \"${HELM_SHIMS}\"" "${values_yaml}"
+        [[ -n "${HELM_DEFAULT_SHIM}" ]] && yq -i ".env.defaultShim = \"${HELM_DEFAULT_SHIM}\"" "${values_yaml}"
+        [[ -n "${HELM_CREATE_RUNTIME_CLASSES}" ]] && yq -i ".env.createRuntimeClasses = \"${HELM_CREATE_RUNTIME_CLASSES}\"" "${values_yaml}"
+        [[ -n "${HELM_CREATE_DEFAULT_RUNTIME_CLASS}" ]] && yq -i ".env.createDefaultRuntimeClass = \"${HELM_CREATE_DEFAULT_RUNTIME_CLASS}\"" "${values_yaml}"
+        [[ -n "${HELM_ALLOWED_HYPERVISOR_ANNOTATIONS}" ]] && yq -i ".env.allowedHypervisorAnnotations = \"${HELM_ALLOWED_HYPERVISOR_ANNOTATIONS}\"" "${values_yaml}"
+        [[ -n "${HELM_SNAPSHOTTER_HANDLER_MAPPING}" ]] && yq -i ".env.snapshotterHandlerMapping = \"${HELM_SNAPSHOTTER_HANDLER_MAPPING}\"" "${values_yaml}"
+        [[ -n "${HELM_AGENT_HTTPS_PROXY}" ]] && yq -i ".env.agentHttpsProxy = \"${HELM_AGENT_HTTPS_PROXY}\"" "${values_yaml}"
+        [[ -n "${HELM_AGENT_NO_PROXY}" ]] && yq -i ".env.agentNoProxy = \"${HELM_AGENT_NO_PROXY}\"" "${values_yaml}"
+        [[ -n "${HELM_PULL_TYPE_MAPPING}" ]] && yq -i ".env.pullTypeMapping = \"${HELM_PULL_TYPE_MAPPING}\"" "${values_yaml}"
+        [[ -n "${HELM_HOST_OS}" ]] && yq -i ".env.hostOS=\"${HELM_HOST_OS}\"" "${values_yaml}"
+    fi
+
+    echo "::group::Final kata-deploy manifests used in the test"
+    cat "${values_yaml}"
+    echo ""
+    helm template "${helm_chart_dir}" --values "${values_yaml}" --namespace kube-system
+    [[ "$(yq .image.reference "${values_yaml}")" = "${HELM_IMAGE_REFERENCE}" ]] || die "Failed to set image reference"
+    [[ "$(yq .image.tag "${values_yaml}")" = "${HELM_IMAGE_TAG}" ]] || die "Failed to set image tag"
+    echo "::endgroup::"
+
+    max_tries=3
+    interval=10
+    i=10
+
+    # Retry loop for helm install to prevent transient failures due to instantly unreachable cluster
+    set +e # Disable immediate exit on failure
+    while true; do
+        helm upgrade --install kata-deploy "${helm_chart_dir}" --values "${values_yaml}" --namespace kube-system --debug
+        ret=${?}
+        if [[ ${ret} -eq 0 ]]; then
+            echo "Helm install succeeded!"
+            break
+        fi
+        i=$((i+1))
+        if [[ ${i} -lt ${max_tries} ]]; then
+            echo "Retrying after ${interval} seconds (Attempt ${i} of $((max_tries - 1)))"
+        else
+            break
+        fi
+        sleep "${interval}"
+    done
+    set -e # Re-enable immediate exit on failure
+    if [[ ${i} -eq ${max_tries} ]]; then
+        die "Failed to deploy kata-deploy after ${max_tries} tries"
+    fi
+
+    # `helm install --wait` does not take effect on single replicas and maxUnavailable=1 DaemonSets
+    # like kata-deploy on CI. So wait for pods being Running in the "traditional" way.
+    local cmd
+    cmd="kubectl -n kube-system get -l name=kata-deploy pod 2>/dev/null | grep '\<Running\>'"
+    waitForProcess "${KATA_DEPLOY_WAIT_TIMEOUT}" 10 "${cmd}"
+
+    # FIXME: This is needed as the kata-deploy pod will be set to "Ready"
+    # when it starts running, which may cause issues like not having the
+    # node properly labeled or the artefacts properly deployed when the
+    # tests actually start running.
+    sleep 60s
+
+    echo "::group::kata-deploy logs"
+    kubectl_retry -n kube-system logs --tail=100 -l name=kata-deploy
+    echo "::endgroup::"
+
+    echo "::group::Runtime classes"
+    kubectl_retry get runtimeclass
+    echo "::endgroup::"
+}
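For readers unfamiliar with the yq pattern above: the helper starts from an empty temporary file and lets yq create the nested structure on demand, which is also what the two trailing sanity checks read back. A standalone illustration, not part of the commit, written against mikefarah yq v4 syntax with illustrative values:

    # Shows what the yq -i calls above produce on a fresh temp file.
    values_yaml=$(mktemp -t values_yaml.XXXXXX)
    yq -i '.image.reference = "quay.io/kata-containers/kata-deploy-ci"' "${values_yaml}"
    yq -i '.image.tag = "kata-containers-latest"' "${values_yaml}"
    yq -i '.env.debug = "true"' "${values_yaml}"
    cat "${values_yaml}"
    # image:
    #   reference: quay.io/kata-containers/kata-deploy-ci
    #   tag: kata-containers-latest
    # env:
    #   debug: "true"
    yq .image.tag "${values_yaml}"   # reads back: kata-containers-latest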
@@ -17,13 +17,11 @@ source "${kubernetes_dir}/../../gha-run-k8s-common.sh"
 source "${kubernetes_dir}/confidential_kbs.sh"
 # shellcheck disable=2154
 tools_dir="${repo_root_dir}/tools"
-helm_chart_dir="${tools_dir}/packaging/kata-deploy/helm-chart/kata-deploy"
 kata_tarball_dir="${2:-kata-artifacts}"

 export DOCKER_REGISTRY="${DOCKER_REGISTRY:-quay.io}"
 export DOCKER_REPO="${DOCKER_REPO:-kata-containers/kata-deploy-ci}"
 export DOCKER_TAG="${DOCKER_TAG:-kata-containers-latest}"
-export KATA_DEPLOY_WAIT_TIMEOUT="${KATA_DEPLOY_WAIT_TIMEOUT:-600}"
 export SNAPSHOTTER_DEPLOY_WAIT_TIMEOUT="${SNAPSHOTTER_DEPLOY_WAIT_TIMEOUT:-8m}"
 export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
 export CONTAINER_RUNTIME="${CONTAINER_RUNTIME:-containerd}"
@@ -165,8 +163,6 @@ function deploy_coco_kbs() {

 function deploy_kata() {
     platform="${1:-}"
-    ensure_helm
-    ensure_yq

     [[ "${platform}" = "kcli" ]] && \
         export KUBECONFIG="${HOME}/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
@@ -177,106 +173,44 @@ function deploy_kata() {

     set_default_cluster_namespace

-    local values_yaml
-    values_yaml=$(mktemp /tmp/values_yaml.XXXXXX)
-
-    yq -i ".k8sDistribution = \"${KUBERNETES}\"" "${values_yaml}"
-    yq -i ".image.reference = \"${DOCKER_REGISTRY}/${DOCKER_REPO}\"" "${values_yaml}"
-    yq -i ".image.tag = \"${DOCKER_TAG}\"" "${values_yaml}"
-    yq -i ".env.debug = \"true\"" "${values_yaml}"
-    yq -i ".env.shims = \"${KATA_HYPERVISOR}\"" "${values_yaml}"
-    yq -i ".env.defaultShim = \"${KATA_HYPERVISOR}\"" "${values_yaml}"
-    yq -i ".env.createRuntimeClasses = \"true\"" "${values_yaml}"
-    yq -i ".env.createDefaultRuntimeClass = \"true\"" "${values_yaml}"
-    yq -i ".env.allowedHypervisorAnnotations = \"default_vcpus\"" "${values_yaml}"
-    yq -i ".env.snapshotterHandlerMapping = \"\"" "${values_yaml}"
-    yq -i ".env.agentHttpsProxy = \"\"" "${values_yaml}"
-    yq -i ".env.agentNoProxy = \"\"" "${values_yaml}"
-    yq -i ".env.pullTypeMapping = \"\"" "${values_yaml}"
-    yq -i ".env.hostOS = \"\"" "${values_yaml}"
-
-    if [[ -n "${SNAPSHOTTER}" ]]; then
-        yq -i ".env.snapshotterHandlerMapping = \"${KATA_HYPERVISOR}:${SNAPSHOTTER}\"" "${values_yaml}"
-    fi
-
+    ANNOTATIONS="default_vcpus"
     if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
-        yq -i ".env.allowedHypervisorAnnotations = \"image kernel default_vcpus\"" "${values_yaml}"
-        yq -i ".env.hostOS = \"${KATA_HOST_OS}\"" "${values_yaml}"
+        ANNOTATIONS="image kernel default_vcpus"
     fi

     if [[ "${KATA_HYPERVISOR}" = "qemu" ]]; then
-        yq -i ".env.allowedHypervisorAnnotations = \"image initrd kernel default_vcpus\"" "${values_yaml}"
+        ANNOTATIONS="image initrd kernel default_vcpus"
     fi

-    if [[ "${KATA_HYPERVISOR}" = "qemu-tdx" ]]; then
-        yq -i ".env.agentHttpsProxy = \"${HTTPS_PROXY}\"" "${values_yaml}"
-        yq -i ".env.agentNoProxy = \"${NO_PROXY}\"" "${values_yaml}"
+    SNAPSHOTTER_HANDLER_MAPPING=""
+    if [[ -n "${SNAPSHOTTER}" ]]; then
+        SNAPSHOTTER_HANDLER_MAPPING="${KATA_HYPERVISOR}:${SNAPSHOTTER}"
     fi

-    # Set the PULL_TYPE_MAPPING
+    PULL_TYPE_MAPPING=""
     if [[ "${PULL_TYPE}" != "default" ]]; then
-        yq -i ".env.pullTypeMapping = \"${KATA_HYPERVISOR}:${PULL_TYPE}\"" "${values_yaml}"
+        PULL_TYPE_MAPPING="${KATA_HYPERVISOR}:${PULL_TYPE}"
     fi

-    echo "::group::Final kata-deploy manifests used in the test"
-    cat "${values_yaml}"
-    helm template "${helm_chart_dir}" --values "${values_yaml}" --namespace kube-system
-    [[ "$(yq .image.reference "${values_yaml}")" = "${DOCKER_REGISTRY}/${DOCKER_REPO}" ]] || die "Failed to set image reference"
-    [[ "$(yq .image.tag "${values_yaml}")" = "${DOCKER_TAG}" ]] || die "Failed to set image tag"
-    echo "::endgroup::"
-
-    local max_tries
-    local interval
-    local i
-
-    max_tries=3
-    interval=10
-    i=10
-
-    # Retry loop for helm install to prevent transient failures due to instantly unreachable cluster
-    set +e # Disable immediate exit on failure
-    while true; do
-        helm upgrade --install kata-deploy "${helm_chart_dir}" --values "${values_yaml}" --namespace kube-system --debug
-        ret=${?}
-        if [[ ${ret} -eq 0 ]]; then
-            echo "Helm install succeeded!"
-            break
-        fi
-        i=$((i+1))
-        if [[ ${i} -lt ${max_tries} ]]; then
-            echo "Retrying after ${interval} seconds (Attempt ${i} of $((max_tries - 1)))"
-        else
-            break
-        fi
-        sleep "${interval}"
-    done
-    set -e # Re-enable immediate exit on failure
-    if [[ ${i} -eq ${max_tries} ]]; then
-        die "Failed to deploy kata-deploy after ${max_tries} tries"
+    HOST_OS=""
+    if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
+        HOST_OS="${KATA_HOST_OS}"
     fi

-    # `helm install --wait` does not take effect on single replicas and maxUnavailable=1 DaemonSets
-    # like kata-deploy on CI. So wait for pods being Running in the "tradicional" way.
-    local cmd
-    cmd="kubectl -n kube-system get -l name=kata-deploy pod 2>/dev/null | grep '\<Running\>'"
-    waitForProcess "${KATA_DEPLOY_WAIT_TIMEOUT}" 10 "${cmd}"
-
-    # This is needed as the kata-deploy pod will be set to "Ready" when it starts running,
-    # which may cause issues like not having the node properly labeled or the artefacts
-    # properly deployed when the tests actually start running.
-    if [[ "${platform}" = "aks" ]]; then
-        sleep 240s
-    else
-        sleep 60s
-    fi
-
-    echo "::group::kata-deploy logs"
-    kubectl_retry -n kube-system logs --tail=100 -l name=kata-deploy
-    echo "::endgroup::"
-
-    echo "::group::Runtime classes"
-    kubectl_retry get runtimeclass
-    echo "::endgroup::"
+    export HELM_K8S_DISTRIBUTION="${KUBERNETES}"
+    export HELM_IMAGE_REFERENCE="${DOCKER_REGISTRY}/${DOCKER_REPO}"
+    export HELM_IMAGE_TAG="${DOCKER_TAG}"
+    export HELM_DEBUG="true"
+    export HELM_SHIMS="${KATA_HYPERVISOR}"
+    export HELM_DEFAULT_SHIM="${KATA_HYPERVISOR}"
+    export HELM_CREATE_RUNTIME_CLASSES="true"
+    export HELM_CREATE_DEFAULT_RUNTIME_CLASS="true"
+    export HELM_ALLOWED_HYPERVISOR_ANNOTATIONS="${ANNOTATIONS}"
+    export HELM_SNAPSHOTTER_HANDLER_MAPPING="${SNAPSHOTTER_HANDLER_MAPPING}"
+    export HELM_AGENT_HTTPS_PROXY="${HTTPS_PROXY}"
+    export HELM_AGENT_NO_PROXY="${NO_PROXY}"
+    export HELM_PULL_TYPE_MAPPING="${PULL_TYPE_MAPPING}"
+    export HELM_HOST_OS="${HOST_OS}"
+
+    helm_helper
 }

 function install_kbs_client() {