Merge pull request #11388 from kata-containers/sprt/azure-oidc

ci: Use OIDC to log into Azure
commit f34010cc94
Aurélien Bombo authored on 2025-06-10 13:08:44 -05:00; committed by GitHub
21 changed files with 37 additions and 392 deletions
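The change is the same across all 21 files: jobs stop receiving the AZ_PASSWORD service-principal secret and instead mint a GitHub OIDC token (the new id-token: write permission), which the pinned azure/login action exchanges for Azure credentials. A minimal sketch of the resulting pattern, condensed from the hunks below; the job name is illustrative, and AZ_APPID, AZ_TENANT_ID, and AZ_SUBSCRIPTION_ID stay in the repository secrets while no password ever reaches the runner:

    permissions:
      contents: read
      id-token: write    # allow the job to request a GitHub OIDC token

    jobs:
      azure-job:         # hypothetical job name
        runs-on: ubuntu-22.04
        environment: ci  # presumably what the Azure federated credential is scoped to
        steps:
          - name: Log into Azure
            uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
            with:
              client-id: ${{ secrets.AZ_APPID }}
              tenant-id: ${{ secrets.AZ_TENANT_ID }}
              subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}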

File 1 of 21:

@@ -27,7 +27,6 @@ jobs:
     secrets:
       AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
       AZ_APPID: ${{ secrets.AZ_APPID }}
-      AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
       AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
       AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}

File 2 of 21:

@@ -22,7 +22,6 @@ jobs:
     secrets:
       AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
       AZ_APPID: ${{ secrets.AZ_APPID }}
-      AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
       AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
       AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}

File 3 of 21:

@@ -26,7 +26,6 @@ jobs:
     secrets:
       AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
       AZ_APPID: ${{ secrets.AZ_APPID }}
-      AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
       AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
       AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}

File 4 of 21:

@@ -16,6 +16,7 @@ on:
 permissions:
   contents: read
+  id-token: write

 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -47,7 +48,6 @@ jobs:
     secrets:
       AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
       AZ_APPID: ${{ secrets.AZ_APPID }}
-      AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
       AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
       AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}

File 5 of 21:

@@ -21,8 +21,6 @@ on:
       AZ_APPID:
         required: true
-      AZ_PASSWORD:
-        required: true
       AZ_TENANT_ID:
         required: true
       AZ_SUBSCRIPTION_ID:
@@ -118,6 +116,5 @@ jobs:
     secrets:
       AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
       AZ_APPID: ${{ secrets.AZ_APPID }}
-      AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
       AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
       AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
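Because this is a reusable workflow, dropping AZ_PASSWORD from its workflow_call secrets is what lets every caller above shed the matching pass-through line. A hypothetical caller after the change (workflow path and job name assumed for illustration; the caller must also grant id-token: write for the called workflow to use OIDC):

    jobs:
      run-tests:                                        # hypothetical job name
        uses: ./.github/workflows/example-tests.yaml    # assumed path
        permissions:
          contents: read
          id-token: write
        secrets:
          AZ_APPID: ${{ secrets.AZ_APPID }}
          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}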

File 6 of 21:

@@ -25,8 +25,6 @@ on:
       AZ_APPID:
         required: true
-      AZ_PASSWORD:
-        required: true
       AZ_TENANT_ID:
         required: true
       AZ_SUBSCRIPTION_ID:
@@ -40,6 +38,7 @@ on:
 permissions:
   contents: read
+  id-token: write

 jobs:
   build-kata-static-tarball-amd64:
@@ -295,7 +294,6 @@ jobs:
       target-branch: ${{ inputs.target-branch }}
     secrets:
       AZ_APPID: ${{ secrets.AZ_APPID }}
-      AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
       AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
       AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
@@ -341,7 +339,6 @@ jobs:
     secrets:
       AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
       AZ_APPID: ${{ secrets.AZ_APPID }}
-      AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
       AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
       AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       ITA_KEY: ${{ secrets.ITA_KEY }}

File 7 of 21:

@@ -6,20 +6,21 @@ on:
 permissions:
   contents: read
+  id-token: write

 jobs:
   cleanup-resources:
     runs-on: ubuntu-22.04
+    environment: ci
     steps:
       - uses: actions/checkout@v4
       - name: Log into Azure
-        env:
-          AZ_APPID: ${{ secrets.AZ_APPID }}
-          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
-          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
-          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
-        run: bash tests/integration/kubernetes/gha-run.sh login-azure
+        uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
+        with:
+          client-id: ${{ secrets.AZ_APPID }}
+          tenant-id: ${{ secrets.AZ_TENANT_ID }}
+          subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       - name: Install Python dependencies
         run: |
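This diff presumes a one-time Azure-side step that never appears in the repository: the app registration behind AZ_APPID needs a federated credential trusting GitHub's OIDC issuer, and given the new environment: ci lines it is most likely scoped to that environment. A hedged sketch of that admin step; the credential name and subject are assumptions, and it is written as a workflow step only to match the rest of this page (in practice it is run once from a shell):

    - name: Create federated credential (one-time admin setup; illustrative)
      run: |
        az ad app federated-credential create --id "${AZ_APPID}" --parameters '{
          "name": "kata-ci-github-oidc",
          "issuer": "https://token.actions.githubusercontent.com",
          "subject": "repo:kata-containers/kata-containers:environment:ci",
          "audiences": ["api://AzureADTokenExchange"]
        }'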

File 8 of 21:

@@ -28,8 +28,6 @@ on:
       AZ_APPID:
         required: true
-      AZ_PASSWORD:
-        required: true
       AZ_TENANT_ID:
         required: true
       AZ_SUBSCRIPTION_ID:
@@ -38,6 +36,7 @@ on:
 permissions:
   contents: read
+  id-token: write

 jobs:
   run-k8s-tests:
@@ -72,6 +71,7 @@ jobs:
           instance-type: normal
           auto-generate-policy: yes
     runs-on: ubuntu-22.04
+    environment: ci
     env:
       DOCKER_REGISTRY: ${{ inputs.registry }}
       DOCKER_REPO: ${{ inputs.repo }}
@@ -109,12 +109,11 @@ jobs:
         run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
       - name: Log into the Azure account
-        run: bash tests/integration/kubernetes/gha-run.sh login-azure
-        env:
-          AZ_APPID: ${{ secrets.AZ_APPID }}
-          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
-          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
-          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
+        uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
+        with:
+          client-id: ${{ secrets.AZ_APPID }}
+          tenant-id: ${{ secrets.AZ_TENANT_ID }}
+          subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       - name: Create AKS cluster
         uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
File 9 of 21:

@@ -28,8 +28,6 @@ on:
       AZ_APPID:
         required: true
-      AZ_PASSWORD:
-        required: true
       AZ_TENANT_ID:
         required: true
       AZ_SUBSCRIPTION_ID:
@@ -39,6 +37,7 @@ on:
 permissions:
   contents: read
+  id-token: write

 jobs:
   # Generate jobs for testing CoCo on non-TEE environments
@@ -53,6 +52,7 @@ jobs:
         pull-type:
           - guest-pull
     runs-on: ubuntu-22.04
+    environment: ci
     env:
       DOCKER_REGISTRY: ${{ inputs.registry }}
       DOCKER_REPO: ${{ inputs.repo }}
@@ -94,12 +94,11 @@ jobs:
         run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
       - name: Log into the Azure account
-        run: bash tests/integration/kubernetes/gha-run.sh login-azure
-        env:
-          AZ_APPID: ${{ secrets.AZ_APPID }}
-          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
-          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
-          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
+        uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
+        with:
+          client-id: ${{ secrets.AZ_APPID }}
+          tenant-id: ${{ secrets.AZ_TENANT_ID }}
+          subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       - name: Create AKS cluster
         uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2

File 10 of 21:

@@ -29,8 +29,6 @@ on:
         required: true
       AZ_APPID:
         required: true
-      AZ_PASSWORD:
-        required: true
       AZ_TENANT_ID:
         required: true
       AZ_SUBSCRIPTION_ID:
@@ -40,6 +38,7 @@ on:
 permissions:
   contents: read
+  id-token: write

 jobs:
   run-k8s-tests-on-tdx:
@@ -223,6 +222,7 @@ jobs:
         pull-type:
           - guest-pull
     runs-on: ubuntu-22.04
+    environment: ci
     env:
       DOCKER_REGISTRY: ${{ inputs.registry }}
       DOCKER_REPO: ${{ inputs.repo }}
@@ -270,12 +270,11 @@ jobs:
         run: bash tests/integration/kubernetes/gha-run.sh install-azure-cli
       - name: Log into the Azure account
-        run: bash tests/integration/kubernetes/gha-run.sh login-azure
-        env:
-          AZ_APPID: ${{ secrets.AZ_APPID }}
-          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
-          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
-          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
+        uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
+        with:
+          client-id: ${{ secrets.AZ_APPID }}
+          tenant-id: ${{ secrets.AZ_TENANT_ID }}
+          subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       - name: Create AKS cluster
         uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2

File 11 of 21:

@@ -24,8 +24,6 @@ on:
     secrets:
       AZ_APPID:
         required: true
-      AZ_PASSWORD:
-        required: true
       AZ_TENANT_ID:
         required: true
       AZ_SUBSCRIPTION_ID:
@@ -33,6 +31,7 @@ on:
 permissions:
   contents: read
+  id-token: write

 jobs:
   run-kata-deploy-tests:
@@ -50,6 +49,7 @@ jobs:
         - host_os: cbl-mariner
           vmm: clh
     runs-on: ubuntu-22.04
+    environment: ci
     env:
       DOCKER_REGISTRY: ${{ inputs.registry }}
       DOCKER_REPO: ${{ inputs.repo }}
@@ -75,12 +75,11 @@ jobs:
         run: bash tests/functional/kata-deploy/gha-run.sh install-azure-cli
       - name: Log into the Azure account
-        run: bash tests/functional/kata-deploy/gha-run.sh login-azure
-        env:
-          AZ_APPID: ${{ secrets.AZ_APPID }}
-          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
-          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
-          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
+        uses: azure/login@a457da9ea143d694b1b9c7c869ebb04ebe844ef5 # v2.3.0
+        with:
+          client-id: ${{ secrets.AZ_APPID }}
+          tenant-id: ${{ secrets.AZ_TENANT_ID }}
+          subscription-id: ${{ secrets.AZ_SUBSCRIPTION_ID }}
       - name: Create AKS cluster
         uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
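Unrelated to the credential change but visible in the context lines: cluster creation stays wrapped in nick-fields/retry, pinned to a full commit SHA with a version comment just like azure/login. The retry inputs fall outside this diff, so the values below are placeholders:

    - name: Create AKS cluster
      uses: nick-fields/retry@ce71cc2ab81d554ebbe88c79ab5975992d79ba08 # v3.0.2
      with:
        timeout_minutes: 15    # placeholder; real value not shown in this diff
        max_attempts: 20       # placeholder; real value not shown in this diff
        command: bash tests/functional/kata-deploy/gha-run.sh create-cluster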

File 12 of 21:

@@ -54,7 +54,6 @@ function main() {
     case "${action}" in
         install-azure-cli) install_azure_cli ;;
-        login-azure) login_azure ;;
         create-cluster) create_cluster "kata-deploy" ;;
         deploy-k8s) deploy_k8s ;;
         install-bats) install_bats ;;

File 13 of 21:

@@ -9,10 +9,6 @@ source "${tests_dir}/common.bash"
 kubernetes_dir="${tests_dir}/integration/kubernetes"
 helm_chart_dir="${repo_root_dir}/tools/packaging/kata-deploy/helm-chart/kata-deploy"

-AZ_APPID="${AZ_APPID:-}"
-AZ_PASSWORD="${AZ_PASSWORD:-}"
-AZ_SUBSCRIPTION_ID="${AZ_SUBSCRIPTION_ID:-}"
-AZ_TENANT_ID="${AZ_TENANT_ID:-}"
 GENPOLICY_PULL_METHOD="${GENPOLICY_PULL_METHOD:-oci-distribution}"
 GH_PR_NUMBER="${GH_PR_NUMBER:-}"
 HELM_DEFAULT_INSTALLATION="${HELM_DEFAULT_INSTALLATION:-false}"
@@ -94,17 +90,6 @@ function install_azure_cli() {
     az extension add --name aks-preview
 }

-function login_azure() {
-    az login \
-        --service-principal \
-        -u "${AZ_APPID}" \
-        -p "${AZ_PASSWORD}" \
-        --tenant "${AZ_TENANT_ID}"
-
-    # Switch to the Kata Containers subscription
-    az account set --subscription "${AZ_SUBSCRIPTION_ID}"
-}
-
 function create_cluster() {
     test_type="${1:-k8s}"
     local short_sha
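With login_azure deleted from the helper scripts, authentication is now entirely the workflow's job: azure/login must have run before any gha-run.sh action that touches Azure, and the subscription switch previously done by az account set is covered by the action's subscription-id input. A hypothetical sanity-check step a job could add right after login (not part of this PR):

    - name: Verify Azure login
      run: |
        # should print the subscription selected by azure/login
        az account show --query "{name: name, id: id}" --output table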

File 14 of 21:

@@ -567,7 +567,6 @@ function main() {
     case "${action}" in
         install-azure-cli) install_azure_cli ;;
-        login-azure) login_azure ;;
         create-cluster) create_cluster "" ;;
         create-cluster-kcli) create_cluster_kcli ;;
         configure-snapshotter) configure_snapshotter ;;

File 15 of 21:

@@ -29,7 +29,6 @@ function main() {
     action="${1:-}"
     case "${action}" in
         install-azure-cli) install_azure_cli ;;
-        login-azure) login_azure ;;
         create-cluster) create_cluster ;;
         install-bats) install_bats ;;
         install-kata-tools) install_kata_tools ;;
@@ -46,4 +45,3 @@
 }

 main "$@"

File 16 of 21 (deleted):

@@ -1,31 +0,0 @@
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
FROM mcr.microsoft.com/azure-cli:2.9.1
LABEL com.github.actions.name="Test kata-deploy in an AKS cluster"
LABEL com.github.actions.description="Test kata-deploy in an AKS cluster"
# Default to latest validated AKS-engine version
ARG AKS_ENGINE_VER="v0.62.0"
ARG ARCH=amd64
ENV GITHUB_ACTION_NAME="Test kata-deploy in an AKS cluster"
# When run, we expect the caller (GitHub Action workflow) to provide the
# PKG_SHA environment variable
ENV PKG_SHA=HEAD
RUN curl -LO "https://dl.k8s.io/release/$(curl -s https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl" \
&& chmod +x ./kubectl \
&& mv ./kubectl /usr/local/bin/kubectl
RUN curl -LO "https://github.com/Azure/aks-engine/releases/download/${AKS_ENGINE_VER}/aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}.tar.gz" \
&& tar xvf "aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}.tar.gz" \
&& mv "aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}/aks-engine" /usr/local/bin/aks-engine \
&& rm "aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}.tar.gz"
COPY kubernetes-containerd.json /
COPY setup-aks.sh test-kata.sh entrypoint.sh /
ENTRYPOINT ["/entrypoint.sh"]

File 17 of 21 (deleted):

@@ -1,13 +0,0 @@
# action.yml
name: 'kata-deploy-aks'
description: 'test Kata container image in AKS'
inputs:
packaging-sha:
description: 'SHA we are using for pulling packaging manifests'
required: true
default: ''
runs:
using: 'docker'
image: 'Dockerfile'
args:
- ${{ inputs.packaging-sha }}

File 18 of 21 (deleted):

@@ -1,24 +0,0 @@
#!/usr/bin/env bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o pipefail
set -o nounset
# This entrypoint expects an environment variable, PKG_SHA, to be
# within the container runtime. A default is provided in the Dockerfile,
# but we expect the caller to pass this into the container run (ie docker run -e PKG_SHA=foo ...)
echo "provided package reference: ${PKG_SHA}"
# Since this is the entrypoint for the container image, we know that the AKS and Kata setup/testing
# scripts are located at root.
source /setup-aks.sh
source /test-kata.sh
trap destroy_aks EXIT
setup_aks
test_kata

File 19 of 21 (deleted):

@@ -1,43 +0,0 @@
{
"apiVersion": "vlabs",
"properties": {
"orchestratorProfile": {
"orchestratorType": "Kubernetes",
"orchestratorVersion": "1.20.5",
"kubernetesConfig": {
"containerRuntime": "containerd",
"useManagedIdentity": false
}
},
"masterProfile": {
"count": 1,
"dnsPrefix": "",
"vmSize": "Standard_D2_v2"
},
"agentPoolProfiles": [
{
"name": "agentpool",
"count": 1,
"vmSize": "Standard_D4s_v3",
"availabilityProfile": "AvailabilitySet"
}
],
"linuxProfile": {
"adminUsername": "azureuser",
"ssh": {
"publicKeys": [
{
"keyData": ""
}
]
}
},
"servicePrincipalProfile": {
"clientId": "",
"secret": ""
},
"linuxProfile": {
"runUnattendedUpgradesOnBootstrap": false
}
}
}

File 20 of 21 (deleted):

@@ -1,51 +0,0 @@
#!/usr/bin/env bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o pipefail
set -o nounset
export AZURE_HTTP_USER_AGENT="GITHUBACTIONS_${GITHUB_ACTION_NAME}_${GITHUB_REPOSITORY}"
LOCATION=${LOCATION:-westus2}
DNS_PREFIX=${DNS_PREFIX:-kata-deploy-${GITHUB_SHA:0:10}}
CLUSTER_CONFIG=${CLUSTER_CONFIG:-/kubernetes-containerd.json}
function die() {
msg="$*"
echo "ERROR: $msg" >&2
exit 1
}
function destroy_aks() {
set +x
export KUBECONFIG="${PWD}/_output/${DNS_PREFIX}/kubeconfig/kubeconfig.${LOCATION}.json"
az login --service-principal -u "$AZ_APPID" -p "$AZ_PASSWORD" --tenant "$AZ_TENANT_ID"
az group delete --name "$DNS_PREFIX" --yes --no-wait
az logout
}
function setup_aks() {
[[ -z "$AZ_APPID" ]] && die "no Azure service principal ID provided"
[[ -z "$AZ_PASSWORD" ]] && die "no Azure service principal secret provided"
[[ -z "$AZ_SUBSCRIPTION_ID" ]] && die "no Azure subscription ID provided"
[[ -z "$AZ_TENANT_ID" ]] && die "no Azure tenant ID provided"
aks-engine deploy --subscription-id "$AZ_SUBSCRIPTION_ID" \
--client-id "$AZ_APPID" --client-secret "$AZ_PASSWORD" \
--location "$LOCATION" --dns-prefix "$DNS_PREFIX" \
--api-model "$CLUSTER_CONFIG" --force-overwrite
export KUBECONFIG="${PWD}/_output/${DNS_PREFIX}/kubeconfig/kubeconfig.${LOCATION}.json"
# wait for the cluster to be settled:
kubectl wait --timeout=10m --for=condition=Ready --all nodes
# make sure coredns is up before moving forward:
kubectl wait --timeout=10m -n kube-system --for=condition=Available deployment/coredns
}

File 21 of 21 (deleted):

@@ -1,162 +0,0 @@
#!/usr/bin/env bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o pipefail
set -o nounset
function die() {
msg="$*"
echo "ERROR: $msg" >&2
exit 1
}
function waitForProcess() {
wait_time="$1"
cmd="$2"
sleep_time=5
echo "waiting for process $cmd"
while [ "$wait_time" -gt 0 ]; do
if eval "$cmd"; then
return 0
else
sleep "$sleep_time"
wait_time=$((wait_time-sleep_time))
fi
done
return 1
}
# waitForLabelRemoval will wait for the kata-runtime labels to removed until a given
# timeout expires
function waitForLabelRemoval() {
wait_time="$1"
sleep_time=5
echo "waiting for kata-runtime label to be removed"
while [[ "$wait_time" -gt 0 ]]; do
# if a node is found which matches node-select, the output will include a column for node name,
# NAME. Let's look for that
if [[ -z $(kubectl get nodes --selector katacontainers.io/kata-runtime 2>&1 | grep NAME) ]]
then
return 0
else
sleep "$sleep_time"
wait_time=$((wait_time-sleep_time))
fi
done
echo $(kubectl get pods,nodes --all-namespaces --show-labels)
echo "failed to cleanup"
return 1
}
function run_test() {
YAMLPATH="./tools/packaging/kata-deploy/"
echo "verify connectivity with a pod using Kata"
deployment=""
busybox_pod="test-nginx"
busybox_image="busybox"
cmd="kubectl get pods | grep $busybox_pod | grep Completed"
wait_time=120
configurations=("nginx-deployment-qemu" "nginx-deployment-clh" "nginx-deployment-dragonball")
for deployment in "${configurations[@]}"; do
# start the kata pod:
kubectl apply -f "$YAMLPATH/examples/${deployment}.yaml"
# in case the control plane is slow, give it a few seconds to accept the yaml, otherwise
# our 'wait' for deployment status will fail to find the deployment at all
sleep 3
kubectl wait --timeout=5m --for=condition=Available deployment/${deployment} || kubectl describe pods
kubectl expose deployment/${deployment}
# test pod connectivity:
kubectl run $busybox_pod --restart=Never --image="$busybox_image" -- wget --timeout=5 "$deployment"
waitForProcess "$wait_time" "$cmd"
kubectl logs "$busybox_pod" | grep "index.html"
kubectl describe pod "$busybox_pod"
# cleanup:
kubectl delete deployment "$deployment"
kubectl delete service "$deployment"
kubectl delete pod "$busybox_pod"
done
}
function test_kata() {
set -x
[[ -z "$PKG_SHA" ]] && die "no PKG_SHA provided"
YAMLPATH="./tools/packaging/kata-deploy/"
# This action could be called in two contexts:
# 1. Packaging workflows: testing in packaging repository, where we assume yaml/packaging
# bits under test are already part of the action workspace.
# 2. From kata-containers: when creating a release, the appropriate packaging repository is
# not yet part of the workspace, and we will need to clone
if [[ ! -d $YAMLPATH ]]; then
[[ -d $YAMLPATH ]] || git clone https://github.com/kata-containers/kata-containers
cd kata-containers
git fetch
git checkout $PKG_SHA
fi
kubectl apply -f "$YAMLPATH/kata-rbac/base/kata-rbac.yaml"
# apply runtime classes:
kubectl apply -f "$YAMLPATH/runtimeclasses/kata-runtimeClasses.yaml"
kubectl get runtimeclasses
# update deployment daemonset to utilize the container under test:
sed -i "s#quay.io/kata-containers/kata-deploy:latest#quay.io/kata-containers/kata-deploy-ci:${PKG_SHA}#g" $YAMLPATH/kata-deploy/base/kata-deploy.yaml
sed -i "s#quay.io/kata-containers/kata-deploy:latest#quay.io/kata-containers/kata-deploy-ci:${PKG_SHA}#g" $YAMLPATH/kata-cleanup/base/kata-cleanup.yaml
cat $YAMLPATH/kata-deploy/base/kata-deploy.yaml
# deploy kata:
kubectl apply -f $YAMLPATH/kata-deploy/base/kata-deploy.yaml
# in case the control plane is slow, give it a few seconds to accept the yaml, otherwise
# our 'wait' for deployment status will fail to find the deployment at all. If it can't persist
# the daemonset to etcd in 30 seconds... then we'll fail.
sleep 30
# wait for kata-deploy to be up
kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
# show running pods, and labels of nodes
kubectl get pods,nodes --all-namespaces --show-labels
run_test
kubectl get pods,nodes --show-labels
# Remove Kata
kubectl delete -f $YAMLPATH/kata-deploy/base/kata-deploy.yaml
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
kubectl get pods,nodes --show-labels
kubectl apply -f $YAMLPATH/kata-cleanup/base/kata-cleanup.yaml
# The cleanup daemonset will run a single time, since it will clear the node-label. Thus, it's difficult to
# check the daemonset's status for completion. Instead, let's wait until the kata-runtime labels are removed
# from all of the worker nodes. If this doesn't happen after 2 minutes, let's fail
timeout=120
waitForLabelRemoval $timeout
kubectl delete -f $YAMLPATH/kata-cleanup/base/kata-cleanup.yaml
set +x
}