testing: add workflows for testing kata-deploy

1. The AKS-based action is updated so it can be run from either the packaging
repo or a remote repository. We only clone kata-deploy for its yaml/scripts/tests
when the action runs outside of the packaging repo; when running in packaging,
those bits are already included. Miscellaneous cleanup as well.

2. A workflow is introduced which leverages the updated AKS action, allowing
packaging changes to kata-deploy to be tested.

The workflow itself uses the following GitHub action: xt0rted/slash-command-action

The workflow creates a kata-deploy container image based on the latest release,
utilizing the latest released Kata artifacts from master. It then runs the AKS
kata-deploy GitHub action against that image.

Users with admin access on the repo can trigger this test by commenting on a PR:
/test kata-deploy
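
For illustration, the slash-command action splits such a comment into a command
name and its arguments, and the workflow's verify step only proceeds when the
argument is kata-deploy. A rough shell sketch of that check (the parsing below is
illustrative, not the action's actual implementation):

    comment_body="/test kata-deploy"
    command_name=$(echo "$comment_body" | awk '{print substr($1,2)}')   # -> "test"
    command_args=$(echo "$comment_body" | cut -d' ' -f2-)               # -> "kata-deploy"
    [[ "$command_args" == "kata-deploy" ]] && echo "kata-deploy test requested"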

Fixes: #845

Signed-off-by: Eric Ernst <eric.ernst@intel.com>
commit f184afc4b8 (parent e8e4d75a91)
Eric Ernst, 2019-12-02 13:40:17 -08:00
5 changed files with 90 additions and 40 deletions

.github/workflows/kata-deploy-test.yaml (new file)

@@ -0,0 +1,54 @@
on: issue_comment
name: test-kata-deploy
jobs:
  check_comments:
    runs-on: ubuntu-latest
    steps:
      - name: Check for Command
        id: command
        uses: kata-containers/slash-command-action@v1
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          command: "test"
          reaction: "true"
          reaction-type: "eyes"
          allow-edits: "false"
          permission-level: admin
      - name: verify command arg is kata-deploy
        run: |
          echo "The command was '${{ steps.command.outputs.command-name }}' with arguments '${{ steps.command.outputs.command-arguments }}'"
          [[ ${{ steps.command.outputs.command-arguments}} == "kata-deploy" ]]
  create-and-test-container:
    needs: check_comments
    runs-on: ubuntu-latest
    steps:
      - name: get-PR-ref
        id: get-PR-ref
        run: |
          ref=$(cat $GITHUB_EVENT_PATH | jq -r '.issue.pull_request.url' | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#')
          echo "reference for PR: " ${ref}
          echo "##[set-output name=pr-ref;]${ref}"
      - uses: actions/checkout@v2-beta
        with:
          ref: ${{ steps.get-PR-ref.outputs.pr-ref }}
      - name: build-container-image
        id: build-container-image
        run: |
          PR_SHA=$(git log --format=format:%H -n1)
          VERSION=$(curl https://raw.githubusercontent.com/kata-containers/runtime/master/VERSION)
          ARTIFACT_URL="https://github.com/kata-containers/runtime/releases/download/${VERSION}/kata-static-${VERSION}-x86_64.tar.xz"
          wget "${ARTIFACT_URL}" -O ./kata-deploy/kata-static.tar.xz
          docker build --build-arg KATA_ARTIFACTS=kata-static.tar.xz -t katadocker/kata-deploy-ci:${PR_SHA} ./kata-deploy
          docker login -u ${{ secrets.DOCKER_USERNAME }} -p ${{ secrets.DOCKER_PASSWORD }}
          docker push katadocker/kata-deploy-ci:$PR_SHA
          echo "##[set-output name=pr-sha;]${PR_SHA}"
      - name: test-kata-deploy-ci-in-aks
        uses: ./kata-deploy/action
        with:
          packaging-sha: ${{ steps.build-container-image.outputs.pr-sha }}
        env:
          PKG_SHA: ${{ steps.build-container-image.outputs.pr-sha }}
          AZ_APPID: ${{ secrets.AZ_APPID }}
          AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
          AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
          AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
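
As an aside, the get-PR-ref step above derives a checkout ref from the pull request
API URL in the issue_comment payload. A minimal sketch of that transformation, using
a hypothetical PR URL (the PR number is illustrative; the real value comes from
.issue.pull_request.url in $GITHUB_EVENT_PATH):

    echo "https://api.github.com/repos/kata-containers/packaging/pulls/1234" \
        | sed 's#^.*\/pulls#refs\/pull#' | sed 's#$#\/merge#'
    # -> refs/pull/1234/merge  (the merge ref that actions/checkout then fetches)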

@@ -1,9 +1,9 @@
# action.yml
name: 'kata-deploy'
name: 'kata-deploy-aks'
description: 'test Kata container image in AKS'
inputs:
packaging-sha:
description: 'SHA we are using for pulling packaing manifests'
description: 'SHA we are using for pulling packaging manifests'
required: true
default: ''
runs:

@@ -23,9 +23,7 @@ function die() {
function destroy_aks() {
set +x
export KUBECONFIG="_output/$DNS_PREFIX/kubeconfig/kubeconfig.$LOCATION.json"
kubectl describe ds -n kube-system kata-deploy || true
kubectl describe ds -n kube-system kata-cleanup || true
export KUBECONFIG="$PWD/_output/$DNS_PREFIX/kubeconfig/kubeconfig.$LOCATION.json"
az login --service-principal -u "$AZ_APPID" -p "$AZ_PASSWORD" --tenant "$AZ_TENANT_ID"
az group delete --name "$DNS_PREFIX" --yes --no-wait

@@ -17,13 +17,13 @@ function die() {
function waitForProcess() {
wait_time="$1"
sleep_time="$2"
cmd="$3"
cmd="$2"
sleep_time=5
echo "waiting for process $cmd"
while [ "$wait_time" -gt 0 ]; do
if eval "$cmd"; then
return 0
else
echo "waiting"
sleep "$sleep_time"
wait_time=$((wait_time-sleep_time))
fi
@@ -35,16 +35,16 @@ function waitForProcess() {
# timeout expires
function waitForLabelRemoval() {
wait_time="$1"
sleep_time="$2"
sleep_time=5
echo "waiting for kata-runtime label to be removed"
while [[ "$wait_time" -gt 0 ]]; do
# if a node is found which matches the node selector, the output will include a column for the node name,
# NAME. Let's look for that
if [[ -z $(kubectl get nodes --selector katacontainers.io/kata-runtime | grep NAME) ]]
if [[ -z $(kubectl get nodes --selector katacontainers.io/kata-runtime 2>&1 | grep NAME) ]]
then
return 0
else
echo "waiting for kata-runtime label to be removed"
sleep "$sleep_time"
wait_time=$((wait_time-sleep_time))
fi
@@ -56,10 +56,8 @@ function waitForLabelRemoval() {
return 1
}
function run_test() {
PKG_SHA=$1
YAMLPATH="https://raw.githubusercontent.com/kata-containers/packaging/$PKG_SHA/kata-deploy"
YAMLPATH="./kata-deploy"
echo "verify connectivity with a pod using Kata"
deployment=""
@@ -67,7 +65,6 @@ function run_test() {
busybox_image="busybox"
cmd="kubectl get pods | grep $busybox_pod | grep Completed"
wait_time=120
sleep_time=3
configurations=("nginx-deployment-qemu" "nginx-deployment-qemu-virtiofs")
for deployment in "${configurations[@]}"; do
@@ -83,7 +80,7 @@ function run_test() {
# test pod connectivity:
kubectl run $busybox_pod --restart=Never --image="$busybox_image" -- wget --timeout=5 "$deployment"
waitForProcess "$wait_time" "$sleep_time" "$cmd"
waitForProcess "$wait_time" "$cmd"
kubectl logs "$busybox_pod" | grep "index.html"
kubectl describe pod "$busybox_pod"
@@ -99,12 +96,19 @@ function test_kata() {
set -x
[[ -z "$PKG_SHA" ]] && die "no PKG_SHA provided"
echo "$PKG_SHA"
#kubectl all the things
kubectl get pods,nodes --all-namespaces
# This action could be called in two contexts:
# 1. Packaging workflows: testing in the packaging repository, where we assume the yaml/packaging
# bits under test are already part of the action workspace.
# 2. From kata-containers: when creating a release, the appropriate packaging repository is
# not yet part of the workspace, and we will need to clone it.
if [[ ! -d ./kata-deploy ]]; then
git clone https://github.com/kata-containers/packaging packaging
cd packaging
git checkout $PKG_SHA
fi
YAMLPATH="https://raw.githubusercontent.com/kata-containers/packaging/$PKG_SHA/kata-deploy"
YAMLPATH="./kata-deploy"
kubectl apply -f "$YAMLPATH/kata-rbac/base/kata-rbac.yaml"
@@ -114,17 +118,14 @@ function test_kata() {
kubectl get runtimeclasses
curl -LO "$YAMLPATH/kata-deploy/base/kata-deploy.yaml"
curl -LO "$YAMLPATH/kata-cleanup/base/kata-cleanup.yaml"
# update deployment daemonset to utilize the container under test:
sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${PKG_SHA}#g" kata-deploy.yaml
sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${PKG_SHA}#g" kata-cleanup.yaml
sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${PKG_SHA}#g" $YAMLPATH/kata-deploy/base/kata-deploy.yaml
sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${PKG_SHA}#g" $YAMLPATH/kata-cleanup/base/kata-cleanup.yaml
cat kata-deploy.yaml
cat $YAMLPATH/kata-deploy/base/kata-deploy.yaml
# deploy kata:
kubectl apply -f kata-deploy.yaml
kubectl apply -f $YAMLPATH/kata-deploy/base/kata-deploy.yaml
# in case the control plane is slow, give it a few seconds to accept the yaml, otherwise
# our 'wait' for deployment status will fail to find the deployment at all. If it can't persist
@@ -137,29 +138,25 @@ function test_kata() {
# show running pods, and labels of nodes
kubectl get pods,nodes --all-namespaces --show-labels
run_test $PKG_SHA
run_test
kubectl get pods,nodes --show-labels
# Remove Kata
kubectl delete -f kata-deploy.yaml
kubectl delete -f $YAMLPATH/kata-deploy/base/kata-deploy.yaml
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
kubectl get pods,nodes --show-labels
kubectl apply -f kata-cleanup.yaml
kubectl apply -f $YAMLPATH/kata-cleanup/base/kata-cleanup.yaml
# The cleanup daemonset will run a single time, since it will clear the node label. Thus, it's difficult to
# check the daemonset's status for completion. Instead, let's wait until the kata-runtime labels are removed
# from all of the worker nodes. If this doesn't happen after 2 minutes, let's fail.
timeout=20
sleeptime=6
waitForLabelRemoval $timeout $sleeptime
timeout=120
waitForLabelRemoval $timeout
kubectl delete -f kata-cleanup.yaml
rm kata-cleanup.yaml
rm kata-deploy.yaml
kubectl delete -f $YAMLPATH/kata-cleanup/base/kata-cleanup.yaml
set +x
}

@@ -32,18 +32,19 @@ function print_usage() {
}
function get_container_runtime() {
local runtime="$(kubectl describe node $NODE_NAME)"
local runtime=$(kubectl get node $NODE_NAME -o jsonpath='{.status.nodeInfo.containerRuntimeVersion}' | awk -F '[:]' '{print $1}')
if [ "$?" -ne 0 ]; then
die "invalid node name"
fi
if echo "$runtime" | grep -qE 'Container Runtime Version.*containerd.*-k3s'; then
if echo "$runtime" | grep -qE 'containerd.*-k3s'; then
if systemctl is-active --quiet k3s-agent; then
echo "k3s-agent"
else
echo "k3s"
fi
else
echo "$runtime" | awk -F'[:]' '/Container Runtime Version/ {print $2}' | tr -d ' '
echo "$runtime"
fi
}
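
For context, the jsonpath query above returns the node's containerRuntimeVersion,
e.g. "containerd://1.2.10" or "cri-o://1.16.1" (the version strings here are
illustrative); splitting on ':' keeps just the runtime name. A minimal sketch:

    echo "containerd://1.2.10" | awk -F '[:]' '{print $1}'
    # -> containerd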