Mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-06-27 07:48:55 +00:00)
kata-deploy: Add a simple GitHub Action
Create a container-based action which will test a Kata artifact tarball in the kata-deploy daemonset on AKS. The AZ credentials are expected to be available in the caller's environment.

Signed-off-by: Eric Ernst <eric.ernst@intel.com>
parent 4eb376b40e
commit 9a7d6922b4
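Before the file-by-file diff, a sketch of how the pieces fit together: the image bundles kubectl and aks-engine, and the entrypoint drives setup-aks.sh and test-kata.sh, with credentials and the package SHA arriving via the environment. A minimal way to exercise the image outside of a GitHub Actions run might look like the following; the image tag and placeholder values are illustrative only, not part of this commit:

    # Build the action image from this commit's Dockerfile.
    docker build -t kata-deploy-aks-test kata-deploy/action/

    # Run it roughly the way the Action runner would: credentials and the
    # packaging SHA come in via the environment. GITHUB_REPOSITORY and
    # GITHUB_SHA are normally injected by the runner; set them by hand for
    # a local run, since the scripts use `set -o nounset`.
    docker run --rm \
        -e PKG_SHA="<packaging repo sha>" \
        -e AZ_APPID="$AZ_APPID" \
        -e AZ_PASSWORD="$AZ_PASSWORD" \
        -e AZ_SUBSCRIPTION_ID="$AZ_SUBSCRIPTION_ID" \
        -e AZ_TENANT_ID="$AZ_TENANT_ID" \
        -e GITHUB_REPOSITORY="<owner>/<repo>" \
        -e GITHUB_SHA="$(git rev-parse HEAD)" \
        kata-deploy-aks-test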
31  kata-deploy/action/Dockerfile  (Normal file)
@@ -0,0 +1,31 @@
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
FROM microsoft/azure-cli:latest

LABEL com.github.actions.name="Test kata-deploy in an AKS cluster"
LABEL com.github.actions.description="Test kata-deploy in an AKS cluster"

# Default to latest validated AKS-engine version
ARG AKS_ENGINE_VER="v0.42.0"
ARG ARCH=amd64

ENV GITHUB_ACTION_NAME="Test kata-deploy in an AKS cluster"

# When run, we expect the caller (GitHub Action workflow) to provide the
# PKG_SHA environment variable
ENV PKG_SHA=HEAD

RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/${ARCH}/kubectl \
    && chmod +x ./kubectl \
    && mv ./kubectl /usr/local/bin/kubectl

RUN curl -LO https://github.com/Azure/aks-engine/releases/download/${AKS_ENGINE_VER}/aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}.tar.gz \
    && tar xvf aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}.tar.gz \
    && mv aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}/aks-engine /usr/local/bin/aks-engine \
    && rm aks-engine-${AKS_ENGINE_VER}-linux-${ARCH}.tar.gz

COPY kubernetes-containerd.json /
COPY setup-aks.sh test-kata.sh entrypoint.sh /

ENTRYPOINT ["/entrypoint.sh"]
13  kata-deploy/action/action.yaml  (Normal file)
@@ -0,0 +1,13 @@
# action.yaml
name: 'kata-deploy'
description: 'test Kata container image in AKS'
inputs:
  packaging-sha:
    description: 'SHA we are using for pulling packaging manifests'
    required: true
    default: ''
runs:
  using: 'docker'
  image: 'Dockerfile'
  args:
    - ${{ inputs.packaging-sha }}
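For reference, a hypothetical caller workflow might wire this action up as sketched below. The secret names mirror the AZ_* variables read by setup-aks.sh, and PKG_SHA is passed as env in addition to the packaging-sha input because entrypoint.sh reads it from the environment. None of this workflow is defined by the commit itself:

    # Hypothetical caller workflow (sketch only).
    name: test-kata-deploy
    on: [push]
    jobs:
      aks-test:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v1
          - name: test kata-deploy in an AKS cluster
            uses: ./kata-deploy/action
            with:
              packaging-sha: ${{ github.sha }}
            env:
              AZ_APPID: ${{ secrets.AZ_APPID }}
              AZ_PASSWORD: ${{ secrets.AZ_PASSWORD }}
              AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
              AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
              PKG_SHA: ${{ github.sha }}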
24  kata-deploy/action/entrypoint.sh  (Executable file)
@@ -0,0 +1,24 @@
#!/bin/bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o pipefail
set -o nounset

# This entrypoint expects an environment variable, PKG_SHA, to be set
# within the container runtime. A default is provided in the Dockerfile,
# but we expect the caller to pass this into the container run (i.e. docker run -e PKG_SHA=foo ...)
echo "provided package reference: ${PKG_SHA}"

# Since this is the entrypoint for the container image, we know that the AKS and Kata setup/testing
# scripts are located at root.
source /setup-aks.sh
source /test-kata.sh

trap destroy_aks EXIT

setup_aks
test_kata
41  kata-deploy/action/kubernetes-containerd.json  (Normal file)
@@ -0,0 +1,41 @@
{
  "apiVersion": "vlabs",
  "properties": {
    "orchestratorProfile": {
      "orchestratorType": "Kubernetes",
      "orchestratorVersion": "1.15.4",
      "kubernetesConfig": {
        "networkPlugin": "flannel",
        "containerRuntime": "containerd",
        "containerdVersion": "1.2.4"
      }
    },
    "masterProfile": {
      "count": 1,
      "dnsPrefix": "",
      "vmSize": "Standard_D2_v2"
    },
    "agentPoolProfiles": [
      {
        "name": "agentpool",
        "count": 1,
        "vmSize": "Standard_D4s_v3",
        "availabilityProfile": "AvailabilitySet"
      }
    ],
    "linuxProfile": {
      "adminUsername": "azureuser",
      "ssh": {
        "publicKeys": [
          {
            "keyData": ""
          }
        ]
      }
    },
    "servicePrincipalProfile": {
      "clientId": "",
      "secret": ""
    }
  }
}
53  kata-deploy/action/setup-aks.sh  (Executable file)
@@ -0,0 +1,53 @@
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
set -o errexit
set -o pipefail
set -o nounset

export AZURE_HTTP_USER_AGENT="GITHUBACTIONS_${GITHUB_ACTION_NAME}_${GITHUB_REPOSITORY}"

LOCATION=${LOCATION:-westus2}
DNS_PREFIX=${DNS_PREFIX:-kata-deploy-${GITHUB_SHA:0:10}}
CLUSTER_CONFIG=${CLUSTER_CONFIG:-/kubernetes-containerd.json}

function die() {
    msg="$*"
    echo "ERROR: $msg" >&2
    exit 1
}

function destroy_aks() {
    set +x

    export KUBECONFIG="_output/$DNS_PREFIX/kubeconfig/kubeconfig.$LOCATION.json"
    kubectl describe ds -n kube-system kata-deploy || true
    kubectl describe ds -n kube-system kata-cleanup || true

    az login --service-principal -u "$AZ_APPID" -p "$AZ_PASSWORD" --tenant "$AZ_TENANT_ID"
    az group delete --name "$DNS_PREFIX" --yes --no-wait
    az logout
}

function setup_aks() {
    [[ -z "$AZ_APPID" ]] && die "no Azure service principal ID provided"
    [[ -z "$AZ_PASSWORD" ]] && die "no Azure service principal secret provided"
    [[ -z "$AZ_SUBSCRIPTION_ID" ]] && die "no Azure subscription ID provided"
    [[ -z "$AZ_TENANT_ID" ]] && die "no Azure tenant ID provided"

    aks-engine deploy --subscription-id "$AZ_SUBSCRIPTION_ID" \
        --client-id "$AZ_APPID" --client-secret "$AZ_PASSWORD" \
        --location "$LOCATION" --dns-prefix "$DNS_PREFIX" \
        --api-model "$CLUSTER_CONFIG" --force-overwrite

    export KUBECONFIG="_output/$DNS_PREFIX/kubeconfig/kubeconfig.$LOCATION.json"

    # wait for the cluster to be settled:
    kubectl wait --timeout=10m --for=condition=Ready --all nodes

    # make sure coredns is up before moving forward:
    kubectl wait --timeout=5m -n kube-system --for=condition=Available deployment.extensions/coredns
}
163  kata-deploy/action/test-kata.sh  (Executable file)
@@ -0,0 +1,163 @@
#!/bin/bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

set -o errexit
set -o pipefail
set -o nounset

function die() {
    msg="$*"
    echo "ERROR: $msg" >&2
    exit 1
}

function waitForProcess() {
    wait_time="$1"
    sleep_time="$2"
    cmd="$3"
    while [ "$wait_time" -gt 0 ]; do
        if eval "$cmd"; then
            return 0
        else
            echo "waiting"
            sleep "$sleep_time"
            wait_time=$((wait_time-sleep_time))
        fi
    done
    return 1
}

# waitForLabelRemoval will wait for the kata-runtime labels to be removed before a given
# timeout expires
function waitForLabelRemoval() {
    wait_time="$1"
    sleep_time="$2"

    while [[ "$wait_time" -gt 0 ]]; do
        # if a node is found which matches the node selector, the output will include a
        # column for the node name, NAME. Let's look for that.
        if [[ -z $(kubectl get nodes --selector katacontainers.io/kata-runtime | grep NAME) ]]
        then
            return 0
        else
            echo "waiting for kata-runtime label to be removed"
            sleep "$sleep_time"
            wait_time=$((wait_time-sleep_time))
        fi
    done

    echo "failed to cleanup"
    return 1
}


function run_test() {
    PKG_SHA=$1
    YAMLPATH="https://raw.githubusercontent.com/amshinde/kata-packaging/$PKG_SHA/kata-deploy"
    echo "verify connectivity with a pod using Kata"

    deployment=""
    busybox_pod="test-nginx"
    busybox_image="busybox"
    cmd="kubectl get pods | grep $busybox_pod | grep Completed"
    wait_time=120
    sleep_time=3

    configurations=("nginx-deployment-qemu" "nginx-deployment-qemu-virtiofs")
    for deployment in "${configurations[@]}"; do
        # start the kata pod:
        kubectl apply -f "$YAMLPATH/examples/${deployment}.yaml"

        # in case the control plane is slow, give it a few seconds to accept the yaml, otherwise
        # our 'wait' for deployment status will fail to find the deployment at all
        sleep 3

        kubectl wait --timeout=5m --for=condition=Available deployment/${deployment}
        kubectl expose deployment/${deployment}

        # test pod connectivity:
        kubectl run $busybox_pod --restart=Never --image="$busybox_image" -- wget --timeout=5 "$deployment"
        waitForProcess "$wait_time" "$sleep_time" "$cmd"
        kubectl logs "$busybox_pod" | grep "index.html"
        kubectl describe pod "$busybox_pod"

        # cleanup:
        kubectl delete deployment "$deployment"
        kubectl delete service "$deployment"
        kubectl delete pod "$busybox_pod"
    done
}


function test_kata() {
    set -x

    [[ -z "$PKG_SHA" ]] && die "no PKG_SHA provided"
    echo "$PKG_SHA"

    # kubectl all the things
    kubectl get pods,nodes --all-namespaces

    YAMLPATH="https://raw.githubusercontent.com/amshinde/kata-packaging/$PKG_SHA/kata-deploy"

    kubectl apply -f "$YAMLPATH/kata-rbac.yaml"

    # apply runtime classes:
    kubectl apply -f "$YAMLPATH/k8s-1.14/kata-qemu-runtimeClass.yaml"
    kubectl apply -f "$YAMLPATH/k8s-1.14/kata-qemu-virtiofs-runtimeClass.yaml"

    kubectl get runtimeclasses

    curl -LO "$YAMLPATH/kata-deploy.yaml"
    curl -LO "$YAMLPATH/kata-cleanup.yaml"

    # update the deployment daemonset to utilize the container under test:
    sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${PKG_SHA}#g" kata-deploy.yaml
    sed -i "s#katadocker/kata-deploy#katadocker/kata-deploy-ci:${PKG_SHA}#g" kata-cleanup.yaml

    cat kata-deploy.yaml

    # deploy kata:
    kubectl apply -f kata-deploy.yaml

    # in case the control plane is slow, give it a few seconds to accept the yaml, otherwise
    # our 'wait' for daemonset status will fail to find the daemonset at all. If it can't persist
    # the daemonset to etcd in 30 seconds... then we'll fail.
    sleep 30

    # wait for kata-deploy to be up
    kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod

    # show running pods, and labels of nodes
    kubectl get pods,nodes --all-namespaces --show-labels

    run_test "$PKG_SHA"

    kubectl get pods,nodes --show-labels

    # Remove Kata
    kubectl delete -f kata-deploy.yaml
    kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod

    kubectl get pods,nodes --show-labels

    kubectl apply -f kata-cleanup.yaml

    # The cleanup daemonset will run a single time, since it will clear the node label. Thus, it's
    # difficult to check the daemonset's status for completion. Instead, let's wait until the
    # kata-runtime labels are removed from all of the worker nodes. If this doesn't happen in
    # 45 seconds, let's fail.
    timeout=45
    sleeptime=1
    waitForLabelRemoval $timeout $sleeptime

    kubectl delete -f kata-cleanup.yaml

    rm kata-cleanup.yaml
    rm kata-deploy.yaml

    set +x
}
20  kata-deploy/examples/nginx-deployment-fc.yaml  (Normal file)
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-fc
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      runtimeClassName: kata-fc
      containers:
      - name: nginx
        image: nginx:1.14
        ports:
        - containerPort: 80
20  kata-deploy/examples/nginx-deployment-nemu.yaml  (Normal file)
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-nemu
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      runtimeClassName: kata-nemu
      containers:
      - name: nginx
        image: nginx:1.14
        ports:
        - containerPort: 80
20  kata-deploy/examples/nginx-deployment-qemu-virtiofs.yaml  (Normal file)
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-qemu-virtiofs
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      runtimeClassName: kata-qemu-virtiofs
      containers:
      - name: nginx
        image: nginx:1.14
        ports:
        - containerPort: 80
20  kata-deploy/examples/nginx-deployment-qemu.yaml  (Normal file)
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-qemu
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      runtimeClassName: kata-qemu
      containers:
      - name: nginx
        image: nginx:1.14
        ports:
        - containerPort: 80