Merge pull request #7621 from wainersm/gha-run-local
ci: k8s: adapt gha-run.sh to run locally
commit d311c3dd04
@@ -107,6 +107,11 @@ function delete_cluster() {
         --yes
 }
 
+function delete_cluster_kcli() {
+    CLUSTER_NAME="${CLUSTER_NAME:-kata-k8s}"
+    kcli delete -y kube "$CLUSTER_NAME"
+}
+
 function get_nodes_and_pods_info() {
     kubectl debug $(kubectl get nodes -o name) -it --image=quay.io/kata-containers/kata-debug:latest || true
     kubectl get pods -o name | grep node-debugger | xargs kubectl delete || true
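For context, `get_nodes_and_pods_info` relies on `kubectl debug node/...`, which leaves a `node-debugger-*` pod behind after every run; the trailing `xargs kubectl delete` is what reaps them. A rough standalone sketch of the same pattern (single node, image as used above):

# Sketch: open an ephemeral debug session on one node, then reap the
# node-debugger pod that `kubectl debug` leaves behind.
node="$(kubectl get nodes -o name | head -1)"   # e.g. "node/kata-k8s-worker-0"
kubectl debug "$node" -it --image=quay.io/kata-containers/kata-debug:latest || true
kubectl get pods -o name | grep node-debugger | xargs -r kubectl delete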
@@ -165,6 +170,44 @@ function deploy_k3s() {
     cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
 }
 
+function create_cluster_kcli() {
+    CLUSTER_NAME="${CLUSTER_NAME:-kata-k8s}"
+
+    delete_cluster_kcli || true
+
+    kcli create kube "${KUBE_TYPE:-generic}" \
+        -P domain="kata.com" \
+        -P pool="${LIBVIRT_POOL:-default}" \
+        -P ctlplanes="${CLUSTER_CONTROL_NODES:-1}" \
+        -P workers="${CLUSTER_WORKERS:-1}" \
+        -P network="${LIBVIRT_NETWORK:-default}" \
+        -P image="${CLUSTER_IMAGE:-ubuntu2004}" \
+        -P sdn=flannel \
+        -P nfs=false \
+        -P disk_size="${CLUSTER_DISK_SIZE:-20}" \
+        "${CLUSTER_NAME}"
+
+    export KUBECONFIG="$HOME/.kcli/clusters/$CLUSTER_NAME/auth/kubeconfig"
+
+    local cmd="kubectl get nodes | grep '.*worker.*\<Ready\>'"
+    echo "Wait at least one worker be Ready"
+    if ! waitForProcess "330" "30" "$cmd"; then
+        echo "ERROR: worker nodes not ready."
+        kubectl get nodes
+        return 1
+    fi
+
+    # Ensure that system pods are running or completed.
+    cmd="[ \$(kubectl get pods -A --no-headers | grep -v 'Running\|Completed' | wc -l) -eq 0 ]"
+    echo "Wait system pods be running or completed"
+    if ! waitForProcess "90" "30" "$cmd"; then
+        echo "ERROR: not all pods are Running or Completed."
+        kubectl get pods -A
+        kubectl get pods -A
+        return 1
+    fi
+}
+
 function deploy_rke2() {
     curl -sfL https://get.rke2.io | sudo sh -
 
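`create_cluster_kcli` polls with `waitForProcess`, a retry helper that lives in the repo's common test libraries rather than in this file. A minimal stand-in with the same `(wait_time, sleep_time, cmd)` calling convention — an illustration of the semantics, not the actual definition — would be:

# Stand-in: re-evaluate cmd every sleep_time seconds until it succeeds
# or wait_time seconds have elapsed; non-zero return means timeout.
waitForProcess() {
    local wait_time="$1" sleep_time="$2" cmd="$3"
    while [ "$wait_time" -gt 0 ]; do
        eval "$cmd" && return 0
        sleep "$sleep_time"
        wait_time=$((wait_time - sleep_time))
    done
    return 1
}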
@@ -10,8 +10,16 @@ set -o pipefail
 
 kubernetes_dir="$(dirname "$(readlink -f "$0")")"
 source "${kubernetes_dir}/../../gha-run-k8s-common.sh"
+# shellcheck disable=2154
 tools_dir="${repo_root_dir}/tools"
 
+DOCKER_REGISTRY=${DOCKER_REGISTRY:-quay.io}
+DOCKER_REPO=${DOCKER_REPO:-kata-containers/kata-deploy-ci}
+DOCKER_TAG=${DOCKER_TAG:-kata-containers-latest}
+KATA_DEPLOY_WAIT_TIMEOUT=${KATA_DEPLOY_WAIT_TIMEOUT:-10m}
+KATA_HYPERVISOR=${KATA_HYPERVISOR:-qemu}
+KUBERNETES="${KUBERNETES:-}"
+
 function configure_devmapper() {
     sudo mkdir -p /var/lib/containerd/devmapper
     sudo truncate --size 10G /var/lib/containerd/devmapper/data-disk.img
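Every variable above uses the `${VAR:-default}` form, so a local run can override any of them from the environment without editing the script; for example (registry, repo, and tag values here are purely illustrative):

# Illustrative local invocation overriding the new defaults.
DOCKER_REGISTRY=quay.io \
DOCKER_REPO=my-fork/kata-deploy-ci \
DOCKER_TAG=my-test-tag \
KATA_DEPLOY_WAIT_TIMEOUT=20m \
    ./gha-run.sh deploy-kata-kcli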
@@ -91,7 +99,10 @@ function deploy_kata() {
     platform="${1}"
     ensure_yq
 
-    # Emsure we're in the default namespace
+    [ "$platform" = "kcli" ] && \
+        export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
+
+    # Ensure we're in the default namespace
     kubectl config set-context --current --namespace=default
 
     sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
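The kcli branch only points `kubectl` at the kubeconfig that kcli writes for the cluster; the same can be done by hand when poking at the cluster outside the script (cluster name assumes the `kata-k8s` default):

# Point kubectl at the kcli-generated kubeconfig.
export KUBECONFIG="$HOME/.kcli/clusters/kata-k8s/auth/kubeconfig"
kubectl get nodes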
@@ -114,7 +125,7 @@ function deploy_kata() {
 
     echo "::group::Final kata-deploy.yaml that is used in the test"
     cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
-    cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
+    grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" || die "Failed to setup the tests image"
     echo "::endgroup::"
 
     kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
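The rewritten check drops the useless `cat` but keeps the semantics: `grep` exits non-zero when the expected image reference is missing, and `|| die` turns that into a hard failure. A standalone illustration of the same guard (file name and image reference invented for the example):

# Guard pattern: fail fast if the rendered manifest does not reference
# the image we just sed-ed in.
image_ref="quay.io/example/kata-deploy-ci:example-tag"   # illustrative
grep -q "${image_ref}" kata-deploy.yaml \
    || { echo "Failed to setup the tests image" >&2; exit 1; }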
@@ -123,7 +134,7 @@ function deploy_kata() {
     else
         kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
     fi
-    kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
+    kubectl -n kube-system wait --timeout="${KATA_DEPLOY_WAIT_TIMEOUT}" --for=condition=Ready -l name=kata-deploy pod
 
     # This is needed as the kata-deploy pod will be set to "Ready" when it starts running,
     # which may cause issues like not having the node properly labeled or the artefacts
@@ -144,11 +155,16 @@ function deploy_kata() {
 }
 
 function run_tests() {
     platform="${1:-}"
 
+    [ "$platform" = "kcli" ] && \
+        export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
+
+    # Delete any spurious tests namespace that was left behind
+    kubectl delete namespace kata-containers-k8s-tests &> /dev/null || true
+
     # Create a new namespace for the tests and switch to it
-    kubectl apply -f ${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml
+    kubectl apply -f "${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml"
     kubectl config set-context --current --namespace=kata-containers-k8s-tests
 
     pushd "${kubernetes_dir}"
@@ -162,6 +178,9 @@ function cleanup() {
     test_type="${2:-k8s}"
     ensure_yq
 
+    [ "$platform" = "kcli" ] && \
+        export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
+
     echo "Gather information about the nodes and pods before cleaning up the node"
     get_nodes_and_pods_info
 
@@ -182,6 +201,7 @@ function cleanup() {
         cleanup_spec="-f "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml""
     fi
 
+    # shellcheck disable=2086
     kubectl delete ${deploy_spec}
     kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
 
@@ -196,10 +216,12 @@ function cleanup() {
 
     sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
     cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
-    cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
+    grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" || die "Failed to setup the tests image"
+    # shellcheck disable=2086
     kubectl apply ${cleanup_spec}
     sleep 180s
 
+    # shellcheck disable=2086
     kubectl delete ${cleanup_spec}
     kubectl delete -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
 }
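The `# shellcheck disable=2086` markers are deliberate: `${deploy_spec}` and `${cleanup_spec}` hold multi-word argument strings (a flag plus a path), so they must stay unquoted to undergo word splitting. A small sketch of why quoting would break it (value illustrative):

cleanup_spec="-f ./kata-cleanup.yaml"     # two words on purpose
kubectl delete ${cleanup_spec}            # expands to: delete -f ./kata-cleanup.yaml
kubectl delete "${cleanup_spec}"          # would pass one bogus argument "-f ./kata-cleanup.yaml"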
@@ -214,6 +236,7 @@ function main() {
         install-azure-cli) install_azure_cli ;;
         login-azure) login_azure ;;
         create-cluster) create_cluster ;;
+        create-cluster-kcli) create_cluster_kcli ;;
         configure-snapshotter) configure_snapshotter ;;
         setup-crio) setup_crio ;;
         deploy-k8s) deploy_k8s ;;
@@ -221,16 +244,20 @@ function main() {
         install-kubectl) install_kubectl ;;
         get-cluster-credentials) get_cluster_credentials ;;
         deploy-kata-aks) deploy_kata "aks" ;;
+        deploy-kata-kcli) deploy_kata "kcli" ;;
         deploy-kata-sev) deploy_kata "sev" ;;
         deploy-kata-snp) deploy_kata "snp" ;;
         deploy-kata-tdx) deploy_kata "tdx" ;;
         deploy-kata-garm) deploy_kata "garm" ;;
         run-tests) run_tests ;;
+        run-tests-kcli) run_tests "kcli" ;;
+        cleanup-kcli) cleanup "kcli" ;;
         cleanup-sev) cleanup "sev" ;;
         cleanup-snp) cleanup "snp" ;;
         cleanup-tdx) cleanup "tdx" ;;
         cleanup-garm) cleanup "garm" ;;
         delete-cluster) cleanup "aks" ;;
+        delete-cluster-kcli) delete_cluster_kcli ;;
         *) >&2 echo "Invalid argument"; exit 2 ;;
     esac
 }
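Taken together, the new `main()` entries give a complete local loop on top of kcli — roughly:

# Illustrative end-to-end local run (libvirt and kcli assumed installed).
./gha-run.sh create-cluster-kcli
./gha-run.sh deploy-kata-kcli
./gha-run.sh run-tests-kcli
./gha-run.sh cleanup-kcli
./gha-run.sh delete-cluster-kcli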
@@ -14,7 +14,8 @@ setup() {
     [ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
     pod_name="test-file-volume"
     container_name="busybox-file-volume-container"
-    tmp_file=$(exec_host mktemp /tmp/file-volume-test-foo.XXXXX)
+    node="$(get_one_kata_node)"
+    tmp_file=$(exec_host "$node" mktemp /tmp/file-volume-test-foo.XXXXX)
     mount_path="/tmp/foo.txt"
     file_body="test"
     get_pod_config_dir
@@ -22,11 +23,12 @@ setup() {
 
 @test "Test readonly volume for pods" {
     # Write test body to temp file
-    exec_host "echo "$file_body" > $tmp_file"
+    exec_host "$node" "echo "$file_body" > $tmp_file"
 
     # Create test yaml
     sed -e "s|HOST_FILE|$tmp_file|" ${pod_config_dir}/pod-file-volume.yaml > ${pod_config_dir}/test-pod-file-volume.yaml
     sed -i "s|MOUNT_PATH|$mount_path|" ${pod_config_dir}/test-pod-file-volume.yaml
+    sed -i "s|NODE|$node|" ${pod_config_dir}/test-pod-file-volume.yaml
 
     # Create pod
     kubectl create -f "${pod_config_dir}/test-pod-file-volume.yaml"
@@ -43,6 +45,6 @@ teardown() {
     [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
     [ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
     kubectl delete pod "$pod_name"
-    exec_host rm -f $tmp_file
+    exec_host "$node" rm -f $tmp_file
     rm -f ${pod_config_dir}/test-pod-file-volume.yaml.yaml
 }
@@ -15,13 +15,17 @@ setup() {
 
     get_pod_config_dir
 
-    tmp_file=$(exec_host mktemp -d /tmp/data.XXXX)
+    node=$(get_one_kata_node)
+    tmp_file=$(exec_host "$node" mktemp -d /tmp/data.XXXX)
+    pv_yaml=$(mktemp --tmpdir pv_config.XXXXXX.yaml)
     pod_yaml=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
     msg="Hello from Kubernetes"
-    exec_host "echo $msg > $tmp_file/index.html"
+    exec_host "$node" "echo $msg > $tmp_file/index.html"
     pod_name="pv-pod"
     # Define temporary file at yaml
-    sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pod_yaml"
+    sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pv_yaml"
+    sed -e "s|NODE|${node}|g" "${pod_config_dir}/pv-pod.yaml" > "$pod_yaml"
+
 }
 
 @test "Create Persistent Volume" {
@@ -31,7 +35,7 @@ setup() {
     volume_claim="pv-claim"
 
     # Create the persistent volume
-    kubectl create -f "$pod_yaml"
+    kubectl create -f "$pv_yaml"
 
     # Check the persistent volume is Available
     cmd="kubectl get pv $volume_name | grep Available"
@@ -45,7 +49,7 @@ setup() {
     waitForProcess "$wait_time" "$sleep_time" "$cmd"
 
     # Create pod
-    kubectl create -f "${pod_config_dir}/pv-pod.yaml"
+    kubectl create -f "$pod_yaml"
 
     # Check pod creation
     kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
@@ -62,8 +66,9 @@ teardown() {
     kubectl describe "pod/$pod_name"
 
     kubectl delete pod "$pod_name"
+    rm -f "$pod_yaml"
     kubectl delete pvc "$volume_claim"
     kubectl delete pv "$volume_name"
-    rm -f "$pod_yaml"
-    exec_host rm -rf "$tmp_file"
+    rm -f "$pv_yaml"
+    exec_host "$node" rm -rf "$tmp_file"
 }
@@ -11,6 +11,7 @@ spec:
   terminationGracePeriodSeconds: 0
   runtimeClassName: kata
   restartPolicy: Never
+  nodeName: NODE
   volumes:
   - name: shared-file
     hostPath:
@@ -10,6 +10,7 @@ metadata:
 spec:
   terminationGracePeriodSeconds: 0
   runtimeClassName: kata
+  nodeName: NODE
   volumes:
   - name: pv-storage
     persistentVolumeClaim:
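The new `nodeName: NODE` line in both pod manifests is a template placeholder, not a literal value: the bats setup fills it in via `sed` with the node returned by `get_one_kata_node`, pinning the pod to the host whose filesystem the test just prepared. Roughly (node name illustrative):

# Render the template so the pod lands on the prepared node.
node="kata-k8s-worker-0"                            # illustrative
sed -e "s|NODE|${node}|g" pv-pod.yaml > pod.yaml    # mirrors the bats setup
kubectl create -f pod.yaml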
@@ -38,11 +38,23 @@ get_pod_config_dir() {
     info "k8s configured to use runtimeclass"
 }
 
+# Return the first worker found that is kata-runtime labeled.
+get_one_kata_node() {
+    local resource_name
+    resource_name="$(kubectl get node -l katacontainers.io/kata-runtime=true -o name | head -1)"
+    # Remove the leading "node/"
+    echo "${resource_name/"node/"}"
+}
+
 # Runs a command in the host filesystem.
+#
+# Parameters:
+#    $1 - the node name
+#
 exec_host() {
-    node="$(kubectl get node -o name)"
+    node="$1"
     # `kubectl debug` always returns 0, so we hack it to return the right exit code.
-    command="$@"
+    command="${@:2}"
     command+='; echo -en \\n$?'
     # We're trailing the `\r` here due to: https://github.com/kata-containers/kata-containers/issues/8051
     # tl;dr: When testing with CRI-O we're facing the following error:
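The `command+='; echo -en \\n$?'` line is the heart of the exit-code workaround: since `kubectl debug` itself always exits 0, the real command's status is smuggled out as the final line of its output and split off again by the `tail -1` / `head -n -1` pair in the next hunk. A self-contained demonstration of the same trick:

# Demonstration: recover the wrapped command's exit status from its output.
output="$(bash -c 'ls /nonexistent; echo -en \\n$?' 2>&1)"
exit_code="$(echo "${output}" | tail -1)"   # last line: the smuggled status
echo "${output}" | head -n -1               # everything else: the real output
echo "wrapped command exited with ${exit_code}"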
@@ -53,7 +65,7 @@ exec_host() {
     # [bats-exec-test:38] INFO: k8s configured to use runtimeclass
     # bash: line 1: $'\r': command not found
     # ```
-    output="$(kubectl debug -qit "${node}" --image=alpine:latest -- chroot /host bash -c "${command}" | tr -d '\r')"
+    output="$(kubectl debug -qit "node/${node}" --image=alpine:latest -- chroot /host bash -c "${command}" | tr -d '\r')"
     kubectl get pods -o name | grep node-debugger | xargs kubectl delete > /dev/null
     exit_code="$(echo "${output}" | tail -1)"
     echo "$(echo "${output}" | head -n -1)"
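With both changes in place, callers now name the node explicitly — the first argument is the node, the rest is the command — which is exactly how the updated bats files above use it:

# Illustrative call sequence, mirroring the updated bats tests.
node="$(get_one_kata_node)"          # first kata-labeled worker, e.g. "kata-k8s-worker-0"
exec_host "$node" "mktemp /tmp/file-volume-test-foo.XXXXX"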