Merge pull request #7621 from wainersm/gha-run-local

ci: k8s: adapt gha-run.sh to run locally

Commit: d311c3dd04
@@ -107,6 +107,11 @@ function delete_cluster() {
         --yes
 }
 
+function delete_cluster_kcli() {
+    CLUSTER_NAME="${CLUSTER_NAME:-kata-k8s}"
+    kcli delete -y kube "$CLUSTER_NAME"
+}
+
 function get_nodes_and_pods_info() {
     kubectl debug $(kubectl get nodes -o name) -it --image=quay.io/kata-containers/kata-debug:latest || true
     kubectl get pods -o name | grep node-debugger | xargs kubectl delete || true
@@ -165,6 +170,44 @@ function deploy_k3s() {
     cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
 }
 
+function create_cluster_kcli() {
+    CLUSTER_NAME="${CLUSTER_NAME:-kata-k8s}"
+
+    delete_cluster_kcli || true
+
+    kcli create kube "${KUBE_TYPE:-generic}" \
+        -P domain="kata.com" \
+        -P pool="${LIBVIRT_POOL:-default}" \
+        -P ctlplanes="${CLUSTER_CONTROL_NODES:-1}" \
+        -P workers="${CLUSTER_WORKERS:-1}" \
+        -P network="${LIBVIRT_NETWORK:-default}" \
+        -P image="${CLUSTER_IMAGE:-ubuntu2004}" \
+        -P sdn=flannel \
+        -P nfs=false \
+        -P disk_size="${CLUSTER_DISK_SIZE:-20}" \
+        "${CLUSTER_NAME}"
+
+    export KUBECONFIG="$HOME/.kcli/clusters/$CLUSTER_NAME/auth/kubeconfig"
+
+    local cmd="kubectl get nodes | grep '.*worker.*\<Ready\>'"
+    echo "Wait at least one worker be Ready"
+    if ! waitForProcess "330" "30" "$cmd"; then
+        echo "ERROR: worker nodes not ready."
+        kubectl get nodes
+        return 1
+    fi
+
+    # Ensure that system pods are running or completed.
+    cmd="[ \$(kubectl get pods -A --no-headers | grep -v 'Running\|Completed' | wc -l) -eq 0 ]"
+    echo "Wait system pods be running or completed"
+    if ! waitForProcess "90" "30" "$cmd"; then
+        echo "ERROR: not all pods are Running or Completed."
+        kubectl get pods -A
+        kubectl get pods -A
+        return 1
+    fi
+}
+
 function deploy_rke2() {
     curl -sfL https://get.rke2.io | sudo sh -
 
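The new create_cluster_kcli() is driven entirely by environment variables with defaults, so a local run can resize or retarget the libvirt-backed cluster without editing the script. A minimal sketch of such an override, using only variable names that appear in the hunk above (the chosen values are illustrative, not part of this diff), before invoking the create-cluster-kcli target added to main() later in this diff:

# Hypothetical local override of the kcli cluster parameters.
export CLUSTER_NAME="kata-k8s-local"   # default: kata-k8s
export CLUSTER_WORKERS=2               # default: 1
export CLUSTER_DISK_SIZE=40            # default: 20 (GiB)
export CLUSTER_IMAGE="ubuntu2004"      # default: ubuntu2004
# ...then run the create-cluster-kcli target added to main() below.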
@@ -10,8 +10,16 @@ set -o pipefail
 
 kubernetes_dir="$(dirname "$(readlink -f "$0")")"
 source "${kubernetes_dir}/../../gha-run-k8s-common.sh"
+# shellcheck disable=2154
 tools_dir="${repo_root_dir}/tools"
 
+DOCKER_REGISTRY=${DOCKER_REGISTRY:-quay.io}
+DOCKER_REPO=${DOCKER_REPO:-kata-containers/kata-deploy-ci}
+DOCKER_TAG=${DOCKER_TAG:-kata-containers-latest}
+KATA_DEPLOY_WAIT_TIMEOUT=${KATA_DEPLOY_WAIT_TIMEOUT:-10m}
+KATA_HYPERVISOR=${KATA_HYPERVISOR:-qemu}
+KUBERNETES="${KUBERNETES:-}"
+
 function configure_devmapper() {
     sudo mkdir -p /var/lib/containerd/devmapper
     sudo truncate --size 10G /var/lib/containerd/devmapper/data-disk.img
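With these `VAR=${VAR:-default}` fallbacks the script no longer relies on GitHub Actions exporting the kata-deploy image coordinates; a local run can override them per invocation. A small sketch, assuming a kata-deploy test image you have pushed yourself (only the variable names come from the hunk above; the registry, repository and tag values are illustrative):

# Hypothetical override pointing deploy_kata() at a custom kata-deploy build.
export DOCKER_REGISTRY="quay.io"             # default: quay.io
export DOCKER_REPO="my-user/kata-deploy-ci"  # illustrative repository
export DOCKER_TAG="local-test"               # illustrative tag
export KATA_HYPERVISOR="qemu"                # default: qemu
export KATA_DEPLOY_WAIT_TIMEOUT="15m"        # default: 10m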
@@ -91,7 +99,10 @@ function deploy_kata() {
     platform="${1}"
     ensure_yq
 
-    # Emsure we're in the default namespace
+    [ "$platform" = "kcli" ] && \
+        export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
+
+    # Ensure we're in the default namespace
     kubectl config set-context --current --namespace=default
 
     sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
@@ -114,7 +125,7 @@ function deploy_kata() {
 
     echo "::group::Final kata-deploy.yaml that is used in the test"
     cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
-    cat "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
+    grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" || die "Failed to setup the tests image"
     echo "::endgroup::"
 
     kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
@@ -123,7 +134,7 @@ function deploy_kata() {
     else
         kubectl apply -f "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
     fi
-    kubectl -n kube-system wait --timeout=10m --for=condition=Ready -l name=kata-deploy pod
+    kubectl -n kube-system wait --timeout="${KATA_DEPLOY_WAIT_TIMEOUT}" --for=condition=Ready -l name=kata-deploy pod
 
     # This is needed as the kata-deploy pod will be set to "Ready" when it starts running,
     # which may cause issues like not having the node properly labeled or the artefacts
@@ -144,11 +155,16 @@ function deploy_kata() {
 }
 
 function run_tests() {
+    platform="${1:-}"
+
+    [ "$platform" = "kcli" ] && \
+        export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
+
     # Delete any spurious tests namespace that was left behind
     kubectl delete namespace kata-containers-k8s-tests &> /dev/null || true
 
     # Create a new namespace for the tests and switch to it
-    kubectl apply -f ${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml
+    kubectl apply -f "${kubernetes_dir}/runtimeclass_workloads/tests-namespace.yaml"
     kubectl config set-context --current --namespace=kata-containers-k8s-tests
 
     pushd "${kubernetes_dir}"
@@ -162,6 +178,9 @@ function cleanup() {
     test_type="${2:-k8s}"
     ensure_yq
 
+    [ "$platform" = "kcli" ] && \
+        export KUBECONFIG="$HOME/.kcli/clusters/${CLUSTER_NAME:-kata-k8s}/auth/kubeconfig"
+
     echo "Gather information about the nodes and pods before cleaning up the node"
     get_nodes_and_pods_info
 
@@ -182,6 +201,7 @@ function cleanup() {
         cleanup_spec="-f "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml""
     fi
 
+    # shellcheck disable=2086
     kubectl delete ${deploy_spec}
     kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
 
@@ -196,10 +216,12 @@ function cleanup() {
 
     sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
     cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
-    cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" | grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" || die "Failed to setup the tests image"
+    grep "${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" || die "Failed to setup the tests image"
+    # shellcheck disable=2086
     kubectl apply ${cleanup_spec}
     sleep 180s
 
+    # shellcheck disable=2086
     kubectl delete ${cleanup_spec}
     kubectl delete -f "${tools_dir}/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
 }
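The # shellcheck disable=2086 annotations in the two hunks above are deliberate: ${deploy_spec} and ${cleanup_spec} hold an option plus its argument (the -f form is visible above), so quoting them would hand kubectl a single malformed argument. A minimal sketch of the difference, with an illustrative value:

# Why ${deploy_spec}/${cleanup_spec} are expanded unquoted (SC2086 disabled on purpose).
spec='-f ./kata-deploy.yaml'        # illustrative content
kubectl delete ${spec}              # word-splits into: kubectl delete -f ./kata-deploy.yaml
kubectl delete "${spec}"            # one argument '-f ./kata-deploy.yaml', which kubectl rejects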
@@ -214,6 +236,7 @@ function main() {
         install-azure-cli) install_azure_cli ;;
         login-azure) login_azure ;;
         create-cluster) create_cluster ;;
+        create-cluster-kcli) create_cluster_kcli ;;
         configure-snapshotter) configure_snapshotter ;;
         setup-crio) setup_crio ;;
         deploy-k8s) deploy_k8s ;;
@@ -221,16 +244,20 @@ function main() {
         install-kubectl) install_kubectl ;;
         get-cluster-credentials) get_cluster_credentials ;;
         deploy-kata-aks) deploy_kata "aks" ;;
+        deploy-kata-kcli) deploy_kata "kcli" ;;
         deploy-kata-sev) deploy_kata "sev" ;;
         deploy-kata-snp) deploy_kata "snp" ;;
         deploy-kata-tdx) deploy_kata "tdx" ;;
         deploy-kata-garm) deploy_kata "garm" ;;
         run-tests) run_tests ;;
+        run-tests-kcli) run_tests "kcli" ;;
+        cleanup-kcli) cleanup "kcli" ;;
         cleanup-sev) cleanup "sev" ;;
         cleanup-snp) cleanup "snp" ;;
         cleanup-tdx) cleanup "tdx" ;;
         cleanup-garm) cleanup "garm" ;;
         delete-cluster) cleanup "aks" ;;
+        delete-cluster-kcli) delete_cluster_kcli ;;
         *) >&2 echo "Invalid argument"; exit 2 ;;
     esac
 }
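Taken together, the new main() entries give a complete local workflow that mirrors what the GitHub Actions jobs do against AKS. A plausible end-to-end sequence, assuming the script is tests/integration/kubernetes/gha-run.sh (the path is inferred from the sourced ../../gha-run-k8s-common.sh, not stated in this diff):

# Hypothetical local run using the kcli targets added in this PR.
cd tests/integration/kubernetes     # assumed location of gha-run.sh
./gha-run.sh create-cluster-kcli    # create a libvirt/kcli cluster
./gha-run.sh deploy-kata-kcli       # deploy kata-deploy into it
./gha-run.sh run-tests-kcli         # run the k8s bats suite
./gha-run.sh cleanup-kcli           # undo the kata-deploy changes
./gha-run.sh delete-cluster-kcli    # tear the cluster down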
@@ -14,7 +14,8 @@ setup() {
     [ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
     pod_name="test-file-volume"
     container_name="busybox-file-volume-container"
-    tmp_file=$(exec_host mktemp /tmp/file-volume-test-foo.XXXXX)
+    node="$(get_one_kata_node)"
+    tmp_file=$(exec_host "$node" mktemp /tmp/file-volume-test-foo.XXXXX)
     mount_path="/tmp/foo.txt"
     file_body="test"
     get_pod_config_dir
@@ -22,11 +23,12 @@ setup() {
 
 @test "Test readonly volume for pods" {
     # Write test body to temp file
-    exec_host "echo "$file_body" > $tmp_file"
+    exec_host "$node" "echo "$file_body" > $tmp_file"
 
     # Create test yaml
     sed -e "s|HOST_FILE|$tmp_file|" ${pod_config_dir}/pod-file-volume.yaml > ${pod_config_dir}/test-pod-file-volume.yaml
     sed -i "s|MOUNT_PATH|$mount_path|" ${pod_config_dir}/test-pod-file-volume.yaml
+    sed -i "s|NODE|$node|" ${pod_config_dir}/test-pod-file-volume.yaml
 
     # Create pod
     kubectl create -f "${pod_config_dir}/test-pod-file-volume.yaml"
@@ -43,6 +45,6 @@ teardown() {
     [ "${KATA_HYPERVISOR}" == "firecracker" ] && skip "test not working see: ${fc_limitations}"
     [ "${KATA_HYPERVISOR}" == "fc" ] && skip "test not working see: ${fc_limitations}"
     kubectl delete pod "$pod_name"
-    exec_host rm -f $tmp_file
+    exec_host "$node" rm -f $tmp_file
     rm -f ${pod_config_dir}/test-pod-file-volume.yaml.yaml
 }
@@ -15,13 +15,17 @@ setup() {
 
     get_pod_config_dir
 
-    tmp_file=$(exec_host mktemp -d /tmp/data.XXXX)
+    node=$(get_one_kata_node)
+    tmp_file=$(exec_host "$node" mktemp -d /tmp/data.XXXX)
+    pv_yaml=$(mktemp --tmpdir pv_config.XXXXXX.yaml)
     pod_yaml=$(mktemp --tmpdir pod_config.XXXXXX.yaml)
     msg="Hello from Kubernetes"
-    exec_host "echo $msg > $tmp_file/index.html"
+    exec_host "$node" "echo $msg > $tmp_file/index.html"
     pod_name="pv-pod"
     # Define temporary file at yaml
-    sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pod_yaml"
+    sed -e "s|tmp_data|${tmp_file}|g" ${pod_config_dir}/pv-volume.yaml > "$pv_yaml"
+    sed -e "s|NODE|${node}|g" "${pod_config_dir}/pv-pod.yaml" > "$pod_yaml"
+
 }
 
 @test "Create Persistent Volume" {
@@ -31,7 +35,7 @@ setup() {
     volume_claim="pv-claim"
 
     # Create the persistent volume
-    kubectl create -f "$pod_yaml"
+    kubectl create -f "$pv_yaml"
 
     # Check the persistent volume is Available
     cmd="kubectl get pv $volume_name | grep Available"
@@ -45,7 +49,7 @@ setup() {
     waitForProcess "$wait_time" "$sleep_time" "$cmd"
 
     # Create pod
-    kubectl create -f "${pod_config_dir}/pv-pod.yaml"
+    kubectl create -f "$pod_yaml"
 
     # Check pod creation
     kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
@@ -62,8 +66,9 @@ teardown() {
     kubectl describe "pod/$pod_name"
 
     kubectl delete pod "$pod_name"
+    rm -f "$pod_yaml"
     kubectl delete pvc "$volume_claim"
     kubectl delete pv "$volume_name"
-    rm -f "$pod_yaml"
-    exec_host rm -rf "$tmp_file"
+    rm -f "$pv_yaml"
+    exec_host "$node" rm -rf "$tmp_file"
 }
@@ -11,6 +11,7 @@ spec:
   terminationGracePeriodSeconds: 0
   runtimeClassName: kata
   restartPolicy: Never
+  nodeName: NODE
   volumes:
   - name: shared-file
     hostPath:
@@ -10,6 +10,7 @@ metadata:
 spec:
   terminationGracePeriodSeconds: 0
   runtimeClassName: kata
+  nodeName: NODE
   volumes:
   - name: pv-storage
     persistentVolumeClaim:
@@ -38,11 +38,23 @@ get_pod_config_dir() {
     info "k8s configured to use runtimeclass"
 }
 
+# Return the first worker found that is kata-runtime labeled.
+get_one_kata_node() {
+    local resource_name
+    resource_name="$(kubectl get node -l katacontainers.io/kata-runtime=true -o name | head -1)"
+    # Remove leading "/node"
+    echo "${resource_name/"node/"}"
+}
+
 # Runs a command in the host filesystem.
+#
+# Parameters:
+#    $1 - the node name
+#
 exec_host() {
-    node="$(kubectl get node -o name)"
+    node="$1"
     # `kubectl debug` always returns 0, so we hack it to return the right exit code.
-    command="$@"
+    command="${@:2}"
     command+='; echo -en \\n$?'
     # We're trailing the `\r` here due to: https://github.com/kata-containers/kata-containers/issues/8051
     # tl;dr: When testing with CRI-O we're facing the foillowing error:
@@ -53,7 +65,7 @@ exec_host() {
     # [bats-exec-test:38] INFO: k8s configured to use runtimeclass
     # bash: line 1: $'\r': command not found
     # ```
-    output="$(kubectl debug -qit "${node}" --image=alpine:latest -- chroot /host bash -c "${command}" | tr -d '\r')"
+    output="$(kubectl debug -qit "node/${node}" --image=alpine:latest -- chroot /host bash -c "${command}" | tr -d '\r')"
     kubectl get pods -o name | grep node-debugger | xargs kubectl delete > /dev/null
     exit_code="$(echo "${output}" | tail -1)"
     echo "$(echo "${output}" | head -n -1)"
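With this change exec_host() no longer targets whatever kubectl get node -o name returns; the caller picks one kata-labeled worker and passes it as the first argument, with the rest of the arguments forming the command. A usage sketch following the updated tests (the cat and rm commands are illustrative):

# Hypothetical caller using the new exec_host() calling convention.
node="$(get_one_kata_node)"                           # first node labeled katacontainers.io/kata-runtime=true
os_release="$(exec_host "$node" cat /etc/os-release)" # $1 is the node, ${@:2} is the host command
exec_host "$node" "rm -f /tmp/some-test-file"         # cleanup runs on the same node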
@@ -6,39 +6,39 @@ metadata:
   namespace: kube-system
 spec:
   selector:
     matchLabels:
       name: kubelet-kata-cleanup
   template:
     metadata:
       labels:
         name: kubelet-kata-cleanup
     spec:
       serviceAccountName: kata-deploy-sa
       hostPID: true
       nodeSelector:
         katacontainers.io/kata-runtime: cleanup
       containers:
         - name: kube-kata-cleanup
           image: quay.io/kata-containers/kata-deploy:latest
           imagePullPolicy: Always
-          command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh reset" ]
+          command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh reset"]
           env:
             - name: NODE_NAME
               valueFrom:
                 fieldRef:
                   fieldPath: spec.nodeName
             - name: DEBUG
               value: "false"
             - name: SHIMS
               value: "clh dragonball fc qemu-nvidia-gpu qemu-sev qemu-snp qemu-tdx qemu"
             - name: DEFAULT_SHIM
               value: "qemu"
             - name: CREATE_RUNTIMECLASSES
               value: "false"
             - name: CREATE_DEFAULT_RUNTIMECLASS
               value: "false"
           securityContext:
             privileged: true
   updateStrategy:
     rollingUpdate:
       maxUnavailable: 1
@@ -6,50 +6,50 @@ metadata:
   namespace: kube-system
 spec:
   selector:
     matchLabels:
       name: kata-deploy
   template:
     metadata:
       labels:
         name: kata-deploy
     spec:
       serviceAccountName: kata-deploy-sa
       hostPID: true
       containers:
         - name: kube-kata
           image: quay.io/kata-containers/kata-deploy:latest
           imagePullPolicy: Always
           lifecycle:
             preStop:
               exec:
                 command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh cleanup"]
-          command: [ "bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install" ]
+          command: ["bash", "-c", "/opt/kata-artifacts/scripts/kata-deploy.sh install"]
           env:
             - name: NODE_NAME
               valueFrom:
                 fieldRef:
                   fieldPath: spec.nodeName
             - name: DEBUG
               value: "false"
             - name: SHIMS
               value: "clh dragonball fc qemu qemu-nvidia-gpu qemu-sev qemu-snp qemu-tdx"
             - name: DEFAULT_SHIM
               value: "qemu"
             - name: CREATE_RUNTIMECLASSES
               value: "false"
             - name: CREATE_DEFAULT_RUNTIMECLASS
               value: "false"
           securityContext:
             privileged: true
           volumeMounts:
             - name: crio-conf
               mountPath: /etc/crio/
             - name: containerd-conf
               mountPath: /etc/containerd/
             - name: kata-artifacts
               mountPath: /opt/kata/
             - name: local-bin
               mountPath: /usr/local/bin/
       volumes:
         - name: crio-conf
           hostPath: