tests: add guest pull image test

Add a test case of pulling image inside the guest for confidential
containers.

Signed-off-by: Da Li Liu <liudali@cn.ibm.com>
Signed-off-by: ChengyuZhu6 <chengyu.zhu@intel.com>
Co-authored-by: Fabiano Fidêncio <fabiano.fidencio@intel.com>
Co-authored-by: stevenhorsman <steven@uk.ibm.com>
Co-authored-by: Georgina Kinge <georgina.kinge@ibm.com>
Co-authored-by: Megan Wright <Megan.Wright@ibm.com>
This commit is contained in:
ChengyuZhu6 2023-11-28 22:15:05 +08:00 committed by Fabiano Fidêncio
parent e8c4effc07
commit c52b356482
No known key found for this signature in database
GPG Key ID: EE926C2BDACC177B
3 changed files with 268 additions and 3 deletions

View File

@@ -0,0 +1,175 @@
#!/usr/bin/env bats
# Copyright (c) 2023 Intel Corporation
# Copyright (c) 2023 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/lib.sh"
load "${BATS_TEST_DIRNAME}/confidential_common.sh"
setup() {
# Bail out of every test early when the configured hypervisor has no
# confidential-containers support (helper comes from confidential_common.sh).
confidential_setup || skip "Test not supported for ${KATA_HYPERVISOR}."
# Common per-test initialization (presumably sets $node/$pod_name used below
# — TODO confirm against lib.sh).
setup_common
# Container images exercised by the pull tests; both are plain, unencrypted images.
unencrypted_image_1="quay.io/sjenning/nginx:1.15-alpine"
unencrypted_image_2="quay.io/prometheus/busybox:latest"
}
@test "Test we can pull an unencrypted image outside the guest with runc and then inside the guest successfully" {
	# Guest pull only makes sense on TEE-capable hypervisors.
	[[ " ${SUPPORTED_NON_TEE_HYPERVISORS} " =~ " ${KATA_HYPERVISOR} " ]] && skip "Test not supported for ${KATA_HYPERVISOR}."

	# 1. Create one runc pod with the $unencrypted_image_1 image
	# We want to have one runc pod, so we pass a fake runtimeclass "runc" and then delete the runtimeClassName,
	# because the runtimeclass is not optional in new_pod_config function.
	runc_pod_config="$(new_pod_config "$unencrypted_image_1" "runc")"
	sed -i '/runtimeClassName:/d' "$runc_pod_config"
	set_node "$runc_pod_config" "$node"
	set_container_command "$runc_pod_config" "0" "sleep" "30"

	# For debug sake
	echo "Pod $runc_pod_config file:"
	cat "$runc_pod_config"

	k8s_create_pod "$runc_pod_config"
	echo "Runc pod test-e2e is running"
	kubectl delete -f "$runc_pod_config"

	# 2. Create one kata pod with the $unencrypted_image_1 image and nydus annotation
	kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_with_nydus_config" "$node"
	set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

	# Set annotation to pull image in guest
	set_metadata_annotation "$kata_pod_with_nydus_config" \
		"io.containerd.cri.runtime-handler" \
		"kata-${KATA_HYPERVISOR}"

	# For debug sake
	echo "Pod $kata_pod_with_nydus_config file:"
	cat "$kata_pod_with_nydus_config"

	k8s_create_pod "$kata_pod_with_nydus_config"
	echo "Kata pod test-e2e with nydus annotation is running"

	echo "Checking the image was pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# With annotation for nydus, only rootfs for pause container can be found on host
	assert_rootfs_count "$node" "$sandbox_id" "1"
}
@test "Test we can pull an unencrypted image inside the guest twice in a row and then outside the guest successfully" {
	# Guest pull only makes sense on TEE-capable hypervisors.
	[[ " ${SUPPORTED_NON_TEE_HYPERVISORS} " =~ " ${KATA_HYPERVISOR} " ]] && skip "Test not supported for ${KATA_HYPERVISOR}."
	skip "Skip this test until we use containerd 2.0 with 'image pull per runtime class' feature: https://github.com/containerd/containerd/issues/9377"

	# 1. Create one kata pod with the $unencrypted_image_1 image and nydus annotation twice
	kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_with_nydus_config" "$node"
	set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

	# Set annotation to pull image in guest
	set_metadata_annotation "$kata_pod_with_nydus_config" \
		"io.containerd.cri.runtime-handler" \
		"kata-${KATA_HYPERVISOR}"

	# For debug sake
	echo "Pod $kata_pod_with_nydus_config file:"
	cat "$kata_pod_with_nydus_config"

	k8s_create_pod "$kata_pod_with_nydus_config"
	echo "Kata pod test-e2e with nydus annotation is running"

	echo "Checking the image was pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# With annotation for nydus, only rootfs for pause container can be found on host
	assert_rootfs_count "$node" "$sandbox_id" "1"

	kubectl delete -f "$kata_pod_with_nydus_config"

	# 2. Create one kata pod with the $unencrypted_image_1 image and without nydus annotation
	kata_pod_without_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_without_nydus_config" "$node"
	set_container_command "$kata_pod_without_nydus_config" "0" "sleep" "30"

	# For debug sake
	echo "Pod $kata_pod_without_nydus_config file:"
	cat "$kata_pod_without_nydus_config"

	k8s_create_pod "$kata_pod_without_nydus_config"
	echo "Kata pod test-e2e without nydus annotation is running"

	echo "Check the image was not pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"

	# The assert_rootfs_count will be FAIL.
	# The expect count of rootfs in host is "2" but the found count of rootfs in host is "1"
	# As the first time we pull the $unencrypted_image_1 image via nydus-snapshotter in the guest,
	# all subsequent pulls still use nydus-snapshotter in the guest.
	# More details: https://github.com/kata-containers/kata-containers/issues/8337
	# The test case will be PASS after we use containerd 2.0 with 'image pull per runtime class' feature:
	# https://github.com/containerd/containerd/issues/9377
	assert_rootfs_count "$node" "$sandbox_id" "2"
}
@test "Test we can pull an other unencrypted image outside the guest and then inside the guest successfully" {
	# Guest pull only makes sense on TEE-capable hypervisors.
	[[ " ${SUPPORTED_NON_TEE_HYPERVISORS} " =~ " ${KATA_HYPERVISOR} " ]] && skip "Test not supported for ${KATA_HYPERVISOR}."
	skip "Skip this test until we use containerd 2.0 with 'image pull per runtime class' feature: https://github.com/containerd/containerd/issues/9377"

	# 1. Create one kata pod with the $unencrypted_image_2 image and without nydus annotation
	kata_pod_without_nydus_config="$(new_pod_config "$unencrypted_image_2" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_without_nydus_config" "$node"
	set_container_command "$kata_pod_without_nydus_config" "0" "sleep" "30"

	# For debug sake
	echo "Pod $kata_pod_without_nydus_config file:"
	cat "$kata_pod_without_nydus_config"

	k8s_create_pod "$kata_pod_without_nydus_config"
	echo "Kata pod test-e2e without nydus annotation is running"

	echo "Checking the image was pulled in the host"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# Without annotation for nydus, both rootfs for pause and the test container can be found on host
	assert_rootfs_count "$node" "$sandbox_id" "2"

	kubectl delete -f "$kata_pod_without_nydus_config"

	# 2. Create one kata pod with the $unencrypted_image_2 image and with nydus annotation
	kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_2" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_with_nydus_config" "$node"
	set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

	# Set annotation to pull image in guest
	set_metadata_annotation "$kata_pod_with_nydus_config" \
		"io.containerd.cri.runtime-handler" \
		"kata-${KATA_HYPERVISOR}"

	# For debug sake
	echo "Pod $kata_pod_with_nydus_config file:"
	cat "$kata_pod_with_nydus_config"

	k8s_create_pod "$kata_pod_with_nydus_config"
	echo "Kata pod test-e2e with nydus annotation is running"

	echo "Checking the image was pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"

	# The assert_rootfs_count will be FAIL.
	# The expect count of rootfs in host is "1" but the found count of rootfs in host is "2"
	# As the first time we pull the $unencrypted_image_2 image via overlayfs-snapshotter in host,
	# all subsequent pulls still use overlayfs-snapshotter in host.
	# More details: https://github.com/kata-containers/kata-containers/issues/8337
	# The test case will be PASS after we use containerd 2.0 with 'image pull per runtime class' feature:
	# https://github.com/containerd/containerd/issues/9377
	assert_rootfs_count "$node" "$sandbox_id" "1"
}
teardown() {
	# Skip teardown too when the test itself was skipped for this hypervisor.
	check_hypervisor_for_confidential_tests "${KATA_HYPERVISOR}" || skip "Test not supported for ${KATA_HYPERVISOR}."

	# Dump the pod state to help debugging failures before everything is deleted.
	# NOTE(review): $pod_name is presumably set by setup_common — confirm against lib.sh.
	kubectl describe pod "$pod_name"
	k8s_delete_all_pods_if_any_exists || true
}

View File

@@ -8,6 +8,9 @@
#
set -e
# Overall time budget in seconds for poll loops in this file
# (consumed by get_node_kata_sandbox_id below).
wait_time=60
# Seconds to sleep between poll attempts.
sleep_time=3
# Delete all pods if any exist, otherwise just return
#
k8s_delete_all_pods_if_any_exists() {
@@ -94,11 +97,49 @@ assert_pod_fail() {
! k8s_create_pod "$container_config" || /bin/false
}
# Check the pulled rootfs on host for given node and sandbox_id
#
# Parameters:
#	$1 - the k8s worker node name
#	$2 - the sandbox id for kata container
#	$3 - the expected count of pulled rootfs
#
# Returns 0 when the number of rootfs directories found on the host for the
# sandbox matches the expected count, non-zero otherwise.
#
assert_rootfs_count() {
	local node="$1"
	local sandbox_id="$2"
	local expect_count="$3"
	local allrootfs=""
	local count=""

	# verify that the sandbox_id is not empty;
	# otherwise, the command $(exec_host $node "find /run/kata-containers/shared/sandboxes/${sandbox_id} -name rootfs -type d")
	# may yield an unexpected count of rootfs.
	if [ -z "$sandbox_id" ]; then
		return 1
	fi

	# Max loop 3 times to get all pulled rootfs for given sandbox_id
	for _ in {1..3}; do
		allrootfs=$(exec_host "$node" "find /run/kata-containers/shared/sandboxes/${sandbox_id} -name rootfs -type d")
		if [ -n "$allrootfs" ]; then
			break
		fi
		sleep 1
	done
	echo "allrootfs is: $allrootfs"

	# Each matching directory path contains "rootfs" exactly once, so counting
	# occurrences counts the directories.
	count=$(echo "$allrootfs" | grep -o "rootfs" | wc -l)
	echo "count of container rootfs in host is: $count, expect count is: $expect_count"
	[ "$expect_count" -eq "$count" ]
}
# Create a pod configuration out of a template file.
#
# Parameters:
# $1 - the container image.
# $2 - the runtimeclass
# $2 - the runtimeclass, is not optional.
# $3 - the specific node name, optional.
#
# Return:
# the path to the configuration file. The caller should not care about
@@ -116,6 +157,7 @@ new_pod_config() {
new_config=$(mktemp "${BATS_FILE_TMPDIR}/$(basename "${base_config}").XXX")
IMAGE="$image" RUNTIMECLASS="$runtimeclass" envsubst < "$base_config" > "$new_config"
echo "$new_config"
}
@@ -147,7 +189,23 @@ set_metadata_annotation() {
echo "$annotation_key"
# yq set annotations in yaml. Quoting the key because it can have
# dots.
yq w -i --style=double "${yaml}" "${annotation_key}" "${value}"
yq write -i --style=double "${yaml}" "${annotation_key}" "${value}"
}
# Set the command for container spec.
#
# Parameters:
#	$1 - the yaml file
#	$2 - the index of the container
#	$N - the command values
#
set_container_command() {
	local yaml_file="${1}"
	local idx="${2}"
	local value
	shift 2

	# Every remaining argument is appended, in order, as a string element
	# of the container's command array in the pod spec.
	for value in "$@"; do
		yq write -i "${yaml_file}" "spec.containers[${idx}].command[+]" --tag '!!str' "${value}"
	done
}
# Set the node name on configuration spec.
@@ -161,7 +219,7 @@ set_node() {
local node="$2"
[ -n "$node" ] || return 1
yq w -i "${yaml}" "spec.nodeName" "$node"
yq write -i "${yaml}" "spec.nodeName" "$node"
}
# Get the systemd's journal from a worker node
@@ -183,3 +241,30 @@ print_node_journal() {
kubectl get pods -o name | grep "node-debugger-${node}" | \
xargs kubectl delete > /dev/null
}
# Get the sandbox id for kata container from a worker node
#
# Parameters:
#	$1 - the k8s worker node name
#
# Outputs the sandbox id on stdout (empty when none was found within the
# wait_time budget).
#
get_node_kata_sandbox_id() {
	local node="$1"
	local kata_sandbox_id=""
	local local_wait_time="${wait_time}"

	# Poll until a shim shows up or the wait_time budget is exhausted;
	# the shim may not be running yet right after pod creation.
	while [ "$local_wait_time" -gt 0 ];
	do
		# Extract the sandbox id from the newest containerd-shim-kata-v2
		# process' "-id <hex>" argument.
		kata_sandbox_id=$(exec_host "$node" "ps -ef |\
			grep containerd-shim-kata-v2" |\
			grep -oP '(?<=-id\s)[a-f0-9]+' |\
			tail -1)
		if [ -n "$kata_sandbox_id" ]; then
			break
		else
			sleep "${sleep_time}"
			local_wait_time=$((local_wait_time-sleep_time))
		fi
	done
	echo "$kata_sandbox_id"
}

View File

@@ -20,7 +20,12 @@ ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY:-$(base64 -w 0 runtimeclass_workloads_work/
if [ -n "${K8S_TEST_UNION:-}" ]; then
K8S_TEST_UNION=($K8S_TEST_UNION)
else
# Before we use containerd 2.0 with 'image pull per runtime class' feature
# we need run k8s-guest-pull-image.bats test first, otherwise the test result will be affected
# by other cases which are using 'alpine' and 'quay.io/prometheus/busybox:latest' image.
# more details https://github.com/kata-containers/kata-containers/issues/8337
K8S_TEST_SMALL_HOST_UNION=( \
"k8s-guest-pull-image.bats" \
"k8s-confidential.bats" \
"k8s-attach-handlers.bats" \
"k8s-caps.bats" \