tests: Re-enable measured_rootfs test for TDX

As we're now building everything needed to test TDX with measured rootfs
support, let's bring this test back in (for TDX only, at least for now).

Signed-off-by: Fabiano Fidêncio <fabiano@fidencio.org>
Fabiano Fidêncio 2024-10-25 08:54:55 +02:00
parent d537932e66
commit 7d202fc173

@@ -5,16 +5,14 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
 load "${BATS_TEST_DIRNAME}/../../common.bash"
 load "${BATS_TEST_DIRNAME}/lib.sh"
 load "${BATS_TEST_DIRNAME}/tests_common.sh"
 
 check_and_skip() {
-	# Currently the kernel-confidential, isn't built withh measured rootfs support, so this test
-	# should be skipped until it is
-	# See https://github.com/kata-containers/kata-containers/issues/9612,
-	# https://github.com/kata-containers/kata-containers/issues/7235
-	# and https://github.com/kata-containers/kata-containers/issues/7415
-	skip "measured rootfs tests not implemented for hypervisor: $KATA_HYPERVISOR"
+	if [ "${KATA_HYPERVISOR}" != "qemu-tdx" ]; then
+		skip "measured rootfs tests not implemented for hypervisor: $KATA_HYPERVISOR"
+	fi
 }
 
 setup() {
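
With this change the guard only short-circuits on non-TDX setups; on qemu-tdx the test body runs unmodified. The pattern is the usual bats one: a helper calls skip, and bats records the test as skipped instead of failed. A minimal self-contained illustration (a hypothetical sketch.bats; KATA_HYPERVISOR is supplied by the environment, as in the real suite):

#!/usr/bin/env bats
# sketch.bats: illustration of the conditional-skip pattern used above.

check_and_skip() {
	# Skip everywhere except TDX; tests simply invoke this helper first.
	if [ "${KATA_HYPERVISOR:-}" != "qemu-tdx" ]; then
		skip "measured rootfs tests not implemented for hypervisor: ${KATA_HYPERVISOR:-unset}"
	fi
}

@test "only runs on qemu-tdx" {
	check_and_skip
	run true
	[ "$status" -eq 0 ]
}

Running `KATA_HYPERVISOR=qemu bats sketch.bats` reports the test as skipped, while `KATA_HYPERVISOR=qemu-tdx bats sketch.bats` executes it.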
@@ -25,11 +23,10 @@ setup() {
 @test "Test cannnot launch pod with measured boot enabled and incorrect hash" {
 	pod_config="$(new_pod_config nginx "kata-${KATA_HYPERVISOR}")"
-	incorrect_hash="5180b1568c2ba972e4e06ee0a55976acae8329f2a5d1d2004395635e1ec4a76e"
+	incorrect_hash="1111111111111111111111111111111111111111111111111111111111111111"
 
-	# Despite the kernel being built with support, it is not currently enabled
-	# on configuration.toml. To avoid editing that file on the worker node,
-	# here it will be enabled via pod annotations.
+	# To avoid editing configuration.toml on the worker node, here it will
+	# be enabled via pod annotations.
 	set_metadata_annotation "$pod_config" \
 		"io.katacontainers.config.hypervisor.kernel_params" \
 		"rootfs_verity.scheme=dm-verity rootfs_verity.hash=$incorrect_hash"
@@ -45,10 +42,9 @@ setup() {
 	echo "Pod $pod_config file:"
 	cat $pod_config
 
-	assert_pod_fail "$pod_config"
+	kubectl apply -f $pod_config
 
-	assert_logs_contain "$node" kata "$node_start_time" \
-		'verity: .* metadata block .* is corrupted'
+	waitForProcess "60" "3" "exec_host $node journalctl -t kata | grep \"verity: .* metadata block .* is corrupted\""
 }
 
 teardown() {
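
The new check applies the pod and then polls the node's journal until the dm-verity error surfaces, rather than asserting against a single snapshot of the logs. The retry behaviour behind the waitForProcess "60" "3" "<command>" call can be sketched as follows (a simplified stand-in under an assumed name, wait_for_cmd; not the actual common.bash implementation):

# Sketch: retry a command every sleep_time seconds until it succeeds
# or wait_time seconds have elapsed; return 1 on timeout.
wait_for_cmd() {
	local wait_time="$1" sleep_time="$2" cmd="$3"
	while [ "$wait_time" -gt 0 ]; do
		eval "$cmd" && return 0
		sleep "$sleep_time"
		wait_time=$((wait_time - sleep_time))
	done
	echo "timed out waiting for: $cmd" >&2
	return 1
}

# Usage mirroring the test above:
# wait_for_cmd 60 3 'journalctl -t kata | grep "verity: .* metadata block .* is corrupted"'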