Mirror of https://github.com/kata-containers/kata-containers.git
tests: nvidia: cc: Add attestation test
Add the attestation bats test case to the NVIDIA CI and provide a second pod manifest for the attestation test with a GPU. This will enable composite attestation in a subsequent step.

Signed-off-by: Manuel Huber <manuelh@nvidia.com>
Committed by: Fabiano Fidêncio
Parent: e31d592a0c
Commit: 34efa83afc
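For context, the in-guest checks that the new GPU attestation pod performs reduce to the sketch below; the CDH endpoint and the nvidia-smi invocation are taken verbatim from the manifest added in this commit, while the wrapper function name is purely illustrative and not part of the change.

# Illustrative sketch of the in-guest checks run by the new GPU attestation pod
# (mirrors the container command in the pod-attestable-gpu manifest below).
check_composite_attestation() {
    # The Confidential Data Hub only serves the resource after successful
    # attestation against the KBS, so a 2xx response implies attestation passed.
    curl -sf http://127.0.0.1:8006/cdh/resource/default/aa/key || return 1

    # Report the GPU confidential-compute readiness state; the bats test greps
    # the pod log for "Confidential Compute GPUs Ready state: ready".
    nvidia-smi conf-compute -grs
}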
@@ -48,6 +48,7 @@ jobs:
       GH_PR_NUMBER: ${{ inputs.pr-number }}
       KATA_HYPERVISOR: ${{ matrix.environment.vmm }}
       KUBERNETES: kubeadm
+      KBS: ${{ matrix.environment.name == 'nvidia-gpu-snp' && 'true' || 'false' }}
       K8S_TEST_HOST_TYPE: baremetal
     steps:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -11,6 +11,7 @@ load "${BATS_TEST_DIRNAME}/confidential_common.sh"
 export KBS="${KBS:-false}"
 export test_key="aatest"
 export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
+export RUNTIME_CLASS_NAME="kata-${KATA_HYPERVISOR}"
 export AA_KBC="${AA_KBC:-cc_kbc}"
 
 setup() {
@@ -25,7 +26,18 @@ setup()
 
     # setup_unencrypted_confidential_pod
 
-    export K8S_TEST_YAML="${pod_config_dir}/pod-attestable.yaml"
+    if is_confidential_gpu_hardware; then
+        POD_TEMPLATE_BASENAME="pod-attestable-gpu"
+    else
+        POD_TEMPLATE_BASENAME="pod-attestable"
+    fi
+
+    local pod_yaml_in="${pod_config_dir}/${POD_TEMPLATE_BASENAME}.yaml.in"
+    export K8S_TEST_YAML="${pod_config_dir}/${POD_TEMPLATE_BASENAME}.yaml"
+
+    # Substitute environment variables in the YAML template
+    envsubst < "${pod_yaml_in}" > "${K8S_TEST_YAML}"
+
     # Schedule on a known node so that later it can print the system's logs for
     # debugging.
     set_node "$K8S_TEST_YAML" "$node"
@@ -36,7 +48,7 @@ setup()
     kernel_params_annotation="io.katacontainers.config.hypervisor.kernel_params"
     kernel_params_value="agent.guest_components_rest_api=resource"
     # Based on current config we still need to pass the agent.aa_kbc_params, but this might change
-    # as the CDH/Attestaiton-agent config gets updated
+    # as the CDH/attestation-agent config gets updated
     if [ "${AA_KBC}" = "cc_kbc" ]; then
         kernel_params_value+=" agent.aa_kbc_params=cc_kbc::${CC_KBS_ADDR}"
     fi
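For clarity, the value that setup() ends up assigning to the kernel_params annotation in the cc_kbc case is sketched below; this is only a worked-out concatenation of the two fragments above, with CC_KBS_ADDR left unexpanded exactly as the test leaves it until runtime.

# Illustrative final value of the io.katacontainers.config.hypervisor.kernel_params
# annotation when AA_KBC=cc_kbc (CC_KBS_ADDR is expanded at test time):
kernel_params_value="agent.guest_components_rest_api=resource agent.aa_kbc_params=cc_kbc::${CC_KBS_ADDR}"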
@@ -46,10 +58,12 @@ setup()
 }
 
 @test "Get CDH resource" {
-    if ! is_confidential_hardware; then
-        kbs_set_allow_all_resources
-    else
+    if is_confidential_gpu_hardware; then
+        kbs_set_gpu0_resource_policy
+    elif is_confidential_hardware; then
         kbs_set_default_policy
+    else
+        kbs_set_allow_all_resources
     fi
 
     kubectl apply -f "${K8S_TEST_YAML}"
@@ -67,6 +81,12 @@ setup()
     cmd="kubectl logs aa-test-cc | grep -q ${test_key}"
     run bash -c "$cmd"
     [ "$status" -eq 0 ]
+
+    if is_confidential_gpu_hardware; then
+        cmd="kubectl logs aa-test-cc | grep -iq 'Confidential Compute GPUs Ready state:[[:space:]]*ready'"
+        run bash -c "$cmd"
+        [ "$status" -eq 0 ]
+    fi
 }
 
 @test "Cannot get CDH resource when deny-all policy is set" {
@@ -42,7 +42,8 @@ ENABLE_NVRC_TRACE="${ENABLE_NVRC_TRACE:-true}"
 if [ -n "${K8S_TEST_NV:-}" ]; then
     K8S_TEST_NV=($K8S_TEST_NV)
 else
-    K8S_TEST_NV=("k8s-nvidia-cuda.bats" \
+    K8S_TEST_NV=("k8s-confidential-attestation.bats" \
+        "k8s-nvidia-cuda.bats" \
         "k8s-nvidia-nim.bats")
 fi
 
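As a usage note, the K8S_TEST_NV guard above allows the default list to be overridden; a minimal sketch, assuming the NVIDIA test runner script containing this block is invoked afterwards, is:

# Hypothetical local run: select only the new attestation bats test by
# overriding K8S_TEST_NV before the runner script builds its default list.
export K8S_TEST_NV="k8s-confidential-attestation.bats"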
@@ -66,6 +67,11 @@ fi
 export AUTO_GENERATE_POLICY
 
 
+# TODO: remove this unconditional assignment as soon as this variable is set in .github/workflows/run-k8s-tests-on-nvidia-gpu.yaml
+if [[ "${KATA_HYPERVISOR:-}" == "qemu-nvidia-gpu-snp" ]] || [[ "${KATA_HYPERVISOR:-}" == "qemu-nvidia-gpu-tdx" ]]; then
+    export KBS="true"
+fi
+
 ensure_yq
 
 if [[ "${ENABLE_NVRC_TRACE:-true}" == "true" ]]; then
@@ -0,0 +1,38 @@
+# Copyright (c) 2025 NVIDIA Corporation
+# Copyright (c) 2023-2024 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+apiVersion: v1
+kind: Pod
+metadata:
+  name: aa-test-cc
+  annotations:
+    cdi.k8s.io/gpu: "nvidia.com/pgpu=0"
+spec:
+  runtimeClassName: ${RUNTIME_CLASS_NAME}
+  containers:
+    - name: bash-curl
+      image: nvidia/cuda:12.2.0-base-ubuntu22.04
+      imagePullPolicy: Always
+      command:
+        - sh
+        - -c
+        - |
+          set -e
+          apt update && apt install -y curl
+          curl http://127.0.0.1:8006/cdh/resource/default/aa/key
+          nvidia-smi conf-compute -grs
+          sleep 1000
+      readinessProbe:
+        exec:
+          command:
+            - sh
+            - -c
+            - test -x /usr/bin/curl
+        initialDelaySeconds: 10
+        periodSeconds: 5
+      resources:
+        limits:
+          nvidia.com/pgpu: "1"
+          memory: 16Gi
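Because this new file is a .yaml.in template, it is rendered with envsubst before being applied, as the setup() hunk above shows; a minimal sketch of the same two steps outside the test harness (paths and the exported runtime class value are illustrative) is:

# Hypothetical manual rendering of the GPU pod template; the bats setup()
# does the equivalent via pod_config_dir and K8S_TEST_YAML.
export RUNTIME_CLASS_NAME="kata-qemu-nvidia-gpu-snp"
envsubst < pod-attestable-gpu.yaml.in > pod-attestable-gpu.yaml
kubectl apply -f pod-attestable-gpu.yaml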
@@ -7,7 +7,7 @@ kind: Pod
 metadata:
   name: aa-test-cc
 spec:
-  runtimeClassName: kata
+  runtimeClassName: ${RUNTIME_CLASS_NAME}
   containers:
     - name: bash-curl
       image: quay.io/kata-containers/alpine-bash-curl:latest