kata-deploy: Add qemu-nvidia-gpu-tdx-runtime-rs shim

Register the new qemu-nvidia-gpu-tdx-runtime-rs shim across the kata-deploy
stack so it is built, installed, and exposed as a RuntimeClass.

This adds the shim to the Rust binary's RUST_SHIMS list (so it uses the
runtime-rs binary), the ALL_SHIMS list, the qemu-tdx-experimental share
name mapping, and the x86_64 default shim set. The Helm chart gets the
new shim entry in values.yaml, try-kata-nvidia-gpu.values.yaml, and the
RuntimeClass overhead definition in runtimeclasses.yaml.

Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
Signed-off-by: Alex Lyn <alex.lyn@antgroup.com>
This commit is contained in:
Fabiano Fidêncio
2026-04-04 12:13:07 +02:00
parent 1135f19d43
commit 2b17f9cbda
6 changed files with 47 additions and 2 deletions

View File

@@ -34,6 +34,7 @@ const ALL_SHIMS: &[&str] = &[
"qemu-nvidia-gpu-snp",
"qemu-nvidia-gpu-snp-runtime-rs",
"qemu-nvidia-gpu-tdx",
"qemu-nvidia-gpu-tdx-runtime-rs",
"qemu-runtime-rs",
"qemu-se",
"qemu-se-runtime-rs",
@@ -669,6 +670,7 @@ fn get_qemu_share_name(shim: &str) -> Option<String> {
"qemu-nvidia-gpu-snp" => "qemu-snp-experimental",
"qemu-nvidia-gpu-snp-runtime-rs" => "qemu-snp-experimental",
"qemu-nvidia-gpu-tdx" => "qemu-tdx-experimental",
"qemu-nvidia-gpu-tdx-runtime-rs" => "qemu-tdx-experimental",
_ => "qemu",
};
@@ -1062,6 +1064,7 @@ mod tests {
#[case("qemu-nvidia-gpu-snp", "qemu")]
#[case("qemu-nvidia-gpu-snp-runtime-rs", "qemu")]
#[case("qemu-nvidia-gpu-tdx", "qemu")]
#[case("qemu-nvidia-gpu-tdx-runtime-rs", "qemu")]
#[case("qemu-runtime-rs", "qemu")]
#[case("qemu-coco-dev-runtime-rs", "qemu")]
#[case("qemu-se-runtime-rs", "qemu")]

View File

@@ -718,7 +718,7 @@ fn parse_custom_runtimes() -> Result<Vec<CustomRuntime>> {
/// Returns only shims that are supported for that architecture
fn get_default_shims_for_arch(arch: &str) -> &'static str {
match arch {
"x86_64" => "clh cloud-hypervisor dragonball fc qemu qemu-coco-dev qemu-coco-dev-runtime-rs qemu-runtime-rs qemu-nvidia-gpu qemu-nvidia-gpu-runtime-rs qemu-nvidia-gpu-snp qemu-nvidia-gpu-snp-runtime-rs qemu-nvidia-gpu-tdx qemu-snp qemu-snp-runtime-rs qemu-tdx qemu-tdx-runtime-rs",
"x86_64" => "clh cloud-hypervisor dragonball fc qemu qemu-coco-dev qemu-coco-dev-runtime-rs qemu-runtime-rs qemu-nvidia-gpu qemu-nvidia-gpu-runtime-rs qemu-nvidia-gpu-snp qemu-nvidia-gpu-snp-runtime-rs qemu-nvidia-gpu-tdx qemu-nvidia-gpu-tdx-runtime-rs qemu-snp qemu-snp-runtime-rs qemu-tdx qemu-tdx-runtime-rs",
"aarch64" => "clh cloud-hypervisor dragonball fc qemu qemu-runtime-rs qemu-nvidia-gpu qemu-nvidia-gpu-runtime-rs qemu-cca",
"s390x" => "qemu qemu-runtime-rs qemu-se qemu-se-runtime-rs qemu-coco-dev qemu-coco-dev-runtime-rs",
"ppc64le" => "qemu",

View File

@@ -12,6 +12,7 @@ pub const RUST_SHIMS: &[&str] = &[
"qemu-runtime-rs",
"qemu-nvidia-gpu-runtime-rs",
"qemu-nvidia-gpu-snp-runtime-rs",
"qemu-nvidia-gpu-tdx-runtime-rs",
"qemu-coco-dev-runtime-rs",
"qemu-se-runtime-rs",
"qemu-snp-runtime-rs",

View File

@@ -101,6 +101,7 @@ scheduling:
"qemu-nvidia-gpu-snp" (dict "memory" "10240Mi" "cpu" "1.0")
"qemu-nvidia-gpu-snp-runtime-rs" (dict "memory" "10240Mi" "cpu" "1.0")
"qemu-nvidia-gpu-tdx" (dict "memory" "10240Mi" "cpu" "1.0")
"qemu-nvidia-gpu-tdx-runtime-rs" (dict "memory" "10240Mi" "cpu" "1.0")
"qemu-cca" (dict "memory" "2048Mi" "cpu" "1.0")
"stratovirt" (dict "memory" "130Mi" "cpu" "250m")
"remote" (dict "memory" "120Mi" "cpu" "250m")

View File

@@ -1,5 +1,5 @@
# Example values file to enable NVIDIA GPU shims
# This includes all NVIDIA GPU-enabled shims: qemu-nvidia-gpu, qemu-nvidia-gpu-runtime-rs, qemu-nvidia-gpu-snp, qemu-nvidia-gpu-snp-runtime-rs, qemu-nvidia-gpu-tdx
# This includes all NVIDIA GPU-enabled shims: qemu-nvidia-gpu, qemu-nvidia-gpu-runtime-rs, qemu-nvidia-gpu-snp, qemu-nvidia-gpu-snp-runtime-rs, qemu-nvidia-gpu-tdx, qemu-nvidia-gpu-tdx-runtime-rs
#
# Usage:
# helm install kata-deploy oci://ghcr.io/kata-containers/kata-deploy-charts/kata-deploy \
@@ -111,6 +111,28 @@ shims:
nvidia.com/cc.ready.state: "true"
intel.feature.node.kubernetes.io/tdx: "true"
qemu-nvidia-gpu-tdx-runtime-rs:
enabled: true
supportedArches:
- amd64
allowedHypervisorAnnotations: []
containerd:
snapshotter: "nydus"
forceGuestPull: false
crio:
guestPull: true
agent:
httpsProxy: ""
noProxy: ""
runtimeClass:
# These labels are automatically added by gpu-operator and NFD
# respectively. Override if you want to use a different label.
# If you don't have NFD, you need to add the tdx label by other
# means to your TDX nodes.
nodeSelector:
nvidia.com/cc.ready.state: "true"
intel.feature.node.kubernetes.io/tdx: "true"
# Default shim per architecture (prefer NVIDIA GPU shims)
defaultShim:
amd64: qemu-nvidia-gpu # Can be changed to qemu-nvidia-gpu-snp or qemu-nvidia-gpu-tdx if preferred

View File

@@ -222,6 +222,24 @@ shims:
nvidia.com/cc.ready.state: "true"
intel.feature.node.kubernetes.io/tdx: "true"
qemu-nvidia-gpu-tdx-runtime-rs:
enabled: ~
supportedArches:
- amd64
allowedHypervisorAnnotations: []
containerd:
snapshotter: "nydus"
forceGuestPull: false
crio:
guestPull: true
agent:
httpsProxy: ""
noProxy: ""
runtimeClass:
nodeSelector:
nvidia.com/cc.ready.state: "true"
intel.feature.node.kubernetes.io/tdx: "true"
qemu-snp:
enabled: ~
supportedArches: