Compare commits

..

3 Commits

Author SHA1 Message Date
Fabiano Fidêncio
1eabd6c729 tests: k8s: coco: Use a var for AUTHENTICATED_IMAGE_PASSWORD
It's a bot password that only has read permissions for that image, no
need to use a secret here.

Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
2026-02-15 23:12:02 +01:00
Fabiano Fidêncio
b1ec7d0c02 build: ci: remove KBUILD_SIGN_PIN entirely
Drop kernel build signing (KBUILD_SIGN_PIN) from CI and from all
scripts that referenced it.

Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-15 18:52:18 +01:00
Fabiano Fidêncio
83dce477d0 build: ci: remove CI_HKD_PATH and s390x boot-image-se build
Drop the CI_HKD_PATH secret and the build-asset-boot-image-se job from
the s390x tarball workflow; the artefact that depended on the host key
was never released anyway.

Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-15 16:24:30 +01:00
55 changed files with 434 additions and 738 deletions

View File

@@ -23,8 +23,6 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: false
KBUILD_SIGN_PIN:
required: true
permissions: {}
@@ -102,7 +100,6 @@ jobs:
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
- name: Parse OCI image name and digest
id: parse-oci-segments
@@ -215,7 +212,6 @@ jobs:
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2

View File

@@ -23,8 +23,6 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: false
KBUILD_SIGN_PIN:
required: true
permissions: {}
@@ -90,7 +88,6 @@ jobs:
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
- name: Parse OCI image name and digest
id: parse-oci-segments
@@ -197,7 +194,6 @@ jobs:
ARTEFACT_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
TARGET_BRANCH: ${{ inputs.target-branch }}
RELEASE: ${{ inputs.stage == 'release' && 'yes' || 'no' }}
KBUILD_SIGN_PIN: ${{ contains(matrix.asset, 'nvidia') && secrets.KBUILD_SIGN_PIN || '' }}
- name: store-artifact ${{ matrix.asset }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2

View File

@@ -21,8 +21,6 @@ on:
type: string
default: ""
secrets:
CI_HKD_PATH:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
@@ -197,60 +195,11 @@ jobs:
retention-days: 15
if-no-files-found: error
build-asset-boot-image-se:
name: build-asset-boot-image-se
runs-on: s390x
needs: [build-asset, build-asset-rootfs]
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: Rebase atop of the latest target branch
run: |
./tests/git-helper.sh "rebase-atop-of-the-latest-target-branch"
env:
TARGET_BRANCH: ${{ inputs.target-branch }}
- name: get-artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
pattern: kata-artifacts-s390x-*${{ inputs.tarball-suffix }}
path: kata-artifacts
merge-multiple: true
- name: Place a host key document
run: |
mkdir -p "host-key-document"
cp "${CI_HKD_PATH}" "host-key-document"
env:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
- name: Build boot-image-se
run: |
./tests/gha-adjust-to-use-prebuilt-components.sh kata-artifacts "boot-image-se"
make boot-image-se-tarball
build_dir=$(readlink -f build)
sudo cp -r "${build_dir}" "kata-build"
sudo chown -R "$(id -u)":"$(id -g)" "kata-build"
env:
HKD_PATH: "host-key-document"
- name: store-artifact boot-image-se
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: kata-artifacts-s390x${{ inputs.tarball-suffix }}
path: kata-build/kata-static-boot-image-se.tar.zst
retention-days: 1
if-no-files-found: error
# We don't need the binaries installed in the rootfs as part of the release tarball, so can delete them now we've built the rootfs
remove-rootfs-binary-artifacts:
name: remove-rootfs-binary-artifacts
runs-on: ubuntu-22.04
needs: [build-asset-rootfs, build-asset-boot-image-se]
needs: [build-asset-rootfs]
strategy:
matrix:
asset:
@@ -331,7 +280,6 @@ jobs:
needs:
- build-asset
- build-asset-rootfs
- build-asset-boot-image-se
- build-asset-shim-v2
permissions:
contents: read

View File

@@ -25,9 +25,8 @@ jobs:
tag: ${{ github.sha }}-weekly
target-branch: ${{ github.ref_name }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -19,15 +19,13 @@ jobs:
target-branch: ${{ github.ref_name }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-checks:
uses: ./.github/workflows/build-checks.yaml

View File

@@ -23,12 +23,10 @@ jobs:
tag: ${{ github.sha }}-nightly
target-branch: ${{ github.ref_name }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -43,12 +43,10 @@ jobs:
target-branch: ${{ github.event.pull_request.base.ref }}
skip-test: ${{ needs.skipper.outputs.skip_test }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
ITA_KEY: ${{ secrets.ITA_KEY }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
NGC_API_KEY: ${{ secrets.NGC_API_KEY }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}

View File

@@ -27,8 +27,6 @@ on:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
KBUILD_SIGN_PIN:
required: true
permissions: {}
@@ -44,8 +42,6 @@ jobs:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
@@ -119,7 +115,7 @@ jobs:
target-branch: ${{ inputs.target-branch }}
tarball-suffix: -${{ inputs.tag }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}

View File

@@ -29,16 +29,12 @@ on:
required: true
AZ_SUBSCRIPTION_ID:
required: true
CI_HKD_PATH:
required: true
ITA_KEY:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
NGC_API_KEY:
required: true
KBUILD_SIGN_PIN:
required: true
permissions: {}
@@ -54,8 +50,6 @@ jobs:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
publish-kata-deploy-payload-amd64:
needs: build-kata-static-tarball-amd64
@@ -86,8 +80,6 @@ jobs:
tarball-suffix: -${{ inputs.tag }}
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
publish-kata-deploy-payload-arm64:
needs: build-kata-static-tarball-arm64
@@ -119,7 +111,6 @@ jobs:
commit-hash: ${{ inputs.commit-hash }}
target-branch: ${{ inputs.target-branch }}
secrets:
CI_HKD_PATH: ${{ secrets.ci_hkd_path }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-kata-static-tarball-ppc64le:
@@ -344,7 +335,7 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
AZ_APPID: ${{ secrets.AZ_APPID }}
AZ_TENANT_ID: ${{ secrets.AZ_TENANT_ID }}
AZ_SUBSCRIPTION_ID: ${{ secrets.AZ_SUBSCRIPTION_ID }}
@@ -362,7 +353,7 @@ jobs:
pr-number: ${{ inputs.pr-number }}
target-branch: ${{ inputs.target-branch }}
secrets:
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
run-k8s-tests-on-ppc64le:
if: ${{ inputs.skip-test != 'yes' }}

View File

@@ -24,7 +24,6 @@ jobs:
target-branch: ${{ github.ref_name }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-assets-arm64:
permissions:
@@ -39,7 +38,6 @@ jobs:
target-branch: ${{ github.ref_name }}
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-assets-s390x:
permissions:
@@ -53,7 +51,6 @@ jobs:
push-to-registry: yes
target-branch: ${{ github.ref_name }}
secrets:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-assets-ppc64le:

View File

@@ -8,8 +8,6 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: true
KBUILD_SIGN_PIN:
required: true
permissions: {}
@@ -21,7 +19,6 @@ jobs:
stage: release
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
permissions:
contents: read
packages: write

View File

@@ -8,8 +8,6 @@ on:
secrets:
QUAY_DEPLOYER_PASSWORD:
required: true
KBUILD_SIGN_PIN:
required: true
permissions: {}
@@ -21,7 +19,6 @@ jobs:
stage: release
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
permissions:
contents: read
packages: write

View File

@@ -6,8 +6,6 @@ on:
required: true
type: string
secrets:
CI_HKD_PATH:
required: true
QUAY_DEPLOYER_PASSWORD:
required: true
@@ -20,7 +18,6 @@ jobs:
push-to-registry: yes
stage: release
secrets:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
permissions:
contents: read

View File

@@ -35,7 +35,6 @@ jobs:
target-arch: amd64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-and-push-assets-arm64:
needs: release
@@ -49,7 +48,6 @@ jobs:
target-arch: arm64
secrets:
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
KBUILD_SIGN_PIN: ${{ secrets.KBUILD_SIGN_PIN }}
build-and-push-assets-s390x:
needs: release
@@ -62,7 +60,6 @@ jobs:
with:
target-arch: s390x
secrets:
CI_HKD_PATH: ${{ secrets.CI_HKD_PATH }}
QUAY_DEPLOYER_PASSWORD: ${{ secrets.QUAY_DEPLOYER_PASSWORD }}
build-and-push-assets-ppc64le:

View File

@@ -76,7 +76,7 @@ jobs:
SNAPSHOTTER: ${{ matrix.snapshotter }}
TARGET_ARCH: "s390x"
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:

View File

@@ -69,7 +69,7 @@ jobs:
KUBERNETES: "vanilla"
PULL_TYPE: ${{ matrix.pull-type }}
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
SNAPSHOTTER: ${{ matrix.snapshotter }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

View File

@@ -63,7 +63,7 @@ jobs:
SNAPSHOTTER: "nydus"
PULL_TYPE: "guest-pull"
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
GH_ITA_KEY: ${{ secrets.ITA_KEY }}
AUTO_GENERATE_POLICY: "yes"
steps:
@@ -168,7 +168,7 @@ jobs:
KUBERNETES: "vanilla"
PULL_TYPE: ${{ matrix.pull-type }}
AUTHENTICATED_IMAGE_USER: ${{ vars.AUTHENTICATED_IMAGE_USER }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ secrets.AUTHENTICATED_IMAGE_PASSWORD }}
AUTHENTICATED_IMAGE_PASSWORD: ${{ vars.AUTHENTICATED_IMAGE_PASSWORD }}
SNAPSHOTTER: ${{ matrix.snapshotter }}
EXPERIMENTAL_FORCE_GUEST_PULL: ${{ matrix.pull-type == 'experimental-force-guest-pull' && matrix.vmm || '' }}
# Caution: current ingress controller used to expose the KBS service

View File

@@ -857,7 +857,7 @@ fn mount_from(
dest.as_str(),
Some(mount_typ.as_str()),
flags,
Some(d.as_str()).filter(|s| !s.is_empty()),
Some(d.as_str()),
)
.inspect_err(|e| log_child!(cfd_log, "mount error: {:?}", e))?;

View File

@@ -298,7 +298,7 @@ ifneq (,$(CLHCMD))
KERNELTYPE_CLH = uncompressed
KERNEL_NAME_CLH = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_CLH))
KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH)
VMROOTFSDRIVER_CLH := virtio-blk-pci
VMROOTFSDRIVER_CLH := virtio-pmem
DEFSANDBOXCGROUPONLY_CLH := true
DEFSTATICRESOURCEMGMT_CLH := false

View File

@@ -22,8 +22,6 @@ rootfs_type = @DEFROOTFSTYPE@
# Block storage driver to be used for the VM rootfs is backed
# by a block device.
#
# virtio-pmem is not supported with Cloud Hypervisor.
vm_rootfs_driver = "@VMROOTFSDRIVER_CLH@"
# Path to the firmware.

View File

@@ -118,11 +118,13 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
// Note how CH handles the different image types:
//
// - A standard image is specified in PmemConfig.
// - An initrd/initramfs is specified in PayloadConfig.
// - An image is specified in DiskConfig.
// Note: pmem is not used as it's not properly supported by Cloud Hypervisor.
// - A confidential guest image is specified by a DiskConfig.
// - If TDX is enabled, the firmware (`td-shim` [1]) must be
// specified in PayloadConfig.
// - A confidential guest initrd is specified by a PayloadConfig with
// firmware.
//
// [1] - https://github.com/confidential-containers/td-shim
let boot_info = cfg.boot_info;
@@ -138,6 +140,14 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
return Err(VmConfigError::NoBootFile);
}
let pmem = if use_initrd || guest_protection_is_tdx(guest_protection_to_use.clone()) {
None
} else {
let pmem = PmemConfig::try_from(&boot_info).map_err(VmConfigError::PmemError)?;
Some(vec![pmem])
};
let payload = Some(
PayloadConfig::try_from((
boot_info.clone(),
@@ -149,7 +159,7 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
let mut disks: Vec<DiskConfig> = vec![];
if use_image {
if use_image && guest_protection_is_tdx(guest_protection_to_use.clone()) {
let disk = DiskConfig::try_from(boot_info).map_err(VmConfigError::DiskError)?;
disks.push(disk);
@@ -189,6 +199,7 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
fs,
net,
devices: host_devices,
pmem,
disks,
vsock: Some(vsock),
rng,
@@ -1645,6 +1656,7 @@ mod tests {
let (memory_info_confidential_guest, mem_config_confidential_guest) =
make_memory_objects(79, usable_max_mem_bytes, true);
let (_, pmem_config_with_image) = make_bootinfo_pmemconfig_objects(image);
let (machine_info, rng_config) = make_machineinfo_rngconfig_objects(entropy_source);
let payload_firmware = None;
@@ -1652,7 +1664,6 @@ mod tests {
let (boot_info_with_initrd, payload_config_with_initrd) =
make_bootinfo_payloadconfig_objects(kernel, initramfs, payload_firmware, None);
let (_, disk_config_with_image) = make_bootinfo_diskconfig_objects(image);
let (_, disk_config_confidential_guest_image) = make_bootinfo_diskconfig_objects(image);
let boot_info_tdx_image = BootInfo {
@@ -1751,7 +1762,7 @@ mod tests {
vsock: Some(valid_vsock.clone()),
// rootfs image specific
disks: Some(vec![disk_config_with_image]),
pmem: Some(vec![pmem_config_with_image]),
payload: Some(PayloadConfig {
kernel: Some(PathBuf::from(kernel)),

View File

@@ -123,12 +123,7 @@ impl CloudHypervisorInner {
}
}
pub fn set_hypervisor_config(&mut self, mut config: HypervisorConfig) {
// virtio-pmem is not supported for Cloud Hypervisor.
if config.boot_info.vm_rootfs_driver == crate::VM_ROOTFS_DRIVER_PMEM {
config.boot_info.vm_rootfs_driver = crate::VM_ROOTFS_DRIVER_BLK.to_string();
}
pub fn set_hypervisor_config(&mut self, config: HypervisorConfig) {
self.config = config;
}

View File

@@ -15,6 +15,7 @@ use crate::utils::vm_cleanup;
use crate::utils::{bytes_to_megs, get_jailer_root, get_sandbox_path, megs_to_bytes};
use crate::MemoryConfig;
use crate::VM_ROOTFS_DRIVER_BLK;
use crate::VM_ROOTFS_DRIVER_PMEM;
use crate::{VcpuThreadIds, VmmState};
use anyhow::{anyhow, Context, Result};
use ch_config::ch_api::cloud_hypervisor_vm_netdev_add_with_fds;
@@ -129,8 +130,12 @@ impl CloudHypervisorInner {
let confidential_guest = cfg.security_info.confidential_guest;
// Note that the configuration option hypervisor.block_device_driver is not used.
// NVDIMM is not supported for Cloud Hypervisor.
let rootfs_driver = VM_ROOTFS_DRIVER_BLK;
let rootfs_driver = if confidential_guest {
// PMEM is not available with TDX.
VM_ROOTFS_DRIVER_BLK
} else {
VM_ROOTFS_DRIVER_PMEM
};
let rootfs_type = match cfg.boot_info.rootfs_type.is_empty() {
true => DEFAULT_CH_ROOTFS_TYPE,
@@ -150,7 +155,6 @@ impl CloudHypervisorInner {
&cfg.boot_info.kernel_verity_params,
rootfs_driver,
rootfs_type,
true,
)?;
let mut console_params = if enable_debug {

View File

@@ -150,7 +150,6 @@ impl DragonballInner {
&self.config.boot_info.kernel_verity_params,
&rootfs_driver,
&self.config.boot_info.rootfs_type,
true,
)?;
kernel_params.append(&mut rootfs_params);
}

View File

@@ -90,7 +90,6 @@ impl FcInner {
&self.config.boot_info.kernel_verity_params,
&self.config.blockdev_info.block_device_driver,
&self.config.boot_info.rootfs_type,
true,
)?;
kernel_params.append(&mut rootfs_params);
kernel_params.append(&mut KernelParams::from_string(

View File

@@ -10,8 +10,8 @@ use crate::{
VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_BLK_CCW, VM_ROOTFS_DRIVER_MMIO, VM_ROOTFS_DRIVER_PMEM,
VM_ROOTFS_ROOT_BLK, VM_ROOTFS_ROOT_PMEM,
};
use kata_types::config::hypervisor::{parse_kernel_verity_params, VERITY_BLOCK_SIZE_BYTES};
use kata_types::config::LOG_VPORT_OPTION;
use kata_types::config::hypervisor::{parse_kernel_verity_params, VERITY_BLOCK_SIZE_BYTES};
use kata_types::fs::{
VM_ROOTFS_FILESYSTEM_EROFS, VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS,
};
@@ -66,7 +66,8 @@ struct KernelVerityConfig {
}
fn new_kernel_verity_params(params_string: &str) -> Result<Option<KernelVerityConfig>> {
let cfg = parse_kernel_verity_params(params_string).map_err(|err| anyhow!(err.to_string()))?;
let cfg = parse_kernel_verity_params(params_string)
.map_err(|err| anyhow!(err.to_string()))?;
Ok(cfg.map(|params| KernelVerityConfig {
root_hash: params.root_hash,
@@ -144,7 +145,6 @@ impl KernelParams {
kernel_verity_params: &str,
rootfs_driver: &str,
rootfs_type: &str,
use_dax: bool,
) -> Result<Self> {
let mut params = vec![];
@@ -153,29 +153,16 @@ impl KernelParams {
params.push(Param::new("root", VM_ROOTFS_ROOT_PMEM));
match rootfs_type {
VM_ROOTFS_FILESYSTEM_EXT4 => {
if use_dax {
params.push(Param::new(
"rootflags",
"dax,data=ordered,errors=remount-ro ro",
));
} else {
params
.push(Param::new("rootflags", "data=ordered,errors=remount-ro ro"));
}
params.push(Param::new(
"rootflags",
"dax,data=ordered,errors=remount-ro ro",
));
}
VM_ROOTFS_FILESYSTEM_XFS => {
if use_dax {
params.push(Param::new("rootflags", "dax ro"));
} else {
params.push(Param::new("rootflags", "ro"));
}
params.push(Param::new("rootflags", "dax ro"));
}
VM_ROOTFS_FILESYSTEM_EROFS => {
if use_dax {
params.push(Param::new("rootflags", "dax ro"));
} else {
params.push(Param::new("rootflags", "ro"));
}
params.push(Param::new("rootflags", "dax ro"));
}
_ => {
return Err(anyhow!("Unsupported rootfs type {}", rootfs_type));
@@ -359,7 +346,6 @@ mod tests {
struct TestData<'a> {
rootfs_driver: &'a str,
rootfs_type: &'a str,
use_dax: bool,
expect_params: KernelParams,
result: Result<()>,
}
@@ -367,11 +353,10 @@ mod tests {
#[test]
fn test_rootfs_kernel_params() {
let tests = &[
// EXT4 with DAX
// EXT4
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_PMEM),
@@ -385,7 +370,6 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -396,15 +380,14 @@ mod tests {
},
result: Ok(()),
},
// XFS without DAX
// XFS
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
rootfs_type: VM_ROOTFS_FILESYSTEM_XFS,
use_dax: false,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_PMEM),
Param::new("rootflags", "ro"),
Param::new("rootflags", "dax ro"),
Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_XFS),
]
.to_vec(),
@@ -414,7 +397,6 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: VM_ROOTFS_FILESYSTEM_XFS,
use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -425,11 +407,10 @@ mod tests {
},
result: Ok(()),
},
// EROFS with DAX
// EROFS
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
rootfs_type: VM_ROOTFS_FILESYSTEM_EROFS,
use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_PMEM),
@@ -443,7 +424,6 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: VM_ROOTFS_FILESYSTEM_EROFS,
use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -458,7 +438,6 @@ mod tests {
TestData {
rootfs_driver: "foo",
rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -473,7 +452,6 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: "foo",
use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -488,12 +466,8 @@ mod tests {
for (i, t) in tests.iter().enumerate() {
let msg = format!("test[{i}]: {t:?}");
let result = KernelParams::new_rootfs_kernel_params(
"",
t.rootfs_driver,
t.rootfs_type,
t.use_dax,
);
let result =
KernelParams::new_rootfs_kernel_params("", t.rootfs_driver, t.rootfs_type);
let msg = format!("{msg}, result: {result:?}");
if t.result.is_ok() {
assert!(result.is_ok(), "{}", msg);
@@ -512,7 +486,6 @@ mod tests {
"root_hash=abc,salt=def,data_blocks=1,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
false,
)?;
let params_string = params.to_string()?;
assert!(params_string.contains("dm-mod.create="));
@@ -523,7 +496,6 @@ mod tests {
"root_hash=abc,data_blocks=1,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
false,
)
.err()
.expect("expected missing salt error");
@@ -533,7 +505,6 @@ mod tests {
"root_hash=abc,salt=def,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
false,
)
.err()
.expect("expected missing data_blocks error");
@@ -543,7 +514,6 @@ mod tests {
"root_hash=abc,salt=def,data_blocks=foo,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
false,
)
.err()
.expect("expected invalid data_blocks error");
@@ -553,7 +523,6 @@ mod tests {
"root_hash=abc,salt=def,data_blocks=1,data_block_size=4096,hash_block_size=4096,badfield",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
false,
)
.err()
.expect("expected invalid entry error");

View File

@@ -179,17 +179,10 @@ impl Kernel {
let mut kernel_params = KernelParams::new(config.debug_info.enable_debug);
if config.boot_info.initrd.is_empty() {
// DAX is disabled on ARM due to a kernel panic in caches_clean_inval_pou.
#[cfg(target_arch = "aarch64")]
let use_dax = false;
#[cfg(not(target_arch = "aarch64"))]
let use_dax = true;
let mut rootfs_params = KernelParams::new_rootfs_kernel_params(
&config.boot_info.kernel_verity_params,
&config.boot_info.vm_rootfs_driver,
&config.boot_info.rootfs_type,
use_dax,
)
.context("adding rootfs/verity params failed")?;
kernel_params.append(&mut rootfs_params);

View File

@@ -288,7 +288,6 @@ DEFSTATICRESOURCEMGMT_NV = true
DEFDISABLEIMAGENVDIMM ?= false
DEFDISABLEIMAGENVDIMM_NV = true
DEFDISABLEIMAGENVDIMM_CLH ?= true
DEFBINDMOUNTS := []
@@ -789,7 +788,6 @@ USER_VARS += DEFVFIOMODE_SE
USER_VARS += BUILDFLAGS
USER_VARS += DEFDISABLEIMAGENVDIMM
USER_VARS += DEFDISABLEIMAGENVDIMM_NV
USER_VARS += DEFDISABLEIMAGENVDIMM_CLH
USER_VARS += DEFCCAMEASUREMENTALGO
USER_VARS += DEFSHAREDFS_QEMU_CCA_VIRTIOFS
USER_VARS += DEFPODRESOURCEAPISOCK

View File

@@ -222,8 +222,8 @@ hypervisor_loglevel = 1
# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
#
# nvdimm is not supported with Cloud Hypervisor or when `confidential_guest = true`.
disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM_CLH@
# nvdimm is not supported when `confidential_guest = true`.
disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# Enable hot-plugging of VFIO devices to a root-port.
# The default setting is "no-port"

View File

@@ -332,9 +332,6 @@ func (clh *cloudHypervisor) getClhStopSandboxTimeout() time.Duration {
func (clh *cloudHypervisor) setConfig(config *HypervisorConfig) error {
clh.config = *config
// We don't support NVDIMM with Cloud Hypervisor.
clh.config.DisableImageNvdimm = true
return nil
}
@@ -587,8 +584,8 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
// Set initial amount of cpu's for the virtual machine
clh.vmconfig.Cpus = chclient.NewCpusConfig(int32(clh.config.NumVCPUs()), int32(clh.config.DefaultMaxVCPUs))
disableNvdimm := true
enableDax := false
disableNvdimm := (clh.config.DisableImageNvdimm || clh.config.ConfidentialGuest)
enableDax := !disableNvdimm
params, err := getNonUserDefinedKernelParams(hypervisorConfig.RootfsType, disableNvdimm, enableDax, clh.config.Debug, clh.config.ConfidentialGuest, clh.config.IOMMU, hypervisorConfig.KernelVerityParams)
if err != nil {
@@ -610,19 +607,31 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
}
if assetType == types.ImageAsset {
disk := chclient.NewDiskConfig()
disk.Path = &assetPath
disk.SetReadonly(true)
if clh.config.DisableImageNvdimm || clh.config.ConfidentialGuest {
disk := chclient.NewDiskConfig()
disk.Path = &assetPath
disk.SetReadonly(true)
diskRateLimiterConfig := clh.getDiskRateLimiterConfig()
if diskRateLimiterConfig != nil {
disk.SetRateLimiterConfig(*diskRateLimiterConfig)
}
diskRateLimiterConfig := clh.getDiskRateLimiterConfig()
if diskRateLimiterConfig != nil {
disk.SetRateLimiterConfig(*diskRateLimiterConfig)
}
if clh.vmconfig.Disks != nil {
*clh.vmconfig.Disks = append(*clh.vmconfig.Disks, *disk)
if clh.vmconfig.Disks != nil {
*clh.vmconfig.Disks = append(*clh.vmconfig.Disks, *disk)
} else {
clh.vmconfig.Disks = &[]chclient.DiskConfig{*disk}
}
} else {
clh.vmconfig.Disks = &[]chclient.DiskConfig{*disk}
pmem := chclient.NewPmemConfig(assetPath)
*pmem.DiscardWrites = true
pmem.SetIommu(clh.config.IOMMU)
if clh.vmconfig.Pmem != nil {
*clh.vmconfig.Pmem = append(*clh.vmconfig.Pmem, *pmem)
} else {
clh.vmconfig.Pmem = &[]chclient.PmemConfig{*pmem}
}
}
} else {
// assetType == types.InitrdAsset

View File

@@ -69,7 +69,6 @@ func newClhConfig() (HypervisorConfig, error) {
NetRateLimiterOpsMaxRate: int64(0),
NetRateLimiterOpsOneTimeBurst: int64(0),
HotPlugVFIO: config.NoPort,
DisableImageNvdimm: true,
}, nil
}

View File

@@ -1098,10 +1098,8 @@ func (q *qemu) LogAndWait(qemuCmd *exec.Cmd, reader io.ReadCloser) {
q.Logger().WithField("qemuPid", pid).Error(text)
}
}
q.Logger().WithField("qemuPid", pid).Infof("Stop logging QEMU")
if err := qemuCmd.Wait(); err != nil {
q.Logger().WithField("qemuPid", pid).WithField("error", err).Warn("QEMU exited with an error")
}
q.Logger().Infof("Stop logging QEMU (qemuPid=%d)", pid)
qemuCmd.Wait()
}
// StartVM will start the Sandbox's VM.

View File

@@ -332,38 +332,6 @@ func TestQemuArchBaseAppendImage(t *testing.T) {
assert.Equal(expectedOut, devices)
}
func TestQemuArchBaseAppendNvdimmImage(t *testing.T) {
var devices []govmmQemu.Device
assert := assert.New(t)
qemuArchBase := newQemuArchBase()
image, err := os.CreateTemp("", "img")
assert.NoError(err)
defer image.Close()
defer os.Remove(image.Name())
imageStat, err := image.Stat()
assert.NoError(err)
devices, err = qemuArchBase.appendNvdimmImage(devices, image.Name())
assert.NoError(err)
assert.Len(devices, 1)
expectedOut := []govmmQemu.Device{
govmmQemu.Object{
Driver: govmmQemu.NVDIMM,
Type: govmmQemu.MemoryBackendFile,
DeviceID: "nv0",
ID: "mem0",
MemPath: image.Name(),
Size: (uint64)(imageStat.Size()),
ReadOnly: true,
},
}
assert.Equal(expectedOut, devices)
}
func TestQemuArchBaseAppendBridges(t *testing.T) {
var devices []govmmQemu.Device
assert := assert.New(t)

View File

@@ -10,6 +10,7 @@ package virtcontainers
import (
"context"
"fmt"
"os"
"time"
govmmQemu "github.com/kata-containers/kata-containers/src/runtime/pkg/govmm/qemu"
@@ -68,10 +69,9 @@ func newQemuArch(config HypervisorConfig) (qemuArch, error) {
kernelParamsDebug: kernelParamsDebug,
kernelParams: kernelParams,
disableNvdimm: config.DisableImageNvdimm,
// DAX is disabled on ARM due to a kernel panic in caches_clean_inval_pou.
dax: false,
protection: noneProtection,
legacySerial: config.LegacySerial,
dax: true,
protection: noneProtection,
legacySerial: config.LegacySerial,
},
measurementAlgo: config.MeasurementAlgo,
}
@@ -109,6 +109,35 @@ func (q *qemuArm64) appendImage(ctx context.Context, devices []govmmQemu.Device,
return q.appendBlockImage(ctx, devices, path)
}
// There is no nvdimm/readonly feature in qemu 5.1 which is used by arm64 for now,
// so we temporarily add this specific implementation for arm64 here until
// the qemu used by arm64 is capable for that feature
func (q *qemuArm64) appendNvdimmImage(devices []govmmQemu.Device, path string) ([]govmmQemu.Device, error) {
imageFile, err := os.Open(path)
if err != nil {
return nil, err
}
defer imageFile.Close()
imageStat, err := imageFile.Stat()
if err != nil {
return nil, err
}
object := govmmQemu.Object{
Driver: govmmQemu.NVDIMM,
Type: govmmQemu.MemoryBackendFile,
DeviceID: "nv0",
ID: "mem0",
MemPath: path,
Size: (uint64)(imageStat.Size()),
}
devices = append(devices, object)
return devices, nil
}
func (q *qemuArm64) setIgnoreSharedMemoryMigrationCaps(_ context.Context, _ *govmmQemu.QMP) error {
// x-ignore-shared not support in arm64 for now
return nil

View File

@@ -130,6 +130,39 @@ func TestQemuArm64AppendImage(t *testing.T) {
assert.Equal(expectedOut, devices)
}
// TestQemuArm64AppendNvdimmImage checks that appendNvdimmImage produces a
// single NVDIMM memory-backend-file object backed by the given image path
// and sized to the image file's size.
func TestQemuArm64AppendNvdimmImage(t *testing.T) {
var devices []govmmQemu.Device
assert := assert.New(t)
// Use an empty temp file as the "image"; only its path and size matter.
f, err := os.CreateTemp("", "img")
assert.NoError(err)
defer func() { _ = f.Close() }()
defer func() { _ = os.Remove(f.Name()) }()
imageStat, err := f.Stat()
assert.NoError(err)
cfg := qemuConfig(QemuVirt)
cfg.ImagePath = f.Name()
arm64, err := newQemuArch(cfg)
assert.NoError(err)
// Expected device mirrors what appendNvdimmImage constructs.
expectedOut := []govmmQemu.Device{
govmmQemu.Object{
Driver: govmmQemu.NVDIMM,
Type: govmmQemu.MemoryBackendFile,
DeviceID: "nv0",
ID: "mem0",
MemPath: f.Name(),
Size: (uint64)(imageStat.Size()),
},
}
devices, err = arm64.appendNvdimmImage(devices, f.Name())
assert.NoError(err)
assert.Equal(expectedOut, devices)
}
func TestQemuArm64WithInitrd(t *testing.T) {
assert := assert.New(t)

View File

@@ -52,10 +52,7 @@ mem/B # For terms like "virtio-mem"
memdisk/B
MDEV/AB
NEMU/AB
NFD/AB # Node Feature Discovery
NIC/AB
nodeSelector/B # Kubernetes RuntimeClass scheduling field
nodeSelectors/B
nv/AB # NVIDIA abbreviation (lowercase)
NVDIMM/AB
OCI/AB
@@ -77,20 +74,15 @@ QEMU/AB
RBAC/AB
RDMA/AB
RNG/AB
RuntimeClass/B # Kubernetes resource (node.k8s.io)
RuntimeClasses/B
SaaS/B # Software as a Service
SCSI/AB
SDK/AB
seccomp # secure computing mode
SHA/AB
SEL/AB # IBM Secure Execution for Linux
SPDX/AB
SRIOV/AB
SEV-SNP/B # AMD Secure Encrypted Virtualization - Secure Nested Paging
SVG/AB
TBD/AB
TEE/AB # Trusted Execution Environment
TOC/AB
TOML/AB
TTY/AB

View File

@@ -1,4 +1,4 @@
417
409
ACPI/AB
ACS/AB
API/AB
@@ -93,7 +93,6 @@ Mellanox/B
Minikube/B
MonitorTest/A
NEMU/AB
NFD/AB
NIC/AB
NVDIMM/AB
NVIDIA/A
@@ -135,14 +134,10 @@ RBAC/AB
RDMA/AB
RHEL/B
RNG/AB
RuntimeClass/B
RuntimeClasses/B
Rustlang/B
SCSI/AB
SDK/AB
SEL/AB
SELinux/B
SEV-SNP/B
SHA/AB
SLES/B
SPDX/AB
@@ -158,7 +153,6 @@ Submodule/A
Sysbench/B
TBD/AB
TDX
TEE/AB
TOC/AB
TOML/AB
TTY/AB
@@ -312,8 +306,6 @@ nack/AB
namespace/ABCD
netlink
netns/AB
nodeSelector/B
nodeSelectors/B
nv/AB
nvidia/A
onwards

View File

@@ -175,7 +175,7 @@ function deploy_kata() {
ANNOTATIONS="default_vcpus"
if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
ANNOTATIONS="image kernel default_vcpus cc_init_data"
ANNOTATIONS="image kernel default_vcpus disable_image_nvdimm cc_init_data"
fi
if [[ "${KATA_HYPERVISOR}" = "qemu" ]]; then
ANNOTATIONS="image initrd kernel default_vcpus"

View File

@@ -99,31 +99,23 @@ add_annotations_to_yaml() {
esac
}
# Add the cbl-mariner specific kernel and image path annotations to a single
# workload YAML file.
#
# $1 - path to the workload YAML file to annotate.
add_cbl_mariner_annotation_to_yaml() {
local -r yaml_file="$1"
# Mariner uses the cloud-hypervisor kernel and a mariner-specific guest image.
local -r mariner_annotation_kernel="io.katacontainers.config.hypervisor.kernel"
local -r mariner_kernel_path="/usr/share/cloud-hypervisor/vmlinux.bin"
local -r mariner_annotation_image="io.katacontainers.config.hypervisor.image"
local -r mariner_image_path="/opt/kata/share/kata-containers/kata-containers-mariner.img"
add_annotations_to_yaml "${yaml_file}" "${mariner_annotation_kernel}" "${mariner_kernel_path}"
add_annotations_to_yaml "${yaml_file}" "${mariner_annotation_image}" "${mariner_image_path}"
}
add_cbl_mariner_specific_annotations() {
if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
info "Adding annotations for cbl-mariner"
info "Add kernel and image path and annotations for cbl-mariner"
local mariner_annotation_kernel="io.katacontainers.config.hypervisor.kernel"
local mariner_kernel_path="/usr/share/cloud-hypervisor/vmlinux.bin"
local mariner_annotation_image="io.katacontainers.config.hypervisor.image"
local mariner_image_path="/opt/kata/share/kata-containers/kata-containers-mariner.img"
local mariner_annotation_disable_image_nvdimm="io.katacontainers.config.hypervisor.disable_image_nvdimm"
local mariner_disable_image_nvdimm=true
for K8S_TEST_YAML in runtimeclass_workloads_work/*.yaml
do
add_cbl_mariner_annotation_to_yaml "${K8S_TEST_YAML}"
done
for K8S_TEST_YAML in runtimeclass_workloads_work/openvpn/*.yaml
do
add_cbl_mariner_annotation_to_yaml "${K8S_TEST_YAML}"
add_annotations_to_yaml "${K8S_TEST_YAML}" "${mariner_annotation_kernel}" "${mariner_kernel_path}"
add_annotations_to_yaml "${K8S_TEST_YAML}" "${mariner_annotation_image}" "${mariner_image_path}"
add_annotations_to_yaml "${K8S_TEST_YAML}" "${mariner_annotation_disable_image_nvdimm}" "${mariner_disable_image_nvdimm}"
done
fi
}

View File

@@ -20,7 +20,6 @@ readonly BUILD_DIR="/kata-containers/tools/packaging/kata-deploy/local-build/bui
script_dir="$(dirname "$(readlink -f "$0")")"
readonly SCRIPT_DIR="${script_dir}/nvidia"
KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-}
AGENT_POLICY="${AGENT_POLICY:-no}"
NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:?NVIDIA_GPU_STACK must be set}

View File

@@ -58,7 +58,6 @@ REPO_URL=${REPO_URL:-""}
REPO_URL_X86_64=${REPO_URL_X86_64:-""}
REPO_COMPONENTS=${REPO_COMPONENTS:-""}
KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-""}
NVIDIA_GPU_STACK=${NVIDIA_GPU_STACK:-""}
BUILD_VARIANT=${BUILD_VARIANT:-""}
@@ -582,7 +581,6 @@ build_rootfs_distro()
--env AGENT_POLICY="${AGENT_POLICY}" \
--env CONFIDENTIAL_GUEST="${CONFIDENTIAL_GUEST}" \
--env NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK}" \
--env KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN}" \
-v "${repo_dir}":"/kata-containers" \
-v "${ROOTFS_DIR}":"/rootfs" \
-v "${script_dir}/../scripts":"/scripts" \

View File

@@ -291,45 +291,15 @@ fn remove_custom_runtime_configs(config: &Config) -> Result<()> {
/// Note: The src parameter is kept to allow for unit testing with temporary directories,
/// even though in production it always uses /opt/kata-artifacts/opt/kata
///
/// Symlinks in the source tree are preserved at the destination (recreated as symlinks
/// instead of copying the target file). Absolute targets under the source root are
/// rewritten to the destination root so they remain valid.
fn copy_artifacts(src: &str, dst: &str) -> Result<()> {
let src_path = Path::new(src);
for entry in WalkDir::new(src).follow_links(false) {
for entry in WalkDir::new(src) {
let entry = entry?;
let src_path_entry = entry.path();
let relative_path = src_path_entry.strip_prefix(src)?;
let src_path = entry.path();
let relative_path = src_path.strip_prefix(src)?;
let dst_path = Path::new(dst).join(relative_path);
if entry.file_type().is_dir() {
fs::create_dir_all(&dst_path)?;
} else if entry.file_type().is_symlink() {
// Preserve symlinks: create a symlink at destination instead of copying the target
let link_target = fs::read_link(src_path_entry)
.with_context(|| format!("Failed to read symlink: {:?}", src_path_entry))?;
let new_target: std::path::PathBuf = if link_target.is_absolute() {
// Rewrite absolute targets that point inside the source tree
if let Ok(rel) = link_target.strip_prefix(src_path) {
Path::new(dst).join(rel)
} else {
link_target.into()
}
} else {
link_target.into()
};
if let Some(parent) = dst_path.parent() {
fs::create_dir_all(parent)?;
}
match fs::remove_file(&dst_path) {
Ok(()) => {}
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
Err(e) => return Err(e.into()),
}
std::os::unix::fs::symlink(&new_target, &dst_path)
.with_context(|| format!("Failed to create symlink {:?} -> {:?}", dst_path, new_target))?;
} else {
if let Some(parent) = dst_path.parent() {
fs::create_dir_all(parent)?;
@@ -347,7 +317,7 @@ fn copy_artifacts(src: &str, dst: &str) -> Result<()> {
Err(e) => return Err(e.into()), // Other errors should be propagated
}
fs::copy(src_path_entry, &dst_path)?;
fs::copy(src_path, &dst_path)?;
}
}
Ok(())
@@ -918,54 +888,65 @@ async fn configure_mariner(config: &Config) -> Result<()> {
#[cfg(test)]
mod tests {
use super::*;
use rstest::rstest;
#[rstest]
#[case("qemu", "qemu")]
#[case("qemu-tdx", "qemu")]
#[case("qemu-snp", "qemu")]
#[case("qemu-se", "qemu")]
#[case("qemu-coco-dev", "qemu")]
#[case("qemu-cca", "qemu")]
#[case("qemu-nvidia-gpu", "qemu")]
#[case("qemu-nvidia-gpu-tdx", "qemu")]
#[case("qemu-nvidia-gpu-snp", "qemu")]
#[case("qemu-runtime-rs", "qemu")]
#[case("qemu-coco-dev-runtime-rs", "qemu")]
#[case("qemu-se-runtime-rs", "qemu")]
#[case("qemu-snp-runtime-rs", "qemu")]
#[case("qemu-tdx-runtime-rs", "qemu")]
fn test_get_hypervisor_name_qemu_variants(#[case] shim: &str, #[case] expected: &str) {
assert_eq!(get_hypervisor_name(shim).unwrap(), expected);
#[test]
fn test_get_hypervisor_name_qemu_variants() {
// Test all QEMU variants
assert_eq!(get_hypervisor_name("qemu").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-tdx").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-snp").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-se").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-coco-dev").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-cca").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-nvidia-gpu").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-nvidia-gpu-tdx").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-nvidia-gpu-snp").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-runtime-rs").unwrap(), "qemu");
assert_eq!(
get_hypervisor_name("qemu-coco-dev-runtime-rs").unwrap(),
"qemu"
);
assert_eq!(get_hypervisor_name("qemu-se-runtime-rs").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-snp-runtime-rs").unwrap(), "qemu");
assert_eq!(get_hypervisor_name("qemu-tdx-runtime-rs").unwrap(), "qemu");
}
#[rstest]
#[case("clh", "clh")]
#[case("cloud-hypervisor", "cloud-hypervisor")]
#[case("dragonball", "dragonball")]
#[case("fc", "firecracker")]
#[case("firecracker", "firecracker")]
#[case("remote", "remote")]
fn test_get_hypervisor_name_other_hypervisors(#[case] shim: &str, #[case] expected: &str) {
assert_eq!(get_hypervisor_name(shim).unwrap(), expected);
#[test]
fn test_get_hypervisor_name_other_hypervisors() {
// Test other hypervisors
assert_eq!(get_hypervisor_name("clh").unwrap(), "clh");
assert_eq!(
get_hypervisor_name("cloud-hypervisor").unwrap(),
"cloud-hypervisor"
);
assert_eq!(get_hypervisor_name("dragonball").unwrap(), "dragonball");
assert_eq!(get_hypervisor_name("fc").unwrap(), "firecracker");
assert_eq!(get_hypervisor_name("firecracker").unwrap(), "firecracker");
assert_eq!(get_hypervisor_name("remote").unwrap(), "remote");
}
#[rstest]
#[case("")]
#[case("unknown-shim")]
#[case("custom")]
fn test_get_hypervisor_name_unknown(#[case] shim: &str) {
let result = get_hypervisor_name(shim);
#[test]
fn test_get_hypervisor_name_unknown() {
// Test unknown shim returns error with clear message
let result = get_hypervisor_name("unknown-shim");
assert!(result.is_err(), "Unknown shim should return an error");
let err_msg = result.unwrap_err().to_string();
assert!(
err_msg.contains(&format!("Unknown shim '{}'", shim)),
err_msg.contains("Unknown shim 'unknown-shim'"),
"Error message should mention the unknown shim"
);
assert!(
err_msg.contains("Valid shims are:"),
"Error message should list valid shims"
);
let result = get_hypervisor_name("custom");
assert!(result.is_err(), "Custom shim should return an error");
let err_msg = result.unwrap_err().to_string();
assert!(
err_msg.contains("Unknown shim 'custom'"),
"Error message should mention the custom shim"
);
}
#[test]
@@ -1042,36 +1023,10 @@ mod tests {
}
#[test]
fn test_copy_artifacts_preserves_symlinks() {
let src_dir = tempfile::tempdir().unwrap();
let dst_dir = tempfile::tempdir().unwrap();
// Create a real file and a symlink pointing to it
let real_file = src_dir.path().join("real-file.txt");
fs::write(&real_file, "actual content").unwrap();
let link_path = src_dir.path().join("link-to-real");
std::os::unix::fs::symlink(&real_file, &link_path).unwrap();
copy_artifacts(
src_dir.path().to_str().unwrap(),
dst_dir.path().to_str().unwrap(),
)
.unwrap();
let dst_link = dst_dir.path().join("link-to-real");
let dst_real = dst_dir.path().join("real-file.txt");
assert!(dst_real.exists(), "real file should be copied");
assert!(dst_link.is_symlink(), "destination should be a symlink");
assert_eq!(
fs::read_link(&dst_link).unwrap(),
dst_real,
"symlink should point to the real file in the same tree"
);
assert_eq!(
fs::read_to_string(&dst_link).unwrap(),
"actual content",
"following the symlink should yield the real content"
);
fn test_get_hypervisor_name_empty() {
let result = get_hypervisor_name("");
assert!(result.is_err());
let err_msg = result.unwrap_err().to_string();
assert!(err_msg.contains("Unknown shim"));
}
}

View File

@@ -113,7 +113,7 @@ fn write_containerd_runtime_config(
// and setting the snapshotter only on the runtime plugin is not enough for image
// pull/prepare.
//
// The images plugin must have runtime_platforms.<runtime>.snapshotter so it
// The images plugin must have runtime_platform.<runtime>.snapshotter so it
// uses the correct snapshotter per runtime (e.g. nydus, erofs).
//
// A PR on the containerd side is open so we can rely on the runtime plugin
@@ -122,7 +122,7 @@ fn write_containerd_runtime_config(
toml_utils::set_toml_value(
config_file,
&format!(
".plugins.{}.runtime_platforms.\"{}\".snapshotter",
".plugins.{}.runtime_platform.\"{}\".snapshotter",
CONTAINERD_CRI_IMAGES_PLUGIN_ID,
params.runtime_name
),
@@ -569,167 +569,102 @@ pub fn snapshotter_handler_mapping_validation_check(config: &Config) -> Result<(
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::toml as toml_utils;
use rstest::rstest;
use std::path::Path;
use tempfile::NamedTempFile;
fn make_params(
runtime_name: &str,
snapshotter: Option<&str>,
) -> ContainerdRuntimeParams {
ContainerdRuntimeParams {
runtime_name: runtime_name.to_string(),
runtime_path: "\"/opt/kata/bin/kata-runtime\"".to_string(),
config_path: "\"/opt/kata/share/defaults/kata-containers/configuration-qemu.toml\""
.to_string(),
pod_annotations: "[\"io.katacontainers.*\"]",
snapshotter: snapshotter.map(|s| s.to_string()),
}
#[test]
fn test_check_containerd_snapshotter_version_support_1_6_with_mapping() {
// Version 1.6 with snapshotter mapping should fail
let result = check_containerd_snapshotter_version_support("containerd://1.6.28", true);
assert!(result.is_err());
assert!(result
.unwrap_err()
.to_string()
.contains("kata-deploy only supports snapshotter configuration with containerd 1.7 or newer"));
}
/// CRI images runtime_platforms snapshotter is set only for v3 config when a snapshotter is configured.
#[rstest]
#[case(CONTAINERD_V3_RUNTIME_PLUGIN_ID, Some("\"nydus\""), "kata-qemu", true)]
#[case(CONTAINERD_V2_CRI_PLUGIN_ID, Some("\"nydus\""), "kata-qemu", false)]
#[case(CONTAINERD_V3_RUNTIME_PLUGIN_ID, None, "kata-qemu", false)]
#[case(CONTAINERD_V3_RUNTIME_PLUGIN_ID, Some("\"erofs\""), "kata-clh", true)]
fn test_write_containerd_runtime_config_cri_images_runtime_platforms_snapshotter(
#[case] pluginid: &str,
#[case] snapshotter: Option<&str>,
#[case] runtime_name: &str,
#[case] expect_runtime_platforms_set: bool,
) {
let file = NamedTempFile::new().unwrap();
let path = file.path();
std::fs::write(path, "").unwrap();
#[test]
fn test_check_containerd_snapshotter_version_support_1_6_without_mapping() {
// Version 1.6 without snapshotter mapping should pass (no mapping means no check needed)
let result = check_containerd_snapshotter_version_support("containerd://1.6.28", false);
assert!(result.is_ok());
}
let params = make_params(runtime_name, snapshotter);
write_containerd_runtime_config(path, pluginid, &params).unwrap();
#[test]
fn test_check_containerd_snapshotter_version_support_1_7_with_mapping() {
// Version 1.7 with snapshotter mapping should pass
let result = check_containerd_snapshotter_version_support("containerd://1.7.15", true);
assert!(result.is_ok());
}
let images_snapshotter_path = format!(
".plugins.\"io.containerd.cri.v1.images\".runtime_platforms.\"{}\".snapshotter",
runtime_name
);
let result = toml_utils::get_toml_value(Path::new(path), &images_snapshotter_path);
#[test]
fn test_check_containerd_snapshotter_version_support_2_0_with_mapping() {
// Version 2.0 with snapshotter mapping should pass
let result = check_containerd_snapshotter_version_support("containerd://2.0.0", true);
assert!(result.is_ok());
}
if expect_runtime_platforms_set {
let value = result.unwrap_or_else(|e| {
panic!(
"expected CRI images runtime_platforms.{} snapshotter to be set: {}",
runtime_name, e
)
});
assert_eq!(
value,
snapshotter.unwrap().trim_matches('"'),
"runtime_platforms snapshotter value"
);
} else {
#[test]
fn test_check_containerd_snapshotter_version_support_without_prefix() {
// Version without containerd:// prefix should still work
let result = check_containerd_snapshotter_version_support("1.6.28", true);
assert!(result.is_err());
}
#[test]
fn test_check_containerd_snapshotter_version_support_1_6_variants() {
// Test various 1.6.x versions
assert!(check_containerd_snapshotter_version_support("containerd://1.6.0", true).is_err());
assert!(check_containerd_snapshotter_version_support("containerd://1.6.28", true).is_err());
assert!(check_containerd_snapshotter_version_support("containerd://1.6.999", true).is_err());
}
#[test]
fn test_check_containerd_snapshotter_version_support_1_7_variants() {
// Test various 1.7+ versions should pass
assert!(check_containerd_snapshotter_version_support("containerd://1.7.0", true).is_ok());
assert!(check_containerd_snapshotter_version_support("containerd://1.7.15", true).is_ok());
assert!(check_containerd_snapshotter_version_support("containerd://1.8.0", true).is_ok());
}
#[test]
fn test_check_containerd_erofs_version_support() {
// Versions that should pass (2.2.0+)
let passing_versions = [
"containerd://2.2.0",
"containerd://2.2.0-rc.1",
"containerd://2.2.1",
"containerd://2.3.0",
"containerd://3.0.0",
"containerd://2.3.0-beta.0",
"2.2.0", // without prefix
];
for version in passing_versions {
assert!(
result.is_err(),
"expected CRI images runtime_platforms.{} snapshotter not to be set for pluginid={:?} snapshotter={:?}",
runtime_name,
pluginid,
snapshotter
check_containerd_erofs_version_support(version).is_ok(),
"Expected {} to pass",
version
);
}
}
/// Written containerd config (e.g. drop-in) must not start with blank lines when written to an initially empty file.
#[rstest]
#[case(CONTAINERD_V3_RUNTIME_PLUGIN_ID)]
#[case(CONTAINERD_V2_CRI_PLUGIN_ID)]
fn test_write_containerd_runtime_config_empty_file_no_leading_newlines(
#[case] pluginid: &str,
) {
let file = NamedTempFile::new().unwrap();
let path = file.path();
std::fs::write(path, "").unwrap();
let params = make_params("kata-qemu", Some("\"nydus\""));
write_containerd_runtime_config(path, pluginid, &params).unwrap();
let content = std::fs::read_to_string(path).unwrap();
assert!(
!content.starts_with('\n'),
"containerd config must not start with newline(s), got {} leading newlines (pluginid={})",
content.chars().take_while(|&c| c == '\n').count(),
pluginid
);
assert!(
content.trim_start().starts_with('['),
"config should start with a TOML table"
);
}
#[rstest]
#[case("containerd://1.6.28", true, false, Some("kata-deploy only supports snapshotter configuration with containerd 1.7 or newer"))]
#[case("containerd://1.6.28", false, true, None)]
#[case("containerd://1.6.0", true, false, None)]
#[case("containerd://1.6.999", true, false, None)]
#[case("containerd://1.7.0", true, true, None)]
#[case("containerd://1.7.15", true, true, None)]
#[case("containerd://1.8.0", true, true, None)]
#[case("containerd://2.0.0", true, true, None)]
#[case("1.6.28", true, false, None)]
fn test_check_containerd_snapshotter_version_support(
#[case] version: &str,
#[case] has_mapping: bool,
#[case] expect_ok: bool,
#[case] expected_error_substring: Option<&str>,
) {
let result = check_containerd_snapshotter_version_support(version, has_mapping);
if expect_ok {
assert!(result.is_ok(), "expected ok for version={} has_mapping={}", version, has_mapping);
} else {
assert!(result.is_err(), "expected err for version={} has_mapping={}", version, has_mapping);
if let Some(sub) = expected_error_substring {
assert!(
result.unwrap_err().to_string().contains(sub),
"error should contain {:?}",
sub
);
}
// Versions that should fail (< 2.2.0)
let failing_versions = [
("containerd://2.1.0", "containerd must be 2.2.0 or newer"),
("containerd://2.1.5-rc.1", "containerd must be 2.2.0 or newer"),
("containerd://2.0.0", "containerd must be 2.2.0 or newer"),
("containerd://1.7.0", "containerd must be 2.2.0 or newer"),
("containerd://1.6.28", "containerd must be 2.2.0 or newer"),
("2.1.0", "containerd must be 2.2.0 or newer"), // without prefix
("invalid", "Invalid containerd version format"),
("containerd://abc.2.0", "Failed to parse major version"),
];
for (version, expected_error) in failing_versions {
let result = check_containerd_erofs_version_support(version);
assert!(result.is_err(), "Expected {} to fail", version);
assert!(
result.unwrap_err().to_string().contains(expected_error),
"Expected error for {} to contain '{}'",
version,
expected_error
);
}
}
#[rstest]
#[case("containerd://2.2.0")]
#[case("containerd://2.2.0-rc.1")]
#[case("containerd://2.2.1")]
#[case("containerd://2.3.0")]
#[case("containerd://3.0.0")]
#[case("containerd://2.3.0-beta.0")]
#[case("2.2.0")]
fn test_check_containerd_erofs_version_support_passing(#[case] version: &str) {
assert!(
check_containerd_erofs_version_support(version).is_ok(),
"Expected {} to pass",
version
);
}
#[rstest]
#[case("containerd://2.1.0", "containerd must be 2.2.0 or newer")]
#[case("containerd://2.1.5-rc.1", "containerd must be 2.2.0 or newer")]
#[case("containerd://2.0.0", "containerd must be 2.2.0 or newer")]
#[case("containerd://1.7.0", "containerd must be 2.2.0 or newer")]
#[case("containerd://1.6.28", "containerd must be 2.2.0 or newer")]
#[case("2.1.0", "containerd must be 2.2.0 or newer")]
#[case("invalid", "Invalid containerd version format")]
#[case("containerd://abc.2.0", "Failed to parse major version")]
fn test_check_containerd_erofs_version_support_failing(
#[case] version: &str,
#[case] expected_error: &str,
) {
let result = check_containerd_erofs_version_support(version);
assert!(result.is_err(), "Expected {} to fail", version);
assert!(
result.unwrap_err().to_string().contains(expected_error),
"Expected error for {} to contain '{}'",
version,
expected_error
);
}
}

View File

@@ -65,23 +65,17 @@ fn split_non_toml_header(content: &str) -> (&str, &str) {
/// Write a TOML file with an optional non-TOML header (e.g. K3s template line).
/// Ensures the header ends with a newline before the TOML body.
/// Trims leading newlines from the serialized document to avoid many blank lines
/// when the file was initially empty (e.g. containerd drop-in).
fn write_toml_with_header(
file_path: &Path,
header: &str,
doc: &DocumentMut,
) -> Result<()> {
let normalized_header = if header.is_empty() {
String::new()
} else if header.ends_with('\n') {
let normalized_header = if header.ends_with('\n') {
header.to_string()
} else {
format!("{header}\n")
};
let body = doc.to_string();
let body_trimmed = body.trim_start_matches('\n');
std::fs::write(file_path, format!("{}{}", normalized_header, body_trimmed))
std::fs::write(file_path, format!("{}{}", normalized_header, doc.to_string()))
.with_context(|| format!("Failed to write TOML file: {file_path:?}"))?;
Ok(())
}
@@ -615,37 +609,6 @@ mod tests {
assert!(content.contains("runtime_type"));
}
#[rstest]
#[case("", "")]
#[case("{{ template \"base\" . }}\n", "{{ template \"base\" . }}\n")]
fn test_set_toml_value_empty_file_no_leading_newlines(
#[case] initial_content: &str,
#[case] expected_prefix: &str,
) {
let file = NamedTempFile::new().unwrap();
let path = file.path();
std::fs::write(path, initial_content).unwrap();
set_toml_value(
path,
".plugins.\"io.containerd.cri.v1.runtime\".containerd.runtimes.kata-qemu.runtime_type",
"\"io.containerd.kata-qemu.v2\"",
)
.unwrap();
let content = std::fs::read_to_string(path).unwrap();
assert!(content.starts_with(expected_prefix), "header/prefix must be preserved");
let body_start = content.strip_prefix(expected_prefix).unwrap();
assert!(
!body_start.starts_with('\n'),
"written TOML body must not start with newline(s) after header, got {} leading newlines",
body_start.chars().take_while(|&c| c == '\n').count()
);
assert!(
body_start.trim_start().starts_with('['),
"body should start with a TOML table"
);
}
#[test]
fn test_get_toml_value() {
let file = NamedTempFile::new().unwrap();
@@ -778,22 +741,24 @@ mod tests {
assert_eq!(agent_debug, "false");
}
#[rstest]
#[case("test.string_value", "test_string", "test_string")]
#[case("test.bool_value", "true", "true")]
#[case("test.int_value", "42", "42")]
fn test_toml_value_types(
#[case] path: &str,
#[case] value: &str,
#[case] expected: &str,
) {
#[test]
fn test_toml_value_types() {
let file = NamedTempFile::new().unwrap();
let file_path = file.path();
std::fs::write(file_path, "").unwrap();
let path = file.path();
std::fs::write(path, "").unwrap();
set_toml_value(file_path, path, value).unwrap();
let got = get_toml_value(file_path, path).unwrap();
assert_eq!(got, expected);
// Test different value types
set_toml_value(path, "test.string_value", "test_string").unwrap();
set_toml_value(path, "test.bool_value", "true").unwrap();
set_toml_value(path, "test.int_value", "42").unwrap();
let string_val = get_toml_value(path, "test.string_value").unwrap();
let bool_val = get_toml_value(path, "test.bool_value").unwrap();
let int_val = get_toml_value(path, "test.int_value").unwrap();
assert_eq!(string_val, "test_string");
assert_eq!(bool_val, "true");
assert_eq!(int_val, "42");
}
#[test]
@@ -1305,20 +1270,17 @@ kernel_params = "console=hvc0"
.contains("Failed to read TOML file"));
}
#[rstest]
#[case("get")]
#[case("set")]
fn test_invalid_toml(#[case] op: &str) {
#[test]
fn test_get_toml_value_invalid_toml() {
let temp_file = NamedTempFile::new().unwrap();
let temp_path = temp_file.path();
// Write invalid TOML
std::fs::write(temp_path, "this is not [ valid toml {").unwrap();
let result = match op {
"get" => get_toml_value(temp_path, "some.path").map(drop),
"set" => set_toml_value(temp_path, "some.path", "\"value\""),
_ => panic!("unknown op"),
};
assert!(result.is_err(), "Should fail parsing invalid TOML (op={})", op);
let result = get_toml_value(temp_path, "some.path");
assert!(result.is_err(), "Should fail parsing invalid TOML");
// Just verify it's an error, don't check specific message
}
#[test]
@@ -1343,6 +1305,18 @@ kernel_params = "console=hvc0"
assert!(result.is_err());
}
#[test]
fn test_set_toml_value_invalid_toml() {
let temp_file = NamedTempFile::new().unwrap();
let temp_path = temp_file.path();
// Write invalid TOML
std::fs::write(temp_path, "this is not [ valid toml {").unwrap();
let result = set_toml_value(temp_path, "some.path", "\"value\"");
assert!(result.is_err(), "Should fail parsing invalid TOML");
}
#[test]
fn test_append_to_toml_array_nonexistent_file() {
let result = append_to_toml_array(
@@ -1353,25 +1327,30 @@ kernel_params = "console=hvc0"
assert!(result.is_err());
}
#[rstest]
#[case("append")]
#[case("get")]
fn test_toml_array_not_an_array(#[case] op: &str) {
#[test]
fn test_append_to_toml_array_not_an_array() {
let temp_file = NamedTempFile::new().unwrap();
let temp_path = temp_file.path();
// Write TOML with a string, not an array
std::fs::write(temp_path, "[section]\nkey = \"value\"").unwrap();
let result = match op {
"append" => append_to_toml_array(temp_path, "section.key", "\"item\"").map(drop),
"get" => get_toml_array(temp_path, "section.key").map(drop),
_ => panic!("unknown op"),
};
assert!(result.is_err(), "op={}", op);
assert!(
result.unwrap_err().to_string().contains("not an array"),
"op={}",
op
);
let result = append_to_toml_array(temp_path, "section.key", "\"item\"");
assert!(result.is_err());
assert!(result.unwrap_err().to_string().contains("not an array"));
}
#[test]
fn test_get_toml_array_not_an_array() {
let temp_file = NamedTempFile::new().unwrap();
let temp_path = temp_file.path();
// Write TOML with a string, not an array
std::fs::write(temp_path, "[section]\nkey = \"value\"").unwrap();
let result = get_toml_array(temp_path, "section.key");
assert!(result.is_err());
assert!(result.unwrap_err().to_string().contains("not an array"));
}
#[test]

View File

@@ -229,7 +229,6 @@ shims:
agent:
httpsProxy: ""
noProxy: ""
# Optional: set runtimeClass.nodeSelector to pin TEE to specific nodes (always applied). If unset, NFD TEE labels are auto-injected when NFD is detected.
# Default shim per architecture
defaultShim:
@@ -312,8 +311,8 @@ helm install kata-deploy oci://ghcr.io/kata-containers/kata-deploy-charts/kata-d
Includes:
- `qemu-snp` - AMD SEV-SNP (amd64)
- `qemu-tdx` - Intel TDX (amd64)
- `qemu-se` - IBM Secure Execution for Linux (SEL) (s390x)
- `qemu-se-runtime-rs` - IBM Secure Execution for Linux (SEL) Rust runtime (s390x)
- `qemu-se` - IBM Secure Execution (s390x)
- `qemu-se-runtime-rs` - IBM Secure Execution Rust runtime (s390x)
- `qemu-cca` - Arm Confidential Compute Architecture (arm64)
- `qemu-coco-dev` - Confidential Containers development (amd64, s390x)
- `qemu-coco-dev-runtime-rs` - Confidential Containers development Rust runtime (amd64, s390x)
@@ -335,27 +334,6 @@ Includes:
**Note**: These example files are located in the chart directory. When installing from the OCI registry, you'll need to download them separately or clone the repository to access them.
### RuntimeClass Node Selectors for TEE Shims
**Manual configuration:** Any `nodeSelector` you set under `shims.<shim>.runtimeClass.nodeSelector`
is **always applied** to that shim's RuntimeClass, whether or not NFD is present. Use this when
you want to pin TEE workloads to specific nodes (e.g. without NFD, or with custom labels).
**Auto-inject when NFD is present:** If you do *not* set a `runtimeClass.nodeSelector` for a
TEE shim, the chart can **automatically inject** NFD-based labels when NFD is detected in the
cluster (deployed by this chart with `node-feature-discovery.enabled=true` or found externally):
- AMD SEV-SNP shims: `amd.feature.node.kubernetes.io/snp: "true"`
- Intel TDX shims: `intel.feature.node.kubernetes.io/tdx: "true"`
- IBM Secure Execution for Linux (SEL) shims (s390x): `feature.node.kubernetes.io/cpu-security.se.enabled: "true"`
The chart uses Helm's `lookup` function to detect NFD (by looking for the
`node-feature-discovery-worker` DaemonSet). Auto-inject only runs when NFD is detected and
no manual `runtimeClass.nodeSelector` is set for that shim.
**Note**: NFD detection requires cluster access. During `helm template` (dry-run without a
cluster), external NFD is not seen, so auto-injected labels are not added. Manual
`runtimeClass.nodeSelector` values are still applied in all cases.
## `RuntimeClass` Management
**NEW**: Starting with Kata Containers v3.23.0, `runtimeClasses` are managed by

View File

@@ -1,68 +1,9 @@
{{- /*
Render a single RuntimeClass. Params (dict): root (.), shim, config, shimConfig,
nameOverride (optional; if set, use as metadata.name for default RC), useShimNodeSelectors.
config supplies the pod overhead (memory/cpu); shimConfig may carry a manual
runtimeClass.nodeSelector that always wins over auto-injected TEE labels.
*/ -}}
{{- define "kata-deploy.runtimeclass" -}}
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
{{- /* multiInstallSuffix takes precedence over nameOverride for the RC name. */ -}}
{{- if .root.Values.env.multiInstallSuffix }}
name: kata-{{ .shim }}-{{ .root.Values.env.multiInstallSuffix }}
{{- else if .nameOverride }}
name: {{ .nameOverride }}
{{- else }}
name: kata-{{ .shim }}
{{- end }}
labels:
app.kubernetes.io/managed-by: kata-deploy
{{- if .root.Values.env.multiInstallSuffix }}
kata-deploy/instance: {{ .root.Values.env.multiInstallSuffix | quote }}
{{- else }}
kata-deploy/instance: "default"
{{- end }}
{{- /* The handler name always matches the per-instance RC naming, even when nameOverride is used. */ -}}
{{- if .root.Values.env.multiInstallSuffix }}
handler: kata-{{ .shim }}-{{ .root.Values.env.multiInstallSuffix }}
{{- else }}
handler: kata-{{ .shim }}
{{- end }}
overhead:
podFixed:
memory: {{ .config.memory | quote }}
cpu: {{ .config.cpu | quote }}
scheduling:
nodeSelector:
katacontainers.io/kata-runtime: "true"
{{- /* TEE shims: snp, tdx, -se, -se-runtime-rs (SE = IBM Secure Execution / SEL) */ -}}
{{- $isTeeShim := or (contains "snp" .shim) (contains "tdx" .shim) (hasSuffix "-se" .shim) (hasSuffix "-se-runtime-rs" .shim) -}}
{{- $isPureTeeShim := and $isTeeShim (not (contains "nvidia-gpu" .shim)) -}}
{{- /* Manual nodeSelector (if set) is always applied; otherwise NFD TEE labels are auto-injected only when useShimNodeSelectors is true. */ -}}
{{- if or (and .shimConfig.runtimeClass .shimConfig.runtimeClass.nodeSelector) (and .useShimNodeSelectors $isPureTeeShim) }}
{{- if and .shimConfig.runtimeClass .shimConfig.runtimeClass.nodeSelector }}
{{- range $key, $value := .shimConfig.runtimeClass.nodeSelector }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- else }}
{{- if contains "snp" .shim }}
amd.feature.node.kubernetes.io/snp: "true"
{{- end }}
{{- if contains "tdx" .shim }}
intel.feature.node.kubernetes.io/tdx: "true"
{{- end }}
{{- if or (hasSuffix "-se" .shim) (hasSuffix "-se-runtime-rs" .shim) }}
feature.node.kubernetes.io/cpu-security.se.enabled: "true"
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
{{- if .Values.runtimeClasses.enabled }}
{{- $multiInstallSuffix := .Values.env.multiInstallSuffix }}
{{- $createDefaultRC := .Values.runtimeClasses.createDefault }}
{{- $defaultRCName := .Values.runtimeClasses.defaultName }}
{{- $nfdEnabled := index .Values "node-feature-discovery" "enabled" | default false }}
{{- $externalNFDNamespace := include "kata-deploy.detectExistingNFD" . | trim -}}
{{- $useShimNodeSelectors := or $nfdEnabled (ne $externalNFDNamespace "") -}}
{{- /* Get enabled shims from structured config using null-aware logic */ -}}
{{- $disableAll := .Values.shims.disableAll | default false -}}
{{- $enabledShims := list -}}
{{- range $shimName, $shimConfig := .Values.shims -}}
@@ -81,6 +22,7 @@ scheduling:
{{- end -}}
{{- end -}}
{{- /* Define runtime class configurations with their overhead settings and node selectors */ -}}
{{- $runtimeClassConfigs := dict
"clh" (dict "memory" "130Mi" "cpu" "250m")
"cloud-hypervisor" (dict "memory" "130Mi" "cpu" "250m")
@@ -104,16 +46,69 @@ scheduling:
"remote" (dict "memory" "120Mi" "cpu" "250m")
}}
{{- /* Create RuntimeClass for each enabled shim; when default RC is requested, emit it by reusing the same template with nameOverride */ -}}
{{- $defaultShim := index .Values.defaultShim "amd64" | default (index .Values.defaultShim "arm64") | default (index .Values.defaultShim "s390x") | default (index .Values.defaultShim "ppc64le") }}
{{- /* Create RuntimeClass for each enabled shim */ -}}
{{- range $shim := $enabledShims }}
{{- $config := index $runtimeClassConfigs $shim }}
{{- $shimConfig := index $.Values.shims $shim }}
{{- if $config }}
{{ include "kata-deploy.runtimeclass" (dict "root" $ "shim" $shim "config" $config "shimConfig" $shimConfig "nameOverride" "" "useShimNodeSelectors" $useShimNodeSelectors) }}
{{- if and $createDefaultRC (not $multiInstallSuffix) (eq $shim $defaultShim) }}
{{ include "kata-deploy.runtimeclass" (dict "root" $ "shim" $shim "config" $config "shimConfig" $shimConfig "nameOverride" $defaultRCName "useShimNodeSelectors" $useShimNodeSelectors) }}
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
{{- if $multiInstallSuffix }}
name: kata-{{ $shim }}-{{ $multiInstallSuffix }}
{{- else }}
name: kata-{{ $shim }}
{{- end }}
labels:
app.kubernetes.io/managed-by: kata-deploy
{{- if $multiInstallSuffix }}
kata-deploy/instance: {{ $multiInstallSuffix | quote }}
{{- else }}
kata-deploy/instance: "default"
{{- end }}
{{- if $multiInstallSuffix }}
handler: kata-{{ $shim }}-{{ $multiInstallSuffix }}
{{- else }}
handler: kata-{{ $shim }}
{{- end }}
overhead:
podFixed:
memory: {{ $config.memory | quote }}
cpu: {{ $config.cpu | quote }}
scheduling:
nodeSelector:
katacontainers.io/kata-runtime: "true"
{{- if and $shimConfig.runtimeClass $shimConfig.runtimeClass.nodeSelector }}
{{- range $key, $value := $shimConfig.runtimeClass.nodeSelector }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- /* Create default RuntimeClass if requested */ -}}
{{- if and $createDefaultRC (not $multiInstallSuffix) }}
{{- /* Get default shim from structured config - use amd64 as the primary reference */ -}}
{{- $defaultShim := index .Values.defaultShim "amd64" | default (index .Values.defaultShim "arm64") | default (index .Values.defaultShim "s390x") | default (index .Values.defaultShim "ppc64le") }}
{{- $defaultConfig := index $runtimeClassConfigs $defaultShim }}
{{- if and $defaultShim $defaultConfig }}
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: {{ $defaultRCName }}
labels:
app.kubernetes.io/managed-by: kata-deploy
kata-deploy/instance: "default"
handler: kata-{{ $defaultShim }}
overhead:
podFixed:
memory: {{ $defaultConfig.memory | quote }}
cpu: {{ $defaultConfig.cpu | quote }}
scheduling:
nodeSelector:
katacontainers.io/kata-runtime: "true"
{{- end }}
{{- end }}
{{- end }}

View File

@@ -17,7 +17,6 @@ shims:
disableAll: true
# Enable TEE shims (qemu-snp, qemu-snp-runtime-rs, qemu-tdx, qemu-tdx-runtime-rs, qemu-se, qemu-se-runtime-rs, qemu-cca, qemu-coco-dev, qemu-coco-dev-runtime-rs)
# NFD TEE labels (snp, tdx, se) are auto-injected into RuntimeClasses when NFD is detected; no need to set nodeSelector here.
qemu-snp:
enabled: true
supportedArches:

View File

@@ -103,7 +103,6 @@ MEASURED_ROOTFS="${MEASURED_ROOTFS:-no}"
USE_CACHE="${USE_CACHE:-}"
BUSYBOX_CONF_FILE=${BUSYBOX_CONF_FILE:-}
NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK:-}"
KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-}
GUEST_HOOKS_TARBALL_NAME="${GUEST_HOOKS_TARBALL_NAME:-}"
EXTRA_PKGS="${EXTRA_PKGS:-}"
REPO_URL="${REPO_URL:-}"
@@ -144,7 +143,6 @@ docker run \
--env USE_CACHE="${USE_CACHE}" \
--env BUSYBOX_CONF_FILE="${BUSYBOX_CONF_FILE}" \
--env NVIDIA_GPU_STACK="${NVIDIA_GPU_STACK}" \
--env KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN}" \
--env GUEST_HOOKS_TARBALL_NAME="${GUEST_HOOKS_TARBALL_NAME}" \
--env EXTRA_PKGS="${EXTRA_PKGS}" \
--env REPO_URL="${REPO_URL}" \

View File

@@ -57,7 +57,6 @@ AGENT_POLICY="${AGENT_POLICY:-yes}"
TARGET_BRANCH="${TARGET_BRANCH:-main}"
PUSH_TO_REGISTRY="${PUSH_TO_REGISTRY:-}"
RELEASE="${RELEASE:-"no"}"
KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN:-}"
RUNTIME_CHOICE="${RUNTIME_CHOICE:-both}"
KERNEL_DEBUG_ENABLED=${KERNEL_DEBUG_ENABLED:-"no"}
INIT_DATA="${INIT_DATA:-yes}"

View File

@@ -23,20 +23,20 @@ pushd ${KATA_DEPLOY_DIR}
arch=$(uname -m)
[ "$arch" = "x86_64" ] && arch="amd64"
# Disable provenance and SBOM so each tag is a single image manifest. quay.io rejects
# pushing multi-arch manifest lists that include attestation manifests ("manifest invalid").
# Single platform so each job pushes one architecture; attestations (provenance/SBOM)
# are kept by default, making the tag an image index (manifest list).
PLATFORM="linux/${arch}"
IMAGE_TAG="${REGISTRY}:kata-containers-$(git rev-parse HEAD)-${arch}"
echo "Building the image"
docker buildx build --platform "${PLATFORM}" --provenance false --sbom false \
echo "Building the image (with provenance and SBOM attestations)"
docker buildx build --platform "${PLATFORM}" \
--tag "${IMAGE_TAG}" --push .
if [ -n "${TAG}" ]; then
ADDITIONAL_TAG="${REGISTRY}:${TAG}"
echo "Building the ${ADDITIONAL_TAG} image"
docker buildx build --platform "${PLATFORM}" --provenance false --sbom false \
docker buildx build --platform "${PLATFORM}" \
--tag "${ADDITIONAL_TAG}" --push .
fi

View File

@@ -31,7 +31,6 @@ readonly default_config_whitelist="${script_dir}/configs/fragments/whitelist.con
# xPU vendor
readonly VENDOR_INTEL="intel"
readonly VENDOR_NVIDIA="nvidia"
readonly KBUILD_SIGN_PIN=${KBUILD_SIGN_PIN:-""}
readonly KERNEL_DEBUG_ENABLED=${KERNEL_DEBUG_ENABLED:-"no"}
#Path to kernel directory
@@ -313,13 +312,6 @@ get_kernel_frag_path() {
all_configs="${all_configs} ${tmpfs_configs}"
fi
if [[ "${KBUILD_SIGN_PIN}" != "" ]]; then
info "Enabling config for module signing"
local sign_configs
sign_configs="$(ls ${common_path}/signing/module_signing.conf)"
all_configs="${all_configs} ${sign_configs}"
fi
if [[ ${KERNEL_DEBUG_ENABLED} == "yes" ]]; then
info "Enable kernel debug"
local debug_configs="$(ls ${common_path}/common/debug.conf)"
@@ -542,16 +534,6 @@ build_kernel_headers() {
if [ "$linux_headers" == "rpm" ]; then
make -j $(nproc) rpm-pkg ARCH="${arch_target}"
fi
# If we encrypt the key earlier it will break the kernel_headers build.
# At this stage the kernel has created the certs/signing_key.pem
# encrypt it for later usage in another job or out-of-tree build
# only encrypt if we have KBUILD_SIGN_PIN set
local key="certs/signing_key.pem"
if [ -n "${KBUILD_SIGN_PIN}" ]; then
[ -e "${key}" ] || die "${key} missing but KBUILD_SIGN_PIN is set"
openssl rsa -aes256 -in ${key} -out ${key} -passout env:KBUILD_SIGN_PIN
fi
popd >>/dev/null
}

View File

@@ -144,10 +144,9 @@ function _publish_multiarch_manifest()
_check_required_env_var "KATA_DEPLOY_IMAGE_TAGS"
_check_required_env_var "KATA_DEPLOY_REGISTRIES"
# Per-arch images are built without provenance/SBOM so each tag is a single image manifest;
# quay.io rejects pushing multi-arch manifest lists that include attestation manifests
# ("manifest invalid"), so we do not enable them for this workflow.
# imagetools create pushes to --tag by default.
# Per-arch tags may be image indexes (image + attestations). Use buildx imagetools create
# so we can merge them; legacy "docker manifest create" rejects manifest list sources.
# imagetools create pushes the new manifest list to --tag by default (no separate push).
for registry in "${REGISTRIES[@]}"; do
for tag in "${IMAGE_TAGS[@]}"; do
docker buildx imagetools create --tag "${registry}:${tag}" \

View File

@@ -26,7 +26,6 @@ DESTDIR=${DESTDIR:-${PWD}}
PREFIX=${PREFIX:-/opt/kata}
container_image="${KERNEL_CONTAINER_BUILDER:-$(get_kernel_image_name)}"
MEASURED_ROOTFS=${MEASURED_ROOTFS:-no}
KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN:-}"
kernel_builder_args="-a ${ARCH:-} $*"
KERNEL_DEBUG_ENABLED=${KERNEL_DEBUG_ENABLED:-"no"}
@@ -69,7 +68,6 @@ container_build+=" --build-arg ARCH=${ARCH:-}"
"${container_engine}" run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
-w "${PWD}" \
--env KERNEL_DEBUG_ENABLED="${KERNEL_DEBUG_ENABLED}" \
--env KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN}" \
--user "$(id -u)":"$(id -g)" \
"${container_image}" \
bash -c "${kernel_builder} ${kernel_builder_args} setup"
@@ -91,7 +89,6 @@ container_build+=" --build-arg ARCH=${ARCH:-}"
-w "${PWD}" \
--env DESTDIR="${DESTDIR}" --env PREFIX="${PREFIX}" \
--env USER="${USER}" \
--env KBUILD_SIGN_PIN="${KBUILD_SIGN_PIN}" \
--user "$(id -u)":"$(id -g)" \
"${container_image}" \
bash -c "${kernel_builder} ${kernel_builder_args} build-headers"

View File

@@ -88,8 +88,8 @@ assets:
qemu:
description: "VMM that uses KVM"
url: "https://github.com/qemu/qemu"
version: "v10.2.1"
tag: "v10.2.1"
version: "v10.2.0"
tag: "v10.2.0"
# Do not include any non-full release versions
# Break the line *without CR or space being appended*, to appease
# yamllint, and note the deliberate ' ' at the end of the expression.
@@ -207,11 +207,11 @@ assets:
kernel:
description: "Linux kernel optimised for virtual machines"
url: "https://cdn.kernel.org/pub/linux/kernel/v6.x/"
version: "v6.18.12"
version: "v6.18.5"
nvidia:
description: "Linux kernel optimised for virtual machines"
url: "https://cdn.kernel.org/pub/linux/kernel/v6.x/"
version: "v6.18.12"
version: "v6.18.5"
kernel-arm-experimental:
description: "Linux kernel with cpu/mem hotplug support on arm64"