Merge commit from fork

runtime-go/rs: Disable virtio-pmem for Cloud Hypervisor
Steve Horsman, 2026-02-19 09:00:56 +00:00, committed by GitHub
18 changed files with 129 additions and 137 deletions

View File

@@ -298,7 +298,7 @@ ifneq (,$(CLHCMD))
KERNELTYPE_CLH = uncompressed
KERNEL_NAME_CLH = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_CLH))
KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH)
-VMROOTFSDRIVER_CLH := virtio-pmem
+VMROOTFSDRIVER_CLH := virtio-blk-pci
DEFSANDBOXCGROUPONLY_CLH := true
DEFSTATICRESOURCEMGMT_CLH := false

View File

@@ -22,6 +22,8 @@ rootfs_type = @DEFROOTFSTYPE@
# Block storage driver to be used when the VM rootfs is backed
# by a block device.
#
+# virtio-pmem is not supported with Cloud Hypervisor.
vm_rootfs_driver = "@VMROOTFSDRIVER_CLH@"
# Path to the firmware.
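Once the Makefile default above is substituted into this template, the generated Cloud Hypervisor configuration should therefore carry vm_rootfs_driver = "virtio-blk-pci" (a sketch of the expected @VMROOTFSDRIVER_CLH@ rendering, not part of the patch).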

View File

@@ -118,13 +118,11 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
// Note how CH handles the different image types:
//
-// - A standard image is specified in PmemConfig.
// - An initrd/initramfs is specified in PayloadConfig.
-// - A confidential guest image is specified by a DiskConfig.
+// - An image is specified in DiskConfig.
+//   Note: pmem is not used as it's not properly supported by Cloud Hypervisor.
// - If TDX is enabled, the firmware (`td-shim` [1]) must be
//   specified in PayloadConfig.
-// - A confidential guest initrd is specified by a PayloadConfig with
-//   firmware.
//
// [1] - https://github.com/confidential-containers/td-shim
let boot_info = cfg.boot_info;
@@ -140,14 +138,6 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
return Err(VmConfigError::NoBootFile);
}
-let pmem = if use_initrd || guest_protection_is_tdx(guest_protection_to_use.clone()) {
-    None
-} else {
-    let pmem = PmemConfig::try_from(&boot_info).map_err(VmConfigError::PmemError)?;
-    Some(vec![pmem])
-};
let payload = Some(
PayloadConfig::try_from((
boot_info.clone(),
@@ -159,7 +149,7 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
let mut disks: Vec<DiskConfig> = vec![];
-if use_image && guest_protection_is_tdx(guest_protection_to_use.clone()) {
+if use_image {
let disk = DiskConfig::try_from(boot_info).map_err(VmConfigError::DiskError)?;
disks.push(disk);
@@ -199,7 +189,6 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
fs,
net,
devices: host_devices,
-pmem,
disks,
vsock: Some(vsock),
rng,
@@ -1656,7 +1645,6 @@ mod tests {
let (memory_info_confidential_guest, mem_config_confidential_guest) =
make_memory_objects(79, usable_max_mem_bytes, true);
-let (_, pmem_config_with_image) = make_bootinfo_pmemconfig_objects(image);
let (machine_info, rng_config) = make_machineinfo_rngconfig_objects(entropy_source);
let payload_firmware = None;
@@ -1664,6 +1652,7 @@ mod tests {
let (boot_info_with_initrd, payload_config_with_initrd) =
make_bootinfo_payloadconfig_objects(kernel, initramfs, payload_firmware, None);
+let (_, disk_config_with_image) = make_bootinfo_diskconfig_objects(image);
let (_, disk_config_confidential_guest_image) = make_bootinfo_diskconfig_objects(image);
let boot_info_tdx_image = BootInfo {
@@ -1762,7 +1751,7 @@ mod tests {
vsock: Some(valid_vsock.clone()),
// rootfs image specific
-pmem: Some(vec![pmem_config_with_image]),
+disks: Some(vec![disk_config_with_image]),
payload: Some(PayloadConfig {
kernel: Some(PathBuf::from(kernel)),

View File

@@ -123,7 +123,12 @@ impl CloudHypervisorInner {
}
}
-pub fn set_hypervisor_config(&mut self, config: HypervisorConfig) {
+pub fn set_hypervisor_config(&mut self, mut config: HypervisorConfig) {
+    // virtio-pmem is not supported for Cloud Hypervisor.
+    if config.boot_info.vm_rootfs_driver == crate::VM_ROOTFS_DRIVER_PMEM {
+        config.boot_info.vm_rootfs_driver = crate::VM_ROOTFS_DRIVER_BLK.to_string();
+    }
self.config = config;
}
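A minimal sketch of the caller-visible effect — a hypothetical unit test, not part of the patch, assuming this crate provides CloudHypervisorInner::new(), a hypervisor_config() accessor, and Default for HypervisorConfig:

#[test]
fn pmem_rootfs_driver_falls_back_to_blk() {
    // Ask for the now-unsupported virtio-pmem rootfs driver.
    let mut config = HypervisorConfig::default();
    config.boot_info.vm_rootfs_driver = crate::VM_ROOTFS_DRIVER_PMEM.to_string();

    let mut inner = CloudHypervisorInner::new();
    inner.set_hypervisor_config(config);

    // The setter above silently falls back to virtio-blk.
    assert_eq!(
        inner.hypervisor_config().boot_info.vm_rootfs_driver,
        crate::VM_ROOTFS_DRIVER_BLK
    );
}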

View File

@@ -15,7 +15,6 @@ use crate::utils::vm_cleanup;
use crate::utils::{bytes_to_megs, get_jailer_root, get_sandbox_path, megs_to_bytes};
use crate::MemoryConfig;
use crate::VM_ROOTFS_DRIVER_BLK;
-use crate::VM_ROOTFS_DRIVER_PMEM;
use crate::{VcpuThreadIds, VmmState};
use anyhow::{anyhow, Context, Result};
use ch_config::ch_api::cloud_hypervisor_vm_netdev_add_with_fds;
@@ -130,12 +129,8 @@ impl CloudHypervisorInner {
let confidential_guest = cfg.security_info.confidential_guest;
// Note that the configuration option hypervisor.block_device_driver is not used.
-let rootfs_driver = if confidential_guest {
-    // PMEM is not available with TDX.
-    VM_ROOTFS_DRIVER_BLK
-} else {
-    VM_ROOTFS_DRIVER_PMEM
-};
+// NVDIMM is not supported for Cloud Hypervisor.
+let rootfs_driver = VM_ROOTFS_DRIVER_BLK;
let rootfs_type = match cfg.boot_info.rootfs_type.is_empty() {
true => DEFAULT_CH_ROOTFS_TYPE,
@@ -155,6 +150,7 @@ impl CloudHypervisorInner {
&cfg.boot_info.kernel_verity_params,
rootfs_driver,
rootfs_type,
+true,
)?;
let mut console_params = if enable_debug {

View File

@@ -150,6 +150,7 @@ impl DragonballInner {
&self.config.boot_info.kernel_verity_params,
&rootfs_driver,
&self.config.boot_info.rootfs_type,
+true,
)?;
kernel_params.append(&mut rootfs_params);
}

View File

@@ -90,6 +90,7 @@ impl FcInner {
&self.config.boot_info.kernel_verity_params,
&self.config.blockdev_info.block_device_driver,
&self.config.boot_info.rootfs_type,
+true,
)?;
kernel_params.append(&mut rootfs_params);
kernel_params.append(&mut KernelParams::from_string(

View File

@@ -10,8 +10,8 @@ use crate::{
VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_BLK_CCW, VM_ROOTFS_DRIVER_MMIO, VM_ROOTFS_DRIVER_PMEM,
VM_ROOTFS_ROOT_BLK, VM_ROOTFS_ROOT_PMEM,
};
-use kata_types::config::LOG_VPORT_OPTION;
use kata_types::config::hypervisor::{parse_kernel_verity_params, VERITY_BLOCK_SIZE_BYTES};
+use kata_types::config::LOG_VPORT_OPTION;
use kata_types::fs::{
VM_ROOTFS_FILESYSTEM_EROFS, VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS,
};
@@ -66,8 +66,7 @@ struct KernelVerityConfig {
}
fn new_kernel_verity_params(params_string: &str) -> Result<Option<KernelVerityConfig>> {
-let cfg = parse_kernel_verity_params(params_string)
-    .map_err(|err| anyhow!(err.to_string()))?;
+let cfg = parse_kernel_verity_params(params_string).map_err(|err| anyhow!(err.to_string()))?;
Ok(cfg.map(|params| KernelVerityConfig {
root_hash: params.root_hash,
@@ -145,6 +144,7 @@ impl KernelParams {
kernel_verity_params: &str,
rootfs_driver: &str,
rootfs_type: &str,
+use_dax: bool,
) -> Result<Self> {
let mut params = vec![];
@@ -153,16 +153,29 @@ impl KernelParams {
params.push(Param::new("root", VM_ROOTFS_ROOT_PMEM));
match rootfs_type {
VM_ROOTFS_FILESYSTEM_EXT4 => {
-params.push(Param::new(
-    "rootflags",
-    "dax,data=ordered,errors=remount-ro ro",
-));
+if use_dax {
+    params.push(Param::new(
+        "rootflags",
+        "dax,data=ordered,errors=remount-ro ro",
+    ));
+} else {
+    params
+        .push(Param::new("rootflags", "data=ordered,errors=remount-ro ro"));
+}
}
VM_ROOTFS_FILESYSTEM_XFS => {
-params.push(Param::new("rootflags", "dax ro"));
+if use_dax {
+    params.push(Param::new("rootflags", "dax ro"));
+} else {
+    params.push(Param::new("rootflags", "ro"));
+}
}
VM_ROOTFS_FILESYSTEM_EROFS => {
-params.push(Param::new("rootflags", "dax ro"));
+if use_dax {
+    params.push(Param::new("rootflags", "dax ro"));
+} else {
+    params.push(Param::new("rootflags", "ro"));
+}
}
_ => {
return Err(anyhow!("Unsupported rootfs type {}", rootfs_type));
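A usage sketch of the extended signature, mirroring the test data below (the rendered strings assume the crate's KernelParams::to_string() and its pmem root-device constant, shown here as a placeholder):

// use_dax only matters for the pmem-backed arm above; the empty first
// argument means no dm-verity kernel parameters are requested.
let with_dax = KernelParams::new_rootfs_kernel_params(
    "", VM_ROOTFS_DRIVER_PMEM, VM_ROOTFS_FILESYSTEM_XFS, true,
)?;
// to_string() -> "root=<pmem root> rootflags=dax ro rootfstype=xfs"

let without_dax = KernelParams::new_rootfs_kernel_params(
    "", VM_ROOTFS_DRIVER_PMEM, VM_ROOTFS_FILESYSTEM_XFS, false,
)?;
// to_string() -> "root=<pmem root> rootflags=ro rootfstype=xfs"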
@@ -346,6 +359,7 @@ mod tests {
struct TestData<'a> {
rootfs_driver: &'a str,
rootfs_type: &'a str,
+use_dax: bool,
expect_params: KernelParams,
result: Result<()>,
}
@@ -353,10 +367,11 @@ mod tests {
#[test]
fn test_rootfs_kernel_params() {
let tests = &[
-// EXT4
+// EXT4 with DAX
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
+use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_PMEM),
@@ -370,6 +385,7 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
+use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -380,14 +396,15 @@ mod tests {
},
result: Ok(()),
},
-// XFS
+// XFS without DAX
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
rootfs_type: VM_ROOTFS_FILESYSTEM_XFS,
+use_dax: false,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_PMEM),
-Param::new("rootflags", "dax ro"),
+Param::new("rootflags", "ro"),
Param::new("rootfstype", VM_ROOTFS_FILESYSTEM_XFS),
]
.to_vec(),
@@ -397,6 +414,7 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: VM_ROOTFS_FILESYSTEM_XFS,
+use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -407,10 +425,11 @@ mod tests {
},
result: Ok(()),
},
-// EROFS
+// EROFS with DAX
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_PMEM,
rootfs_type: VM_ROOTFS_FILESYSTEM_EROFS,
+use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_PMEM),
@@ -424,6 +443,7 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: VM_ROOTFS_FILESYSTEM_EROFS,
+use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -438,6 +458,7 @@ mod tests {
TestData {
rootfs_driver: "foo",
rootfs_type: VM_ROOTFS_FILESYSTEM_EXT4,
+use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -452,6 +473,7 @@ mod tests {
TestData {
rootfs_driver: VM_ROOTFS_DRIVER_BLK,
rootfs_type: "foo",
+use_dax: true,
expect_params: KernelParams {
params: [
Param::new("root", VM_ROOTFS_ROOT_BLK),
@@ -466,8 +488,12 @@ mod tests {
for (i, t) in tests.iter().enumerate() {
let msg = format!("test[{i}]: {t:?}");
-let result =
-    KernelParams::new_rootfs_kernel_params("", t.rootfs_driver, t.rootfs_type);
+let result = KernelParams::new_rootfs_kernel_params(
+    "",
+    t.rootfs_driver,
+    t.rootfs_type,
+    t.use_dax,
+);
let msg = format!("{msg}, result: {result:?}");
if t.result.is_ok() {
assert!(result.is_ok(), "{}", msg);
@@ -486,6 +512,7 @@ mod tests {
"root_hash=abc,salt=def,data_blocks=1,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
+false,
)?;
let params_string = params.to_string()?;
assert!(params_string.contains("dm-mod.create="));
@@ -496,6 +523,7 @@ mod tests {
"root_hash=abc,data_blocks=1,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
+false,
)
.err()
.expect("expected missing salt error");
@@ -505,6 +533,7 @@ mod tests {
"root_hash=abc,salt=def,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
+false,
)
.err()
.expect("expected missing data_blocks error");
@@ -514,6 +543,7 @@ mod tests {
"root_hash=abc,salt=def,data_blocks=foo,data_block_size=4096,hash_block_size=4096",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
+false,
)
.err()
.expect("expected invalid data_blocks error");
@@ -523,6 +553,7 @@ mod tests {
"root_hash=abc,salt=def,data_blocks=1,data_block_size=4096,hash_block_size=4096,badfield",
VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_FILESYSTEM_EXT4,
+false,
)
.err()
.expect("expected invalid entry error");

View File

@@ -179,10 +179,17 @@ impl Kernel {
let mut kernel_params = KernelParams::new(config.debug_info.enable_debug);
if config.boot_info.initrd.is_empty() {
+// DAX is disabled on ARM due to a kernel panic in caches_clean_inval_pou.
+#[cfg(target_arch = "aarch64")]
+let use_dax = false;
+#[cfg(not(target_arch = "aarch64"))]
+let use_dax = true;
let mut rootfs_params = KernelParams::new_rootfs_kernel_params(
&config.boot_info.kernel_verity_params,
&config.boot_info.vm_rootfs_driver,
&config.boot_info.rootfs_type,
+use_dax,
)
.context("adding rootfs/verity params failed")?;
kernel_params.append(&mut rootfs_params);
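For reference, the #[cfg]-gated pair above is equivalent to a single cfg! expression — a stylistic alternative, not what the patch uses:

// cfg!() expands to a compile-time bool: DAX everywhere except aarch64.
let use_dax = !cfg!(target_arch = "aarch64");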

View File

@@ -288,6 +288,7 @@ DEFSTATICRESOURCEMGMT_NV = true
DEFDISABLEIMAGENVDIMM ?= false
DEFDISABLEIMAGENVDIMM_NV = true
+DEFDISABLEIMAGENVDIMM_CLH ?= true
DEFBINDMOUNTS := []
@@ -788,6 +789,7 @@ USER_VARS += DEFVFIOMODE_SE
USER_VARS += BUILDFLAGS
USER_VARS += DEFDISABLEIMAGENVDIMM
USER_VARS += DEFDISABLEIMAGENVDIMM_NV
+USER_VARS += DEFDISABLEIMAGENVDIMM_CLH
USER_VARS += DEFCCAMEASUREMENTALGO
USER_VARS += DEFSHAREDFS_QEMU_CCA_VIRTIOFS
USER_VARS += DEFPODRESOURCEAPISOCK

View File

@@ -222,8 +222,8 @@ hypervisor_loglevel = 1
# If false and nvdimm is supported, use an nvdimm device to plug the guest image.
# Otherwise a virtio-block device is used.
#
-# nvdimm is not supported when `confidential_guest = true`.
-disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
+# nvdimm is not supported with Cloud Hypervisor or when `confidential_guest = true`.
+disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM_CLH@
# Enable hot-plugging of VFIO devices to a root-port.
# The default setting is "no-port"

View File

@@ -332,6 +332,9 @@ func (clh *cloudHypervisor) getClhStopSandboxTimeout() time.Duration {
func (clh *cloudHypervisor) setConfig(config *HypervisorConfig) error {
clh.config = *config
+// We don't support NVDIMM with Cloud Hypervisor.
+clh.config.DisableImageNvdimm = true
return nil
}
@@ -584,8 +587,8 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
// Set initial amount of cpu's for the virtual machine
clh.vmconfig.Cpus = chclient.NewCpusConfig(int32(clh.config.NumVCPUs()), int32(clh.config.DefaultMaxVCPUs))
-disableNvdimm := (clh.config.DisableImageNvdimm || clh.config.ConfidentialGuest)
-enableDax := !disableNvdimm
+disableNvdimm := true
+enableDax := false
params, err := getNonUserDefinedKernelParams(hypervisorConfig.RootfsType, disableNvdimm, enableDax, clh.config.Debug, clh.config.ConfidentialGuest, clh.config.IOMMU, hypervisorConfig.KernelVerityParams)
if err != nil {
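With both values pinned, getNonUserDefinedKernelParams is now always invoked with nvdimm disabled and DAX off for Cloud Hypervisor guests, matching the Rust runtime change above.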
@@ -607,31 +610,19 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
}
if assetType == types.ImageAsset {
-if clh.config.DisableImageNvdimm || clh.config.ConfidentialGuest {
-    disk := chclient.NewDiskConfig()
-    disk.Path = &assetPath
-    disk.SetReadonly(true)
+disk := chclient.NewDiskConfig()
+disk.Path = &assetPath
+disk.SetReadonly(true)
-    diskRateLimiterConfig := clh.getDiskRateLimiterConfig()
-    if diskRateLimiterConfig != nil {
-        disk.SetRateLimiterConfig(*diskRateLimiterConfig)
-    }
+diskRateLimiterConfig := clh.getDiskRateLimiterConfig()
+if diskRateLimiterConfig != nil {
+    disk.SetRateLimiterConfig(*diskRateLimiterConfig)
+}
-    if clh.vmconfig.Disks != nil {
-        *clh.vmconfig.Disks = append(*clh.vmconfig.Disks, *disk)
-    } else {
-        clh.vmconfig.Disks = &[]chclient.DiskConfig{*disk}
-    }
+if clh.vmconfig.Disks != nil {
+    *clh.vmconfig.Disks = append(*clh.vmconfig.Disks, *disk)
} else {
-    pmem := chclient.NewPmemConfig(assetPath)
-    *pmem.DiscardWrites = true
-    pmem.SetIommu(clh.config.IOMMU)
-    if clh.vmconfig.Pmem != nil {
-        *clh.vmconfig.Pmem = append(*clh.vmconfig.Pmem, *pmem)
-    } else {
-        clh.vmconfig.Pmem = &[]chclient.PmemConfig{*pmem}
-    }
+    clh.vmconfig.Disks = &[]chclient.DiskConfig{*disk}
}
} else {
// assetType == types.InitrdAsset

View File

@@ -69,6 +69,7 @@ func newClhConfig() (HypervisorConfig, error) {
NetRateLimiterOpsMaxRate: int64(0),
NetRateLimiterOpsOneTimeBurst: int64(0),
HotPlugVFIO: config.NoPort,
+DisableImageNvdimm: true,
}, nil
}

View File

@@ -332,6 +332,38 @@ func TestQemuArchBaseAppendImage(t *testing.T) {
assert.Equal(expectedOut, devices)
}
+func TestQemuArchBaseAppendNvdimmImage(t *testing.T) {
+    var devices []govmmQemu.Device
+    assert := assert.New(t)
+    qemuArchBase := newQemuArchBase()
+    image, err := os.CreateTemp("", "img")
+    assert.NoError(err)
+    defer image.Close()
+    defer os.Remove(image.Name())
+    imageStat, err := image.Stat()
+    assert.NoError(err)
+    devices, err = qemuArchBase.appendNvdimmImage(devices, image.Name())
+    assert.NoError(err)
+    assert.Len(devices, 1)
+    expectedOut := []govmmQemu.Device{
+        govmmQemu.Object{
+            Driver:   govmmQemu.NVDIMM,
+            Type:     govmmQemu.MemoryBackendFile,
+            DeviceID: "nv0",
+            ID:       "mem0",
+            MemPath:  image.Name(),
+            Size:     (uint64)(imageStat.Size()),
+            ReadOnly: true,
+        },
+    }
+    assert.Equal(expectedOut, devices)
+}
func TestQemuArchBaseAppendBridges(t *testing.T) {
var devices []govmmQemu.Device
assert := assert.New(t)
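The new base-class test appears to take over coverage of the readonly nvdimm path from the arm64-specific test that is deleted further below.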

View File

@@ -10,7 +10,6 @@ package virtcontainers
import (
"context"
"fmt"
"os"
"time"
govmmQemu "github.com/kata-containers/kata-containers/src/runtime/pkg/govmm/qemu"
@@ -69,9 +68,10 @@ func newQemuArch(config HypervisorConfig) (qemuArch, error) {
kernelParamsDebug: kernelParamsDebug,
kernelParams: kernelParams,
disableNvdimm: config.DisableImageNvdimm,
-dax: true,
-protection: noneProtection,
-legacySerial: config.LegacySerial,
+// DAX is disabled on ARM due to a kernel panic in caches_clean_inval_pou.
+dax: false,
+protection: noneProtection,
+legacySerial: config.LegacySerial,
},
measurementAlgo: config.MeasurementAlgo,
}
@@ -109,35 +109,6 @@ func (q *qemuArm64) appendImage(ctx context.Context, devices []govmmQemu.Device,
return q.appendBlockImage(ctx, devices, path)
}
-// There is no nvdimm/readonly feature in qemu 5.1 which is used by arm64 for now,
-// so we temporarily add this specific implementation for arm64 here until
-// the qemu used by arm64 is capable for that feature
-func (q *qemuArm64) appendNvdimmImage(devices []govmmQemu.Device, path string) ([]govmmQemu.Device, error) {
-    imageFile, err := os.Open(path)
-    if err != nil {
-        return nil, err
-    }
-    defer imageFile.Close()
-    imageStat, err := imageFile.Stat()
-    if err != nil {
-        return nil, err
-    }
-    object := govmmQemu.Object{
-        Driver:   govmmQemu.NVDIMM,
-        Type:     govmmQemu.MemoryBackendFile,
-        DeviceID: "nv0",
-        ID:       "mem0",
-        MemPath:  path,
-        Size:     (uint64)(imageStat.Size()),
-    }
-    devices = append(devices, object)
-    return devices, nil
-}
func (q *qemuArm64) setIgnoreSharedMemoryMigrationCaps(_ context.Context, _ *govmmQemu.QMP) error {
// x-ignore-shared not support in arm64 for now
return nil

View File

@@ -130,39 +130,6 @@ func TestQemuArm64AppendImage(t *testing.T) {
assert.Equal(expectedOut, devices)
}
-func TestQemuArm64AppendNvdimmImage(t *testing.T) {
-    var devices []govmmQemu.Device
-    assert := assert.New(t)
-    f, err := os.CreateTemp("", "img")
-    assert.NoError(err)
-    defer func() { _ = f.Close() }()
-    defer func() { _ = os.Remove(f.Name()) }()
-    imageStat, err := f.Stat()
-    assert.NoError(err)
-    cfg := qemuConfig(QemuVirt)
-    cfg.ImagePath = f.Name()
-    arm64, err := newQemuArch(cfg)
-    assert.NoError(err)
-    expectedOut := []govmmQemu.Device{
-        govmmQemu.Object{
-            Driver:   govmmQemu.NVDIMM,
-            Type:     govmmQemu.MemoryBackendFile,
-            DeviceID: "nv0",
-            ID:       "mem0",
-            MemPath:  f.Name(),
-            Size:     (uint64)(imageStat.Size()),
-        },
-    }
-    devices, err = arm64.appendNvdimmImage(devices, f.Name())
-    assert.NoError(err)
-    assert.Equal(expectedOut, devices)
-}
func TestQemuArm64WithInitrd(t *testing.T) {
assert := assert.New(t)

View File

@@ -175,7 +175,7 @@ function deploy_kata() {
ANNOTATIONS="default_vcpus"
if [[ "${KATA_HOST_OS}" = "cbl-mariner" ]]; then
ANNOTATIONS="image kernel default_vcpus disable_image_nvdimm cc_init_data"
ANNOTATIONS="image kernel default_vcpus cc_init_data"
fi
if [[ "${KATA_HYPERVISOR}" = "qemu" ]]; then
ANNOTATIONS="image initrd kernel default_vcpus"

View File

@@ -108,12 +108,8 @@ add_cbl_mariner_annotation_to_yaml() {
local -r mariner_annotation_image="io.katacontainers.config.hypervisor.image"
local -r mariner_image_path="/opt/kata/share/kata-containers/kata-containers-mariner.img"
-local -r mariner_annotation_disable_image_nvdimm="io.katacontainers.config.hypervisor.disable_image_nvdimm"
-local -r mariner_disable_image_nvdimm=true
add_annotations_to_yaml "${yaml_file}" "${mariner_annotation_kernel}" "${mariner_kernel_path}"
add_annotations_to_yaml "${yaml_file}" "${mariner_annotation_image}" "${mariner_image_path}"
-add_annotations_to_yaml "${yaml_file}" "${mariner_annotation_disable_image_nvdimm}" "${mariner_disable_image_nvdimm}"
}
add_cbl_mariner_specific_annotations() {