runtime-rs: Add kernel_modules_images support

Add support for attaching multiple kernel modules disk images in
the Rust runtime, mirroring the Go runtime implementation.

Each configured image is cold-plugged as a read-only block device
and a Storage entry is sent to the agent to mount it at
/run/kata-modules-<N>.

Signed-off-by: Fabiano Fidêncio <ffidencio@nvidia.com>
This commit is contained in:
Fabiano Fidêncio
2026-04-24 13:24:27 +02:00
parent f5551a5bdd
commit 19cc1eb7f8
6 changed files with 173 additions and 7 deletions

View File

@@ -444,6 +444,18 @@ pub fn validate_block_device_sector_size(size: u32) -> Result<()> {
Ok(())
}
/// Configuration for a disk image containing kernel modules.
///
/// Each configured image is cold-plugged into the guest as a read-only
/// block device, and a Storage entry is sent to the agent so the image is
/// mounted at sandbox-creation time.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct KernelModulesImageConfig {
    /// Path to the modules disk image on the host. Entries with an empty
    /// path are filtered out before any device is attached.
    #[serde(default)]
    pub path: String,
    /// Optional dm-verity parameters for integrity verification. When
    /// non-empty, a dm-verity device is created on top of the image via
    /// dm-mod.create kernel parameters.
    /// NOTE(review): exact string syntax is whatever the kernel verity
    /// param parser accepts — confirm against the rootfs verity format.
    #[serde(default)]
    pub verity_params: String,
}
/// Guest kernel boot information.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct BootInfo {
@@ -459,6 +471,10 @@ pub struct BootInfo {
#[serde(default)]
pub kernel_verity_params: String,
/// List of disk images containing kernel modules to attach.
#[serde(default)]
pub kernel_modules_images: Vec<KernelModulesImageConfig>,
/// Path to initrd file on host.
#[serde(default)]
pub initrd: String,

View File

@@ -6,11 +6,15 @@
use anyhow::{anyhow, Result};
use std::collections::HashMap;
use crate::{
VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_BLK_CCW, VM_ROOTFS_DRIVER_MMIO, VM_ROOTFS_DRIVER_PMEM,
VM_ROOTFS_ROOT_BLK, VM_ROOTFS_ROOT_PMEM,
};
use kata_types::config::hypervisor::{parse_kernel_verity_params, VERITY_BLOCK_SIZE_BYTES};
use kata_types::config::hypervisor::{
parse_kernel_verity_params, KernelModulesImageConfig, VERITY_BLOCK_SIZE_BYTES,
};
use kata_types::config::LOG_VPORT_OPTION;
use kata_types::fs::{
VM_ROOTFS_FILESYSTEM_EROFS, VM_ROOTFS_FILESYSTEM_EXT4, VM_ROOTFS_FILESYSTEM_XFS,
@@ -254,6 +258,64 @@ impl KernelParams {
Ok(params)
}
/// Generate dm-mod.create kernel parameters for module images that have
/// dm-verity configured. Module images use a single block device with the
/// hash tree appended after the data section, so both the data device and
/// hash device are the same /dev/vdX and hash_start_block equals data_blocks.
///
/// `dm_offset` is the first dm device index to use (e.g. 1 when the rootfs
/// already occupies dm-0). `blk_offset` is the virtio-blk letter offset
/// (b'a' when nvdimm is used, b'b' when virtio-blk rootfs takes vda).
///
/// Images with an empty `path` are never cold-plugged as block devices, so
/// they are skipped here as well; counting them would make the enumeration
/// index drift out of sync with the /dev/vdX letters of the devices that
/// actually get attached.
///
/// Returns the params and a mapping from image index to the dm device path.
pub(crate) fn new_modules_verity_params(
    images: &[KernelModulesImageConfig],
    dm_offset: usize,
    blk_offset: u8,
) -> Result<(Self, HashMap<usize, String>)> {
    let mut params = Vec::new();
    let mut dm_devices: HashMap<usize, String> = HashMap::new();
    let mut dm_idx = dm_offset;

    // Enumerate only attached images so `i` matches the attach order used
    // by the resource manager (which filters empty paths the same way).
    for (i, img) in images
        .iter()
        .filter(|img| !img.path.is_empty())
        .enumerate()
    {
        if img.verity_params.trim().is_empty() {
            continue;
        }
        let cfg = match new_kernel_verity_params(&img.verity_params)? {
            Some(cfg) => cfg,
            None => continue,
        };

        // NOTE(review): only /dev/vda..=/dev/vdz are representable here;
        // more than 26 devices would wrap the u8 arithmetic — confirm an
        // upper bound is enforced by configuration validation.
        let dev_letter = (blk_offset + i as u8) as char;
        let dev = format!("/dev/vd{dev_letter}");

        // dm target length; presumably VERITY_BLOCK_SIZE_BYTES is the
        // 512-byte sector size so this converts blocks to sectors — TODO
        // confirm against the constant's definition.
        let data_sectors =
            (cfg.data_block_size / VERITY_BLOCK_SIZE_BYTES) * cfg.data_blocks;

        // dm-mod.create spec: name,uuid,minor,flags, then a dm-verity
        // target line. Data and hash share one device; the hash area
        // starts right after the data blocks (hash_start == data_blocks).
        let dm_cmd = format!(
            "dm-modules-{},,,ro,0 {} verity 1 {} {} {} {} {} {} sha256 {} {}",
            i,
            data_sectors,
            dev,
            dev,
            cfg.data_block_size,
            cfg.hash_block_size,
            cfg.data_blocks,
            cfg.data_blocks,
            cfg.root_hash,
            cfg.salt,
        );
        // NOTE(review): the kernel keeps only the last dm-mod.create=
        // value on the command line; multiple devices normally go in a
        // single parameter separated by ';' — confirm duplicate keys are
        // merged before the cmdline is assembled.
        params.push(Param {
            key: "dm-mod.create".to_string(),
            value: format!("\"{}\"", dm_cmd),
        });

        dm_devices.insert(i, format!("/dev/dm-{}", dm_idx));
        dm_idx += 1;
    }

    Ok((Self { params }, dm_devices))
}
/// Move every parameter out of `params` and append it to `self`, leaving
/// `params` empty afterwards.
pub(crate) fn append(&mut self, params: &mut KernelParams) {
    self.params.extend(params.params.drain(..));
}

View File

@@ -195,6 +195,24 @@ impl Kernel {
kernel_params.append(&mut rootfs_params);
}
if !config.boot_info.kernel_modules_images.is_empty() {
let rootfs_has_verity =
!config.boot_info.kernel_verity_params.trim().is_empty();
let dm_offset: usize = if rootfs_has_verity { 1 } else { 0 };
let blk_offset: u8 = if config.blockdev_info.disable_image_nvdimm {
b'b'
} else {
b'a'
};
let (mut mod_params, _) = KernelParams::new_modules_verity_params(
&config.boot_info.kernel_modules_images,
dm_offset,
blk_offset,
)
.context("adding module verity params failed")?;
kernel_params.append(&mut mod_params);
}
kernel_params.append(&mut KernelParams::from_string(
&config.boot_info.kernel_params,
));

View File

@@ -36,6 +36,7 @@ pub enum ResourceConfig {
Network(NetworkConfig),
ShareFs(SharedFsInfo),
VmRootfs(BlockConfig),
KernelModulesImages(Vec<BlockConfig>),
HybridVsock(HybridVsockConfig),
Vsock(VsockConfig),
Protection(ProtectionDeviceConfig),

View File

@@ -183,6 +183,13 @@ impl ResourceManagerInner {
.await
.context("do handle device failed.")?;
}
ResourceConfig::KernelModulesImages(images) => {
for img in images {
do_handle_device(&self.device_manager, &DeviceConfig::BlockCfg(img))
.await
.context("do handle kernel modules image device failed.")?;
}
}
ResourceConfig::HybridVsock(hv) => {
do_handle_device(&self.device_manager, &DeviceConfig::HybridVsockCfg(hv))
.await

View File

@@ -8,7 +8,8 @@ use crate::health_check::HealthCheck;
use agent::kata::KataAgent;
use agent::types::{KernelModule, SetPolicyRequest};
use agent::{
self, Agent, GetGuestDetailsRequest, GetIPTablesRequest, SetIPTablesRequest, VolumeStatsRequest,
self, Agent, GetGuestDetailsRequest, GetIPTablesRequest, SetIPTablesRequest, Storage,
VolumeStatsRequest,
};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
@@ -194,6 +195,12 @@ impl VirtSandbox {
resource_configs.push(vm_rootfs);
}
// prepare kernel modules images device configs
let modules_images = self.prepare_modules_images_config().await;
if !modules_images.is_empty() {
resource_configs.push(ResourceConfig::KernelModulesImages(modules_images));
}
// prepare protection device config
let init_data = if let Some(initdata) = self
.prepare_initdata_device_config(&self.hypervisor.hypervisor_config().await)
@@ -380,6 +387,21 @@ impl VirtSandbox {
}))
}
/// Build a read-only BlockConfig for every kernel-modules image that has a
/// host path configured; entries with an empty path are ignored. The
/// devices reuse the rootfs block driver so they attach the same way.
async fn prepare_modules_images_config(&self) -> Vec<BlockConfig> {
    let boot_info = self.hypervisor.hypervisor_config().await.boot_info;
    let mut configs = Vec::new();
    for img in &boot_info.kernel_modules_images {
        if img.path.is_empty() {
            continue;
        }
        configs.push(BlockConfig {
            path_on_host: img.path.clone(),
            is_readonly: true,
            driver_option: boot_info.vm_rootfs_driver.clone(),
            ..Default::default()
        });
    }
    configs
}
async fn set_agent_policy(&self) -> Result<()> {
// TODO: Exclude policy-related items from the annotations.
let toml_config = self.resource_manager.config().await;
@@ -682,14 +704,54 @@ impl Sandbox for VirtSandbox {
// create sandbox in vm
let agent_config = self.agent.agent_config().await;
let kernel_modules = KernelModule::set_kernel_modules(agent_config.kernel_modules)?;
let mut storages = self
.resource_manager
.get_storage_for_sandbox(self.shm_size)
.await
.context("get storages for sandbox")?;
let hyp_config = self.hypervisor.hypervisor_config().await;
let blk_offset: u8 = if hyp_config.blockdev_info.disable_image_nvdimm {
b'b'
} else {
b'a'
};
let rootfs_has_verity =
!hyp_config.boot_info.kernel_verity_params.trim().is_empty();
let dm_offset: usize = if rootfs_has_verity { 1 } else { 0 };
let mut dm_idx = dm_offset;
for (i, img) in hyp_config
.boot_info
.kernel_modules_images
.iter()
.filter(|img| !img.path.is_empty())
.enumerate()
{
let has_verity = !img.verity_params.trim().is_empty();
let source = if has_verity {
let dm_dev = format!("/dev/dm-{dm_idx}");
dm_idx += 1;
dm_dev
} else {
let dev_name = format!("vd{}", (blk_offset + i as u8) as char);
format!("/dev/{dev_name}")
};
storages.push(Storage {
driver: "blk".to_string(),
source,
fs_type: "ext4".to_string(),
mount_point: format!("/run/kata-modules-{i}"),
options: vec!["ro".to_string()],
..Default::default()
});
}
let req = agent::CreateSandboxRequest {
hostname: sandbox_config.hostname.clone(),
dns: sandbox_config.dns.clone(),
storages: self
.resource_manager
.get_storage_for_sandbox(self.shm_size)
.await
.context("get storages for sandbox")?,
storages,
sandbox_pidns: false,
sandbox_id: id.to_string(),
guest_hook_path: self