Mirror of
https://github.com/kata-containers/kata-containers.git
synced 2026-05-14 19:17:07 +00:00
runtime-rs: coldplug pending block devices in cloud-hypervisor
Queue early block devices in the cloud-hypervisor backend and carry them into VmConfig as pending_disks during VM creation. This restores the initdata delivery path for runtime-rs/ch by making pre-boot block devices available as coldplugged disks, similar to how QEMU handles them. Skip the VM rootfs image when collecting pending disks so boot_info.image is not added twice. Also add a conversion test covering a boot image plus an extra pending disk to protect the initdata path from regressions.

Signed-off-by: Saul Paredes <saulparedes@microsoft.com>
This commit is contained in:
@@ -123,6 +123,7 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
|
||||
let net = n.network_devices;
|
||||
let host_devices = n.host_devices;
|
||||
let protection_dev = n.protection_device;
|
||||
let pending_disks = n.pending_disks.unwrap_or_default();
|
||||
|
||||
let cpus = CpusConfig::try_from((cfg.cpu_info, guest_protection_to_use.clone()))
|
||||
.map_err(VmConfigError::CPUError)?;
|
||||
@@ -169,6 +170,8 @@ impl TryFrom<NamedHypervisorConfig> for VmConfig {
|
||||
disks.push(disk);
|
||||
};
|
||||
|
||||
disks.extend(pending_disks);
|
||||
|
||||
let disks = if !disks.is_empty() { Some(disks) } else { None };
|
||||
|
||||
let serial = get_serial_cfg(debug, guest_protection_to_use.clone());
|
||||
@@ -1812,7 +1815,7 @@ mod tests {
|
||||
vsock: Some(valid_vsock.clone()),
|
||||
|
||||
// rootfs image specific
|
||||
disks: Some(vec![disk_config_with_image]),
|
||||
disks: Some(vec![disk_config_with_image.clone()]),
|
||||
|
||||
payload: Some(PayloadConfig {
|
||||
kernel: Some(PathBuf::from(kernel)),
|
||||
@@ -1824,6 +1827,24 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let initdata_disk_config = DiskConfig {
|
||||
path: Some(PathBuf::from(
|
||||
"/run/kata-containers/shared/initdata/sid/initdata.image",
|
||||
)),
|
||||
readonly: true,
|
||||
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let vmconfig_with_image_and_kernel_and_pending_disk = VmConfig {
|
||||
disks: Some(vec![
|
||||
disk_config_with_image.clone(),
|
||||
initdata_disk_config.clone(),
|
||||
]),
|
||||
|
||||
..vmconfig_with_image_and_kernel.clone()
|
||||
};
|
||||
|
||||
let vmconfig_with_initrd = VmConfig {
|
||||
cpus: cpus_config.clone(),
|
||||
memory: mem_config_std,
|
||||
@@ -1866,6 +1887,12 @@ mod tests {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let named_hypervisor_cfg_with_image_and_kernel_and_pending_disk = NamedHypervisorConfig {
|
||||
pending_disks: Some(vec![initdata_disk_config]),
|
||||
|
||||
..named_hypervisor_cfg_with_image_and_kernel.clone()
|
||||
};
|
||||
|
||||
let named_hypervisor_cfg_with_image_and_kernel_bad_cpu = NamedHypervisorConfig {
|
||||
kernel_params: kernel_params.to_string(),
|
||||
sandbox_path: sandbox_path.into(),
|
||||
@@ -2050,6 +2077,10 @@ mod tests {
|
||||
cfg: named_hypervisor_cfg_with_image_and_kernel,
|
||||
result: Ok(vmconfig_with_image_and_kernel),
|
||||
},
|
||||
TestData {
|
||||
cfg: named_hypervisor_cfg_with_image_and_kernel_and_pending_disk,
|
||||
result: Ok(vmconfig_with_image_and_kernel_and_pending_disk),
|
||||
},
|
||||
TestData {
|
||||
cfg: named_hypervisor_cfg_with_initrd,
|
||||
result: Ok(vmconfig_with_initrd),
|
||||
|
||||
@@ -513,6 +513,7 @@ pub struct NamedHypervisorConfig {
|
||||
pub vsock_socket_path: String,
|
||||
pub cfg: HypervisorConfig,
|
||||
|
||||
pub pending_disks: Option<Vec<DiskConfig>>,
|
||||
pub shared_fs_devices: Option<Vec<FsConfig>>,
|
||||
pub network_devices: Option<Vec<NetConfig>>,
|
||||
pub host_devices: Option<Vec<DeviceConfig>>,
|
||||
|
||||
@@ -73,6 +73,7 @@ impl CloudHypervisorInner {
|
||||
// - Network details need to be saved for later application.
|
||||
//
|
||||
match device {
|
||||
DeviceType::Block(_) => self.pending_devices.insert(0, device.clone()),
|
||||
DeviceType::ShareFs(_) => self.pending_devices.insert(0, device.clone()),
|
||||
DeviceType::Network(_) => self.pending_devices.insert(0, device.clone()),
|
||||
DeviceType::Vfio(_) => self.pending_devices.insert(0, device.clone()),
|
||||
@@ -348,11 +349,14 @@ impl CloudHypervisorInner {
|
||||
pub(crate) async fn get_shared_devices(
|
||||
&mut self,
|
||||
) -> Result<(
|
||||
Option<Vec<DiskConfig>>,
|
||||
Option<Vec<FsConfig>>,
|
||||
Option<Vec<NetConfig>>,
|
||||
Option<Vec<DeviceConfig>>,
|
||||
Option<ProtectionDevConfig>,
|
||||
)> {
|
||||
let vm_rootfs_path = self.hypervisor_config().boot_info.image;
|
||||
let mut coldplug_block_devices = Vec::<DiskConfig>::new();
|
||||
let mut shared_fs_devices = Vec::<FsConfig>::new();
|
||||
let mut network_devices = Vec::<NetConfig>::new();
|
||||
let mut host_devices = Vec::<DeviceConfig>::new();
|
||||
@@ -360,6 +364,14 @@ impl CloudHypervisorInner {
|
||||
|
||||
while let Some(dev) = self.pending_devices.pop() {
|
||||
match dev {
|
||||
DeviceType::Block(block_device) => {
|
||||
if block_device.config.path_on_host == vm_rootfs_path {
|
||||
continue;
|
||||
}
|
||||
|
||||
let disk_cfg = DiskConfig::try_from(block_device.config)?;
|
||||
coldplug_block_devices.push(disk_cfg);
|
||||
}
|
||||
DeviceType::ShareFs(dev) => {
|
||||
let settings = ShareFsSettings::new(dev.config, self.vm_path.clone());
|
||||
|
||||
@@ -480,6 +492,7 @@ impl CloudHypervisorInner {
|
||||
}
|
||||
|
||||
Ok((
|
||||
Some(coldplug_block_devices),
|
||||
Some(shared_fs_devices),
|
||||
Some(network_devices),
|
||||
Some(host_devices),
|
||||
|
||||
@@ -187,7 +187,7 @@ impl CloudHypervisorInner {
|
||||
}
|
||||
|
||||
async fn boot_vm(&mut self) -> Result<()> {
|
||||
let (shared_fs_devices, network_devices, host_devices, protection_device) =
|
||||
let (pending_disks, shared_fs_devices, network_devices, host_devices, protection_device) =
|
||||
self.get_shared_devices().await?;
|
||||
|
||||
let sandbox_path = get_sandbox_path(&self.id);
|
||||
@@ -211,6 +211,7 @@ impl CloudHypervisorInner {
|
||||
vsock_socket_path,
|
||||
cfg: self.config.clone(),
|
||||
guest_protection_to_use: self.guest_protection_to_use.clone(),
|
||||
pending_disks,
|
||||
shared_fs_devices,
|
||||
host_devices,
|
||||
protection_device,
|
||||
|
||||
Reference in New Issue
Block a user