diff --git a/src/runtime-rs/crates/hypervisor/src/dragonball/inner.rs b/src/runtime-rs/crates/hypervisor/src/dragonball/inner.rs
index 61701da09f..4e49e0baad 100644
--- a/src/runtime-rs/crates/hypervisor/src/dragonball/inner.rs
+++ b/src/runtime-rs/crates/hypervisor/src/dragonball/inner.rs
@@ -7,13 +7,12 @@
 use super::vmm_instance::VmmInstance;
 use crate::{
     device::DeviceType, hypervisor_persist::HypervisorState, kernel_param::KernelParams, VmmState,
-    DEV_HUGEPAGES, HUGETLBFS, HYPERVISOR_DRAGONBALL, SHMEM, VM_ROOTFS_DRIVER_BLK,
-    VM_ROOTFS_DRIVER_MMIO,
+    DEV_HUGEPAGES, HUGETLBFS, HYPERVISOR_DRAGONBALL, SHMEM,
 };
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;
 use dragonball::{
-    api::v1::{BlockDeviceConfigInfo, BootSourceConfig, VcpuResizeInfo},
+    api::v1::{BootSourceConfig, VcpuResizeInfo},
     vm::VmConfigInfo,
 };
 
@@ -25,7 +24,7 @@ use kata_types::{
 use nix::mount::MsFlags;
 use persist::sandbox_persist::Persist;
 use shim_interface::KATA_PATH;
-use std::{collections::HashSet, fs::create_dir_all, path::PathBuf};
+use std::{collections::HashSet, fs::create_dir_all};
 
 const DRAGONBALL_KERNEL: &str = "vmlinux";
 const DRAGONBALL_ROOT_FS: &str = "rootfs";
@@ -122,22 +121,6 @@ impl DragonballInner {
         )
         .context("set_boot_source")?;
 
-        // get vm rootfs
-        let image = {
-            let initrd_path = self.config.boot_info.initrd.clone();
-            let image_path = self.config.boot_info.image.clone();
-            if !initrd_path.is_empty() {
-                Ok(initrd_path)
-            } else if !image_path.is_empty() {
-                Ok(image_path)
-            } else {
-                Err(anyhow!("failed to get image"))
-            }
-        }
-        .context("get image")?;
-        self.set_vm_rootfs(&image, &rootfs_driver)
-            .context("set vm rootfs")?;
-        // add pending devices
         while let Some(dev) = self.pending_devices.pop() {
             self.add_device(dev).await.context("add_device")?;
         }
@@ -264,37 +247,6 @@ impl DragonballInner {
         .context("put boot source")
     }
 
-    fn set_vm_rootfs(&mut self, path: &str, driver: &str) -> Result<()> {
-        info!(sl!(), "set vm rootfs {} {}", path, driver);
-        let jail_drive = self
-            .get_resource(path, DRAGONBALL_ROOT_FS)
-            .context("get resource")?;
-
-        if driver == VM_ROOTFS_DRIVER_BLK || driver == VM_ROOTFS_DRIVER_MMIO {
-            let blk_cfg = BlockDeviceConfigInfo {
-                path_on_host: PathBuf::from(jail_drive),
-                drive_id: DRAGONBALL_ROOT_FS.to_string(),
-                is_root_device: false,
-                // Add it as a regular block device
-                // This allows us to use a partitioned root block device
-                // is_read_only
-                is_read_only: true,
-                is_direct: false,
-                ..Default::default()
-            };
-
-            self.vmm_instance
-                .insert_block_device(blk_cfg)
-                .context("inert block device")
-        } else {
-            Err(anyhow!(
-                "Unknown vm_rootfs driver {} path {:?}",
-                driver,
-                path
-            ))
-        }
-    }
-
     fn start_vmm_instance(&mut self) -> Result<()> {
         info!(sl!(), "Starting VM");
         self.vmm_instance
diff --git a/src/runtime-rs/crates/hypervisor/src/kernel_param.rs b/src/runtime-rs/crates/hypervisor/src/kernel_param.rs
index 554d61660e..e2804dd95c 100644
--- a/src/runtime-rs/crates/hypervisor/src/kernel_param.rs
+++ b/src/runtime-rs/crates/hypervisor/src/kernel_param.rs
@@ -87,7 +87,7 @@ impl KernelParams {
                         params.push(Param::new("rootflags", "dax ro"));
                     }
                     _ => {
-                        return Err(anyhow!("Unsupported rootfs type"));
+                        return Err(anyhow!("Unsupported rootfs type {}", rootfs_type));
                     }
                 }
             }
@@ -101,12 +101,12 @@ impl KernelParams {
                         params.push(Param::new("rootflags", "ro"));
                     }
                     _ => {
-                        return Err(anyhow!("Unsupported rootfs type"));
+                        return Err(anyhow!("Unsupported rootfs type {}", rootfs_type));
                     }
                 }
             }
             _ => {
-                return Err(anyhow!("Unsupported rootfs driver"));
+                return Err(anyhow!("Unsupported rootfs driver {}", rootfs_driver));
             }
         }
 
@@ -310,7 +310,7 @@ mod tests {
                    ]
                    .to_vec(),
                },
-                result: Err(anyhow!("Unsupported rootfs driver")),
+                result: Err(anyhow!("Unsupported rootfs driver foo")),
            },
            // Unsupported rootfs type
            TestData {
@@ -324,7 +324,7 @@ mod tests {
                    ]
                    .to_vec(),
                },
-                result: Err(anyhow!("Unsupported rootfs type")),
+                result: Err(anyhow!("Unsupported rootfs type foo")),
            },
        ];
 
@@ -332,7 +332,6 @@ mod tests {
            let msg = format!("test[{}]: {:?}", i, t);
            let result = KernelParams::new_rootfs_kernel_params(t.rootfs_driver, t.rootfs_type);
            let msg = format!("{}, result: {:?}", msg, result);
-
            if t.result.is_ok() {
                assert!(result.is_ok(), "{}", msg);
                assert_eq!(t.expect_params, result.unwrap());
diff --git a/src/runtime-rs/crates/resource/src/lib.rs b/src/runtime-rs/crates/resource/src/lib.rs
index 4e4aae9e87..c12aa0e484 100644
--- a/src/runtime-rs/crates/resource/src/lib.rs
+++ b/src/runtime-rs/crates/resource/src/lib.rs
@@ -17,6 +17,7 @@ pub mod manager;
 mod manager_inner;
 pub mod network;
 pub mod resource_persist;
+use hypervisor::BlockConfig;
 use network::NetworkConfig;
 pub mod rootfs;
 pub mod share_fs;
@@ -30,6 +31,7 @@ use kata_types::config::hypervisor::SharedFsInfo;
 pub enum ResourceConfig {
     Network(NetworkConfig),
     ShareFs(SharedFsInfo),
+    VmRootfs(BlockConfig),
 }
 
 #[derive(Debug, Clone, Copy, PartialEq)]
diff --git a/src/runtime-rs/crates/resource/src/manager_inner.rs b/src/runtime-rs/crates/resource/src/manager_inner.rs
index b7c26f675c..f4e17ce748 100644
--- a/src/runtime-rs/crates/resource/src/manager_inner.rs
+++ b/src/runtime-rs/crates/resource/src/manager_inner.rs
@@ -121,6 +121,11 @@ impl ResourceManagerInner {
                        .await
                        .context("failed to handle network")?;
                }
+                ResourceConfig::VmRootfs(r) => {
+                    do_handle_device(&self.device_manager, &DeviceConfig::BlockCfg(r))
+                        .await
+                        .context("do handle device failed.")?;
+                }
            };
        }
 
diff --git a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs
index 87383570f5..1191234fb8 100644
--- a/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs
+++ b/src/runtime-rs/crates/runtimes/virt_container/src/sandbox.rs
@@ -17,7 +17,7 @@ use common::{
     Sandbox, SandboxNetworkEnv,
 };
 use containerd_shim_protos::events::task::TaskOOM;
-use hypervisor::{dragonball::Dragonball, Hypervisor, HYPERVISOR_DRAGONBALL};
+use hypervisor::{dragonball::Dragonball, BlockConfig, Hypervisor, HYPERVISOR_DRAGONBALL};
 use kata_sys_util::hooks::HookStates;
 use kata_types::config::TomlConfig;
 use resource::{
@@ -105,6 +105,8 @@ impl VirtSandbox {
         network_env: SandboxNetworkEnv,
     ) -> Result<Vec<ResourceConfig>> {
         let mut resource_configs = vec![];
+
+        // prepare network config
         if !network_env.network_created {
             if let Some(netns_path) = network_env.netns {
                 let network_config = ResourceConfig::Network(
@@ -114,10 +116,20 @@ impl VirtSandbox {
                 resource_configs.push(network_config);
             }
         }
-        let hypervisor_config = self.hypervisor.hypervisor_config().await;
-        let virtio_fs_config = ResourceConfig::ShareFs(hypervisor_config.shared_fs);
+
+        // prepare sharefs device config
+        let virtio_fs_config =
+            ResourceConfig::ShareFs(self.hypervisor.hypervisor_config().await.shared_fs);
         resource_configs.push(virtio_fs_config);
 
+        // prepare VM rootfs device config
+        let vm_rootfs = ResourceConfig::VmRootfs(
+            self.prepare_rootfs_config()
+                .await
+                .context("failed to prepare rootfs device config")?,
+        );
+        resource_configs.push(vm_rootfs);
+
         Ok(resource_configs)
     }
 
@@ -173,6 +185,29 @@ impl VirtSandbox {
         })
     }
 
+    async fn prepare_rootfs_config(&self) -> Result<BlockConfig> {
+        let hypervisor_config = self.hypervisor.hypervisor_config().await;
+
+        let image = {
+            let initrd_path = hypervisor_config.boot_info.initrd.clone();
+            let image_path = hypervisor_config.boot_info.image;
+            if !initrd_path.is_empty() {
+                Ok(initrd_path)
+            } else if !image_path.is_empty() {
+                Ok(image_path)
+            } else {
+                Err(anyhow!("failed to get image"))
+            }
+        }
+        .context("get image")?;
+
+        Ok(BlockConfig {
+            path_on_host: image,
+            is_readonly: true,
+            ..Default::default()
+        })
+    }
+
     fn has_prestart_hooks(
         &self,
         prestart_hooks: Vec<oci::Hook>,
@@ -212,6 +247,7 @@ impl Sandbox for VirtSandbox {
         let resources = self
             .prepare_for_start_sandbox(id, network_env.clone())
             .await?;
+
         self.resource_manager
             .prepare_before_start_vm(resources)
             .await
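
A minimal, self-contained sketch of the behaviour the patch relocates: the rootfs block-device selection now lives in VirtSandbox::prepare_rootfs_config instead of the Dragonball-specific set_vm_rootfs, and the resulting config flows through the generic device-manager path (do_handle_device with DeviceConfig::BlockCfg) rather than a direct insert_block_device call. The BlockConfig and BootInfo types below are simplified stand-ins that model only the path_on_host and is_readonly fields touched by the patch, and the error type is reduced to a plain String; the real code uses hypervisor::BlockConfig, the boot_info section of the TOML hypervisor config, and anyhow.

// Stand-in for hypervisor::BlockConfig; only the two fields set by the patch are modeled.
#[derive(Debug, Default)]
struct BlockConfig {
    path_on_host: String,
    is_readonly: bool,
}

// Stand-in for the boot_info section of the hypervisor configuration.
struct BootInfo {
    initrd: String,
    image: String,
}

// Mirrors the selection logic of prepare_rootfs_config(): prefer the configured
// initrd, fall back to the image, and fail when neither is set. The guest rootfs
// is always attached read-only.
fn prepare_rootfs_config(boot_info: &BootInfo) -> Result<BlockConfig, String> {
    let image = if !boot_info.initrd.is_empty() {
        boot_info.initrd.clone()
    } else if !boot_info.image.is_empty() {
        boot_info.image.clone()
    } else {
        return Err("failed to get image".to_string());
    };

    Ok(BlockConfig {
        path_on_host: image,
        is_readonly: true,
        // Mirrors the patch, where the real BlockConfig has more fields.
        ..Default::default()
    })
}

fn main() {
    // Example values only; real paths come from the TOML hypervisor configuration.
    let boot_info = BootInfo {
        initrd: String::new(),
        image: "/path/to/kata-containers.img".to_string(),
    };
    let cfg = prepare_rootfs_config(&boot_info).expect("rootfs block config");
    assert!(cfg.is_readonly);
    println!("vm rootfs block device: {}", cfg.path_on_host);
}

One consequence visible in the diff itself: because the device is now created by the resource manager, the hypervisor crate no longer needs BlockDeviceConfigInfo or the VM_ROOTFS_DRIVER_* constants in dragonball/inner.rs, and unsupported driver/type errors in kernel_param.rs now report which value was rejected.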