Merge pull request #7139 from openanolis/fix/devmanager

runtime-rs: change block index to 0
This commit is contained in:
Zhongtao Hu 2023-07-28 14:04:19 +08:00 committed by GitHub
commit 61a8eabf8e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 232 additions and 122 deletions

View File

@ -32,7 +32,7 @@ pub const DEFAULT_HYPERVISOR: &str = HYPERVISOR_NAME_DRAGONBALL;
pub const DEFAULT_INTERNETWORKING_MODEL: &str = "tcfilter";
pub const DEFAULT_BLOCK_DEVICE_TYPE: &str = "virtio-blk";
pub const DEFAULT_BLOCK_DEVICE_TYPE: &str = "virtio-blk-pci";
pub const DEFAULT_VHOST_USER_STORE_PATH: &str = "/var/run/vhost-user";
pub const DEFAULT_BLOCK_NVDIMM_MEM_OFFSET: u64 = 0;

View File

@ -47,7 +47,7 @@ const VIRTIO_BLK_PCI: &str = "virtio-blk-pci";
const VIRTIO_BLK_MMIO: &str = "virtio-blk-mmio";
const VIRTIO_BLK_CCW: &str = "virtio-blk-ccw";
const VIRTIO_SCSI: &str = "virtio-scsi";
const VIRTIO_PMEM: &str = "nvdimm";
const VIRTIO_PMEM: &str = "virtio-pmem";
const VIRTIO_9P: &str = "virtio-9p";
const VIRTIO_FS: &str = "virtio-fs";
const VIRTIO_FS_INLINE: &str = "inline-virtio-fs";
@ -221,6 +221,10 @@ pub struct BootInfo {
/// If you want that qemu uses the default firmware leave this option empty.
#[serde(default)]
pub firmware: String,
/// Block storage driver to be used when the VM rootfs is backed
/// by a block device. Valid values are virtio-pmem, virtio-blk-pci or virtio-blk-mmio.
#[serde(default)]
pub vm_rootfs_driver: String,
}
impl BootInfo {
@ -230,6 +234,11 @@ impl BootInfo {
resolve_path!(self.image, "guest boot image file {} is invalid: {}")?;
resolve_path!(self.initrd, "guest initrd image file {} is invalid: {}")?;
resolve_path!(self.firmware, "firmware image file {} is invalid: {}")?;
if self.vm_rootfs_driver.is_empty() {
self.vm_rootfs_driver = default::DEFAULT_BLOCK_DEVICE_TYPE.to_string();
}
Ok(())
}
@ -242,6 +251,21 @@ impl BootInfo {
if !self.image.is_empty() && !self.initrd.is_empty() {
return Err(eother!("Can not configure both initrd and image for boot"));
}
let l = [
VIRTIO_BLK_PCI,
VIRTIO_BLK_CCW,
VIRTIO_BLK_MMIO,
VIRTIO_PMEM,
VIRTIO_SCSI,
];
if !l.contains(&self.vm_rootfs_driver.as_str()) {
return Err(eother!(
"{} is unsupported block device type.",
self.vm_rootfs_driver
));
}
Ok(())
}

View File

@ -202,6 +202,7 @@ ifneq (,$(DBCMD))
SYSCONFIG_PATHS += $(SYSCONFIG_DB)
CONFIGS += $(CONFIG_DB)
# dragonball-specific options (all should be suffixed by "_DB")
VMROOTFSDRIVER_DB := virtio-blk-pci
DEFMAXVCPUS_DB := 1
DEFBLOCKSTORAGEDRIVER_DB := virtio-blk-mmio
DEFNETWORKMODEL_DB := tcfilter
@ -235,6 +236,7 @@ USER_VARS += SYSCONFIG
USER_VARS += IMAGENAME
USER_VARS += IMAGEPATH
USER_VARS += DEFROOTFSTYPE
USER_VARS += VMROOTFSDRIVER_DB
USER_VARS += MACHINETYPE
USER_VARS += KERNELDIR
USER_VARS += KERNELTYPE

View File

@ -23,6 +23,11 @@ image = "@IMAGEPATH@"
# - erofs
rootfs_type=@DEFROOTFSTYPE@
# Block storage driver to be used when the VM rootfs is backed
# by a block device. Valid values are virtio-blk-pci, virtio-blk-mmio or virtio-pmem.
vm_rootfs_driver = "@VMROOTFSDRIVER_DB@"
# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"

View File

@ -11,9 +11,9 @@ use kata_sys_util::rand::RandomBytes;
use tokio::sync::{Mutex, RwLock};
use crate::{
device::VhostUserBlkDevice, BlockConfig, BlockDevice, Hypervisor, NetworkDevice, VfioDevice,
VhostUserConfig, KATA_BLK_DEV_TYPE, KATA_MMIO_BLK_DEV_TYPE, VIRTIO_BLOCK_MMIO,
VIRTIO_BLOCK_PCI,
vhost_user_blk::VhostUserBlkDevice, BlockConfig, BlockDevice, Hypervisor, NetworkDevice,
VfioDevice, VhostUserConfig, KATA_BLK_DEV_TYPE, KATA_MMIO_BLK_DEV_TYPE, KATA_NVDIMM_DEV_TYPE,
VIRTIO_BLOCK_MMIO, VIRTIO_BLOCK_PCI, VIRTIO_PMEM,
};
use super::{
@ -23,57 +23,66 @@ use super::{
/// A device handle shared across tasks: `Arc` for shared ownership,
/// async `Mutex` for exclusive mutable access to the trait object.
pub type ArcMutexDevice = Arc<Mutex<dyn Device>>;
/// Allocate a device index from `$self`: prefer the smallest previously
/// released index (the `$released_index` pool is kept sorted descending,
/// so `pop()` yields the smallest); otherwise hand out a fresh index and
/// advance the fresh-index counter `$index`.
///
/// Fix: the original incremented `$self.$index` even when a released
/// index was reused, permanently leaking one fresh index per reuse. The
/// counter is now advanced only when a fresh index is actually handed out.
macro_rules! declare_index {
    ($self:ident, $index:ident, $released_index:ident) => {{
        if let Some(index) = $self.$released_index.pop() {
            // Recycle a released index; do NOT advance the fresh counter.
            Ok(index)
        } else {
            let current_index = $self.$index;
            $self.$index += 1;
            Ok(current_index)
        }
    }};
}
/// Return index `$index` to the free pool `$released_index` of `$self`.
/// The pool is kept sorted in descending order so that `pop()` (used by
/// `declare_index!`) always yields the smallest available index.
macro_rules! release_index {
    ($self:ident, $index:ident, $released_index:ident) => {{
        $self.$released_index.push($index);
        $self.$released_index
            .sort_by_key(|&i| ::std::cmp::Reverse(i));
    }};
}
/// block_index and released_block_index are used to search an available block index
/// in Sandbox.
/// pmem_index and released_pmem_index are used to search an available pmem index
/// in Sandbox.
///
/// @block_driver to be used for block device;
/// @pmem_index generally default is 0 for <pmem0>;
/// @block_index generally default is 0 for <vda>;
/// @released_pmem_index for pmem devices removed and indexes will released at the same time.
/// @released_block_index for blk devices removed and indexes will released at the same time.
#[derive(Clone, Debug, Default)]
struct SharedInfo {
    /// Block driver name converted to the kata device type
    /// (see the driver conversion in `SharedInfo::new`).
    block_driver: String,
    /// Next fresh index for pmem devices (`/dev/pmem0` is index 0).
    pmem_index: u64,
    /// Next fresh index for block devices (`/dev/vda` is index 0).
    block_index: u64,
    /// Indices of removed pmem devices, kept for reuse.
    released_pmem_index: Vec<u64>,
    /// Indices of removed block devices, kept for reuse.
    released_block_index: Vec<u64>,
}
impl SharedInfo {
async fn new(hypervisor: Arc<dyn Hypervisor>) -> Self {
// get hypervisor block driver
let block_driver = match hypervisor
.hypervisor_config()
.await
.blockdev_info
.block_device_driver
.as_str()
{
// convert the block driver to kata type
VIRTIO_BLOCK_MMIO => KATA_MMIO_BLK_DEV_TYPE.to_string(),
VIRTIO_BLOCK_PCI => KATA_BLK_DEV_TYPE.to_string(),
_ => "".to_string(),
};
async fn new() -> Self {
SharedInfo {
block_driver,
block_index: 1,
pmem_index: 0,
block_index: 0,
released_pmem_index: vec![],
released_block_index: vec![],
}
}
// declare the available block index
fn declare_device_index(&mut self) -> Result<u64> {
let current_index = if let Some(index) = self.released_block_index.pop() {
index
fn declare_device_index(&mut self, is_pmem: bool) -> Result<u64> {
if is_pmem {
declare_index!(self, pmem_index, released_pmem_index)
} else {
self.block_index
};
self.block_index += 1;
Ok(current_index)
declare_index!(self, block_index, released_block_index)
}
}
fn release_device_index(&mut self, index: u64) {
self.released_block_index.push(index);
self.released_block_index.sort_by(|a, b| b.cmp(a));
/// Return a previously declared device index to the matching free pool
/// (pmem pool when `is_pmem` is true, block pool otherwise) so it can be
/// reused by a later `declare_device_index` call.
fn release_device_index(&mut self, index: u64, is_pmem: bool) {
    if is_pmem {
        release_index!(self, index, released_pmem_index);
    } else {
        release_index!(self, index, released_block_index);
    }
}
}
@ -90,12 +99,20 @@ impl DeviceManager {
let devices = HashMap::<String, ArcMutexDevice>::new();
Ok(DeviceManager {
devices,
hypervisor: hypervisor.clone(),
shared_info: SharedInfo::new(hypervisor.clone()).await,
hypervisor,
shared_info: SharedInfo::new().await,
})
}
pub async fn try_add_device(&mut self, device_id: &str) -> Result<()> {
async fn get_block_driver(&self) -> String {
self.hypervisor
.hypervisor_config()
.await
.blockdev_info
.block_device_driver
}
async fn try_add_device(&mut self, device_id: &str) -> Result<()> {
// find the device
let device = self
.devices
@ -108,7 +125,10 @@ impl DeviceManager {
if let Err(e) = result {
match device_guard.get_device_info().await {
DeviceType::Block(device) => {
self.shared_info.release_device_index(device.config.index);
self.shared_info.release_device_index(
device.config.index,
device.config.driver_option == *KATA_NVDIMM_DEV_TYPE,
);
}
DeviceType::Vfio(device) => {
// safe here:
@ -116,11 +136,12 @@ impl DeviceManager {
// and needs do release_device_index. otherwise, let it go.
if device.config.dev_type == DEVICE_TYPE_BLOCK {
self.shared_info
.release_device_index(device.config.virt_path.unwrap().0);
.release_device_index(device.config.virt_path.unwrap().0, false);
}
}
DeviceType::VhostUserBlk(device) => {
self.shared_info.release_device_index(device.config.index);
self.shared_info
.release_device_index(device.config.index, false);
}
_ => {
debug!(sl!(), "no need to do release device index.");
@ -142,8 +163,14 @@ impl DeviceManager {
let result = match device_guard.detach(self.hypervisor.as_ref()).await {
Ok(index) => {
if let Some(i) = index {
// release the declared block device index
self.shared_info.release_device_index(i);
// release the declared device index
let is_pmem =
if let DeviceType::Block(blk) = device_guard.get_device_info().await {
blk.config.driver_option == *KATA_NVDIMM_DEV_TYPE
} else {
false
};
self.shared_info.release_device_index(i, is_pmem);
}
Ok(())
}
@ -209,13 +236,19 @@ impl DeviceManager {
None
}
fn get_dev_virt_path(&mut self, dev_type: &str) -> Result<Option<(u64, String)>> {
fn get_dev_virt_path(
&mut self,
dev_type: &str,
is_pmem: bool,
) -> Result<Option<(u64, String)>> {
let virt_path = if dev_type == DEVICE_TYPE_BLOCK {
// generate virt path
let current_index = self.shared_info.declare_device_index()?;
let drive_name = get_virt_drive_name(current_index as i32)?;
let current_index = self.shared_info.declare_device_index(is_pmem)?;
let drive_name = if is_pmem {
format!("pmem{}", current_index)
} else {
get_virt_drive_name(current_index as i32)?
};
let virt_path_name = format!("/dev/{}", drive_name);
Some((current_index, virt_path_name))
} else {
// only dev_type is block, otherwise, it's None.
@ -247,8 +280,7 @@ impl DeviceManager {
if let Some(device_matched_id) = self.find_device(dev_host_path).await {
return Ok(device_matched_id);
}
let virt_path = self.get_dev_virt_path(vfio_dev_config.dev_type.as_str())?;
let virt_path = self.get_dev_virt_path(vfio_dev_config.dev_type.as_str(), false)?;
vfio_dev_config.virt_path = virt_path;
Arc::new(Mutex::new(VfioDevice::new(
@ -304,12 +336,28 @@ impl DeviceManager {
config: &VhostUserConfig,
device_id: String,
) -> Result<ArcMutexDevice> {
// TODO virtio-scsi
let mut vhu_blk_config = config.clone();
vhu_blk_config.driver_option = self.shared_info.block_driver.clone();
match vhu_blk_config.driver_option.as_str() {
// convert the block driver to kata type
VIRTIO_BLOCK_MMIO => {
vhu_blk_config.driver_option = KATA_MMIO_BLK_DEV_TYPE.to_string();
}
VIRTIO_BLOCK_PCI => {
vhu_blk_config.driver_option = KATA_BLK_DEV_TYPE.to_string();
}
_ => {
return Err(anyhow!(
"unsupported driver type {}",
vhu_blk_config.driver_option
));
}
};
// generate block device index and virt path
// safe here, Block device always has virt_path.
if let Some(virt_path) = self.get_dev_virt_path(DEVICE_TYPE_BLOCK)? {
if let Some(virt_path) = self.get_dev_virt_path(DEVICE_TYPE_BLOCK, false)? {
vhu_blk_config.index = virt_path.0;
vhu_blk_config.virt_path = virt_path.1;
}
@ -326,10 +374,30 @@ impl DeviceManager {
device_id: String,
) -> Result<ArcMutexDevice> {
let mut block_config = config.clone();
block_config.driver_option = self.shared_info.block_driver.clone();
let mut is_pmem = false;
match block_config.driver_option.as_str() {
// convert the block driver to kata type
VIRTIO_BLOCK_MMIO => {
block_config.driver_option = KATA_MMIO_BLK_DEV_TYPE.to_string();
}
VIRTIO_BLOCK_PCI => {
block_config.driver_option = KATA_BLK_DEV_TYPE.to_string();
}
VIRTIO_PMEM => {
block_config.driver_option = KATA_NVDIMM_DEV_TYPE.to_string();
is_pmem = true;
}
_ => {
return Err(anyhow!(
"unsupported driver type {}",
block_config.driver_option
));
}
};
// generate virt path
if let Some(virt_path) = self.get_dev_virt_path(DEVICE_TYPE_BLOCK)? {
if let Some(virt_path) = self.get_dev_virt_path(DEVICE_TYPE_BLOCK, is_pmem)? {
block_config.index = virt_path.0;
block_config.virt_path = virt_path.1;
}
@ -398,3 +466,7 @@ pub async fn do_handle_device(
Ok(device_info)
}
/// Take a read lock on the device manager and return the block device
/// driver name from the hypervisor configuration.
pub async fn get_block_driver(d: &RwLock<DeviceManager>) -> String {
    let dev_mgr = d.read().await;
    dev_mgr.get_block_driver().await
}

View File

@ -16,8 +16,8 @@ pub use vfio::{
VfioBusMode, VfioConfig, VfioDevice,
};
pub use virtio_blk::{
BlockConfig, BlockDevice, KATA_BLK_DEV_TYPE, KATA_MMIO_BLK_DEV_TYPE, VIRTIO_BLOCK_MMIO,
VIRTIO_BLOCK_PCI,
BlockConfig, BlockDevice, KATA_BLK_DEV_TYPE, KATA_MMIO_BLK_DEV_TYPE, KATA_NVDIMM_DEV_TYPE,
VIRTIO_BLOCK_MMIO, VIRTIO_BLOCK_PCI, VIRTIO_PMEM,
};
pub use virtio_fs::{
ShareFsDevice, ShareFsDeviceConfig, ShareFsMountConfig, ShareFsMountDevice, ShareFsMountType,

View File

@ -13,8 +13,10 @@ use async_trait::async_trait;
/// VIRTIO_BLOCK_PCI indicates block driver is virtio-pci based
pub const VIRTIO_BLOCK_PCI: &str = "virtio-blk-pci";
/// VIRTIO_BLOCK_MMIO indicates block driver is virtio-mmio based
pub const VIRTIO_BLOCK_MMIO: &str = "virtio-blk-mmio";
/// VIRTIO_PMEM indicates the block driver is virtio-pmem based
pub const VIRTIO_PMEM: &str = "virtio-pmem";
/// Kata device type string for virtio-mmio block devices
pub const KATA_MMIO_BLK_DEV_TYPE: &str = "mmioblk";
/// Kata device type string for virtio-pci block devices
pub const KATA_BLK_DEV_TYPE: &str = "blk";
/// Kata device type string for nvdimm (pmem) devices
pub const KATA_NVDIMM_DEV_TYPE: &str = "nvdimm";
#[derive(Debug, Clone, Default)]
pub struct BlockConfig {

View File

@ -7,13 +7,12 @@
use super::vmm_instance::VmmInstance;
use crate::{
device::DeviceType, hypervisor_persist::HypervisorState, kernel_param::KernelParams, VmmState,
DEV_HUGEPAGES, HUGETLBFS, HYPERVISOR_DRAGONBALL, SHMEM, VM_ROOTFS_DRIVER_BLK,
VM_ROOTFS_DRIVER_MMIO,
DEV_HUGEPAGES, HUGETLBFS, HYPERVISOR_DRAGONBALL, SHMEM,
};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use dragonball::{
api::v1::{BlockDeviceConfigInfo, BootSourceConfig, VcpuResizeInfo},
api::v1::{BootSourceConfig, VcpuResizeInfo},
vm::VmConfigInfo,
};
@ -25,7 +24,7 @@ use kata_types::{
use nix::mount::MsFlags;
use persist::sandbox_persist::Persist;
use shim_interface::KATA_PATH;
use std::{collections::HashSet, fs::create_dir_all, path::PathBuf};
use std::{collections::HashSet, fs::create_dir_all};
// Resource name for the guest kernel — presumably the jailed file name;
// TODO(review): confirm against get_resource usage.
const DRAGONBALL_KERNEL: &str = "vmlinux";
// Resource name for the VM rootfs (passed to get_resource as the jailed
// drive name).
const DRAGONBALL_ROOT_FS: &str = "rootfs";
@ -122,22 +121,6 @@ impl DragonballInner {
)
.context("set_boot_source")?;
// get vm rootfs
let image = {
let initrd_path = self.config.boot_info.initrd.clone();
let image_path = self.config.boot_info.image.clone();
if !initrd_path.is_empty() {
Ok(initrd_path)
} else if !image_path.is_empty() {
Ok(image_path)
} else {
Err(anyhow!("failed to get image"))
}
}
.context("get image")?;
self.set_vm_rootfs(&image, &rootfs_driver)
.context("set vm rootfs")?;
// add pending devices
while let Some(dev) = self.pending_devices.pop() {
self.add_device(dev).await.context("add_device")?;
@ -264,37 +247,6 @@ impl DragonballInner {
.context("put boot source")
}
fn set_vm_rootfs(&mut self, path: &str, driver: &str) -> Result<()> {
info!(sl!(), "set vm rootfs {} {}", path, driver);
let jail_drive = self
.get_resource(path, DRAGONBALL_ROOT_FS)
.context("get resource")?;
if driver == VM_ROOTFS_DRIVER_BLK || driver == VM_ROOTFS_DRIVER_MMIO {
let blk_cfg = BlockDeviceConfigInfo {
path_on_host: PathBuf::from(jail_drive),
drive_id: DRAGONBALL_ROOT_FS.to_string(),
is_root_device: false,
// Add it as a regular block device
// This allows us to use a partitioned root block device
// is_read_only
is_read_only: true,
is_direct: false,
..Default::default()
};
self.vmm_instance
.insert_block_device(blk_cfg)
.context("inert block device")
} else {
Err(anyhow!(
"Unknown vm_rootfs driver {} path {:?}",
driver,
path
))
}
}
fn start_vmm_instance(&mut self) -> Result<()> {
info!(sl!(), "Starting VM");
self.vmm_instance

View File

@ -87,7 +87,7 @@ impl KernelParams {
params.push(Param::new("rootflags", "dax ro"));
}
_ => {
return Err(anyhow!("Unsupported rootfs type"));
return Err(anyhow!("Unsupported rootfs type {}", rootfs_type));
}
}
}
@ -101,12 +101,12 @@ impl KernelParams {
params.push(Param::new("rootflags", "ro"));
}
_ => {
return Err(anyhow!("Unsupported rootfs type"));
return Err(anyhow!("Unsupported rootfs type {}", rootfs_type));
}
}
}
_ => {
return Err(anyhow!("Unsupported rootfs driver"));
return Err(anyhow!("Unsupported rootfs driver {}", rootfs_driver));
}
}
@ -310,7 +310,7 @@ mod tests {
]
.to_vec(),
},
result: Err(anyhow!("Unsupported rootfs driver")),
result: Err(anyhow!("Unsupported rootfs driver foo")),
},
// Unsupported rootfs type
TestData {
@ -324,7 +324,7 @@ mod tests {
]
.to_vec(),
},
result: Err(anyhow!("Unsupported rootfs type")),
result: Err(anyhow!("Unsupported rootfs type foo")),
},
];
@ -332,7 +332,6 @@ mod tests {
let msg = format!("test[{}]: {:?}", i, t);
let result = KernelParams::new_rootfs_kernel_params(t.rootfs_driver, t.rootfs_type);
let msg = format!("{}, result: {:?}", msg, result);
if t.result.is_ok() {
assert!(result.is_ok(), "{}", msg);
assert_eq!(t.expect_params, result.unwrap());

View File

@ -17,6 +17,7 @@ pub mod manager;
mod manager_inner;
pub mod network;
pub mod resource_persist;
use hypervisor::BlockConfig;
use network::NetworkConfig;
pub mod rootfs;
pub mod share_fs;
@ -30,6 +31,7 @@ use kata_types::config::hypervisor::SharedFsInfo;
pub enum ResourceConfig {
    /// Network resource to set up for the sandbox.
    Network(NetworkConfig),
    /// Shared filesystem configuration for the sandbox.
    ShareFs(SharedFsInfo),
    /// Block device configuration backing the VM rootfs
    /// (handled via do_handle_device as a BlockCfg).
    VmRootfs(BlockConfig),
}
#[derive(Debug, Clone, Copy, PartialEq)]

View File

@ -121,6 +121,11 @@ impl ResourceManagerInner {
.await
.context("failed to handle network")?;
}
ResourceConfig::VmRootfs(r) => {
do_handle_device(&self.device_manager, &DeviceConfig::BlockCfg(r))
.await
.context("do handle device failed.")?;
}
};
}

View File

@ -11,7 +11,7 @@ use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use hypervisor::{
device::{
device_manager::{do_handle_device, DeviceManager},
device_manager::{do_handle_device, get_block_driver, DeviceManager},
DeviceConfig, DeviceType,
},
BlockConfig,
@ -43,9 +43,12 @@ impl BlockRootfs {
fs::create_dir_all(&host_path)
.map_err(|e| anyhow!("failed to create rootfs dir {}: {:?}", host_path, e))?;
let block_driver = get_block_driver(d).await;
let block_device_config = &mut BlockConfig {
major: stat::major(dev_id) as i64,
minor: stat::minor(dev_id) as i64,
driver_option: block_driver,
..Default::default()
};

View File

@ -16,7 +16,7 @@ use crate::volume::utils::{
};
use hypervisor::{
device::{
device_manager::{do_handle_device, DeviceManager},
device_manager::{do_handle_device, get_block_driver, DeviceManager},
DeviceConfig, DeviceType,
},
BlockConfig,
@ -42,6 +42,8 @@ impl BlockVolume {
// default block device fs type: ext4.
let mut blk_dev_fstype = DEFAULT_VOLUME_FS_TYPE.to_string();
let block_driver = get_block_driver(d).await;
let block_device_config = match m.r#type.as_str() {
KATA_MOUNT_BIND_TYPE => {
let fstat = stat::stat(mnt_src).context(format!("stat {}", m.source))?;
@ -49,6 +51,7 @@ impl BlockVolume {
BlockConfig {
major: stat::major(fstat.st_rdev) as i64,
minor: stat::minor(fstat.st_rdev) as i64,
driver_option: block_driver,
..Default::default()
}
}
@ -77,6 +80,7 @@ impl BlockVolume {
BlockConfig {
path_on_host: v.device,
driver_option: block_driver,
..Default::default()
}
}

View File

@ -16,7 +16,7 @@ use crate::volume::utils::{
};
use hypervisor::{
device::{
device_manager::{do_handle_device, DeviceManager},
device_manager::{do_handle_device, get_block_driver, DeviceManager},
DeviceConfig, DeviceType,
},
VhostUserConfig, VhostUserType,
@ -73,9 +73,12 @@ impl SPDKVolume {
}
}
let vhu_blk_config = &mut VhostUserConfig {
let block_driver = get_block_driver(d).await;
let mut vhu_blk_config = &mut VhostUserConfig {
socket_path: device,
device_type: VhostUserType::Blk("vhost-user-blk-pci".to_owned()),
driver_option: block_driver,
..Default::default()
};

View File

@ -17,7 +17,7 @@ use common::{
Sandbox, SandboxNetworkEnv,
};
use containerd_shim_protos::events::task::TaskOOM;
use hypervisor::{dragonball::Dragonball, Hypervisor, HYPERVISOR_DRAGONBALL};
use hypervisor::{dragonball::Dragonball, BlockConfig, Hypervisor, HYPERVISOR_DRAGONBALL};
use kata_sys_util::hooks::HookStates;
use kata_types::config::TomlConfig;
use resource::{
@ -105,6 +105,8 @@ impl VirtSandbox {
network_env: SandboxNetworkEnv,
) -> Result<Vec<ResourceConfig>> {
let mut resource_configs = vec![];
// prepare network config
if !network_env.network_created {
if let Some(netns_path) = network_env.netns {
let network_config = ResourceConfig::Network(
@ -114,10 +116,20 @@ impl VirtSandbox {
resource_configs.push(network_config);
}
}
let hypervisor_config = self.hypervisor.hypervisor_config().await;
let virtio_fs_config = ResourceConfig::ShareFs(hypervisor_config.shared_fs);
// prepare sharefs device config
let virtio_fs_config =
ResourceConfig::ShareFs(self.hypervisor.hypervisor_config().await.shared_fs);
resource_configs.push(virtio_fs_config);
// prepare VM rootfs device config
let vm_rootfs = ResourceConfig::VmRootfs(
self.prepare_rootfs_config()
.await
.context("failed to prepare rootfs device config")?,
);
resource_configs.push(vm_rootfs);
Ok(resource_configs)
}
@ -173,6 +185,30 @@ impl VirtSandbox {
})
}
/// Build the `BlockConfig` describing the device that backs the VM rootfs.
///
/// The initrd path is preferred over the image path when both are set
/// (config validation elsewhere rejects having both configured).
///
/// # Errors
/// Fails when neither an initrd nor an image path is configured.
async fn prepare_rootfs_config(&self) -> Result<BlockConfig> {
    let boot_info = self.hypervisor.hypervisor_config().await.boot_info;
    // Move the chosen path out of the owned `boot_info` — no clone needed
    // (the original cloned `initrd` redundantly while moving `image`).
    let image = {
        if !boot_info.initrd.is_empty() {
            Ok(boot_info.initrd)
        } else if !boot_info.image.is_empty() {
            Ok(boot_info.image)
        } else {
            Err(anyhow!("failed to get image"))
        }
    }
    .context("get image")?;

    Ok(BlockConfig {
        path_on_host: image,
        // The rootfs image must never be written by the guest.
        is_readonly: true,
        driver_option: boot_info.vm_rootfs_driver,
        ..Default::default()
    })
}
fn has_prestart_hooks(
&self,
prestart_hooks: Vec<oci::Hook>,
@ -212,6 +248,7 @@ impl Sandbox for VirtSandbox {
let resources = self
.prepare_for_start_sandbox(id, network_env.clone())
.await?;
self.resource_manager
.prepare_before_start_vm(resources)
.await