runtime-rs: make virtio-blk use the pci bus as default

Dragonball's MMIO bus only supports legacy interrupts, while its PCI bus
supports MSI-X interrupts. To improve block device performance, virtio-blk
devices are therefore placed on the PCI bus by default.
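
For reference, a minimal sketch of the corresponding runtime configuration
key (assuming the generated configuration-dragonball.toml layout); the
previous MMIO behavior can still be selected explicitly:

block_device_driver = "virtio-blk-pci"    # new default
# block_device_driver = "virtio-blk-mmio" # previous default, still supported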

We tested virtio-blk performance with fio using the following command:

fio -filename=./test  -direct=1 -iodepth 32 -thread -rw=randrw
-rwmixread=50 -ioengine=libaio -bs=4k -size=10G -numjobs=4
-group_reporting -name=mytest

With the legacy (MMIO) interrupt, the fio results were:

read : io=20485MB, bw=195162KB/s, iops=48790, runt=107485msec
write: io=20475MB, bw=195061KB/s, iops=48765, runt=107485msec

After switching to MSI-X interrupts on the PCI bus, the fio results were:

read : io=20485MB, bw=260862KB/s, iops=65215, runt= 80414msec
write: io=20475MB, bw=260727KB/s, iops=65181, runt= 80414msec

This is roughly a 34% improvement in IOPS and bandwidth (65215 / 48790 ≈ 1.34).

Signed-off-by: Fupan Li <fupan.lfp@antgroup.com>

@@ -227,7 +227,7 @@ ifneq (,$(DBCMD))
# dragonball-specific options (all should be suffixed by "_DB")
VMROOTFSDRIVER_DB := virtio-blk-pci
DEFMAXVCPUS_DB := 0
DEFBLOCKSTORAGEDRIVER_DB := virtio-blk-mmio
DEFBLOCKSTORAGEDRIVER_DB := virtio-blk-pci
DEFNETWORKMODEL_DB := tcfilter
KERNELPARAMS_DB = console=ttyS1 agent.log_vport=1025
KERNELTYPE_DB = uncompressed

@@ -122,7 +122,7 @@ default_maxmemory = @DEFMAXMEMSZ@
# is backed by a block device or a file. This driver facilitates attaching the
# storage directly to the guest VM.
#
# DB only supports virtio-blk-mmio.
# DB supports virtio-blk-mmio and virtio-blk-pci; virtio-blk-pci is preferred.
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_DB@"
# This option changes the default hypervisor and kernel parameters

@@ -18,6 +18,7 @@ use dragonball::{
vm::VmConfigInfo,
};
use crate::DEFAULT_HOTPLUG_TIMEOUT;
use kata_sys_util::mount;
use kata_types::{
capabilities::{Capabilities, CapabilityBits},
@@ -37,8 +38,6 @@ const DRAGONBALL_INITRD: &str = "initrd";
const DRAGONBALL_ROOT_FS: &str = "rootfs";
const BALLOON_DEVICE_ID: &str = "balloon0";
const MEM_DEVICE_ID: &str = "memmr0";
/// default hotplug timeout
const DEFAULT_HOTPLUG_TIMEOUT: u64 = 250;
#[derive(Debug)]
pub struct DragonballInner {

@@ -6,13 +6,15 @@
use std::convert::TryFrom;
use std::path::PathBuf;
use std::time::Duration;
use super::{build_dragonball_network_config, DragonballInner};
use crate::device::pci_path::PciPath;
use crate::VhostUserConfig;
use crate::{device::pci_path::PciPath, KATA_BLK_DEV_TYPE};
use crate::{
device::DeviceType, HybridVsockConfig, NetworkConfig, ShareFsConfig, ShareFsMountConfig,
ShareFsMountOperation, ShareFsMountType, VfioDevice, VmmState, JAILER_ROOT,
ShareFsMountOperation, ShareFsMountType, VfioDevice, VmmState, DEFAULT_HOTPLUG_TIMEOUT,
JAILER_ROOT,
};
use anyhow::{anyhow, Context, Result};
use dbs_utils::net::MacAddr;
@@ -62,15 +64,30 @@ impl DragonballInner {
Ok(DeviceType::Vfio(hostdev))
}
DeviceType::Block(block) => {
self.add_block_device(
block.config.path_on_host.as_str(),
block.device_id.as_str(),
block.config.is_readonly,
block.config.no_drop,
block.config.is_direct,
)
.context("add block device")?;
DeviceType::Block(mut block) => {
let use_pci_bus = if block.config.driver_option == KATA_BLK_DEV_TYPE {
Some(true)
} else {
None
};
let guest_device_id = self
.add_block_device(
block.config.path_on_host.as_str(),
block.device_id.as_str(),
block.config.is_readonly,
block.config.no_drop,
block.config.is_direct,
use_pci_bus,
)
.context("add block device")?;
if let Some(slot) = guest_device_id {
if slot > 0 {
block.config.pci_path = Some(PciPath::try_from(slot as u32)?);
}
}
Ok(DeviceType::Block(block))
}
DeviceType::VhostUserBlk(block) => {
@@ -80,6 +97,7 @@ impl DragonballInner {
block.is_readonly,
block.no_drop,
None,
None,
)
.context("add vhost user based block device")?;
Ok(DeviceType::VhostUserBlk(block))
@@ -208,7 +226,8 @@ impl DragonballInner {
read_only: bool,
no_drop: bool,
is_direct: Option<bool>,
) -> Result<()> {
use_pci_bus: Option<bool>,
) -> Result<Option<i32>> {
let jailed_drive = self.get_resource(path, id).context("get resource")?;
self.cached_block_devices.insert(id.to_string());
@@ -219,10 +238,11 @@ impl DragonballInner {
is_direct: is_direct.unwrap_or(self.config.blockdev_info.block_device_cache_direct),
no_drop,
is_read_only: read_only,
use_pci_bus,
..Default::default()
};
self.vmm_instance
.insert_block_device(blk_cfg)
.insert_block_device(blk_cfg, Duration::from_millis(DEFAULT_HOTPLUG_TIMEOUT))
.context("insert block device")
}

@@ -237,12 +237,22 @@ impl VmmInstance {
Ok(())
}
pub fn insert_block_device(&self, device_cfg: BlockDeviceConfigInfo) -> Result<()> {
self.handle_request_with_retry(Request::Sync(VmmAction::InsertBlockDevice(
device_cfg.clone(),
)))
.with_context(|| format!("Failed to insert block device {:?}", device_cfg))?;
Ok(())
pub fn insert_block_device(
&self,
device_cfg: BlockDeviceConfigInfo,
timeout: Duration,
) -> Result<Option<i32>> {
let vmmdata = self
.handle_request_with_retry(Request::Sync(VmmAction::InsertBlockDevice(
device_cfg.clone(),
)))
.with_context(|| format!("Failed to insert block device {:?}", device_cfg))?;
if let VmmData::SyncHotplug((_, receiver)) = vmmdata {
let guest_dev_id = receiver.recv_timeout(timeout)?;
return Ok(guest_dev_id);
}
Ok(None)
}
pub fn remove_block_device(&self, id: &str) -> Result<()> {

@@ -69,6 +69,9 @@ pub const HYPERVISOR_REMOTE: &str = "remote";
pub const DEFAULT_HYBRID_VSOCK_NAME: &str = "kata.hvsock";
pub const JAILER_ROOT: &str = "root";
/// default hotplug timeout
const DEFAULT_HOTPLUG_TIMEOUT: u64 = 250;
#[derive(PartialEq, Debug, Clone)]
pub(crate) enum VmmState {
NotReady,