runtime-rs: change hypervisor add_device trait to return device copy

Block (virtio-blk) and vfio devices are currently not handled correctly
by the agent, because the agent is not given the correct PCI paths for
these devices.

The PCI paths for these devices can be inferred from the PCI information
provided by the hypervisor when a device is added.
Hence, change the add_device trait function to return a copy of the
device, with PCI info filled in by the hypervisor where available. This
copy can then be passed to the agent so that it can correctly detect the
devices within the VM.
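
In trait terms, the change looks like this (a minimal sketch; the real
Hypervisor trait and DeviceType enum are in the diff below):

use anyhow::Result;
use async_trait::async_trait;

// Stand-in for the real DeviceType enum from the device module.
#[derive(Debug, Clone)]
pub enum DeviceType { /* Block(BlockDevice), Vfio(VfioDevice), ... */ }

#[async_trait]
pub trait Hypervisor: Send + Sync {
    // Previously: async fn add_device(&self, device: DeviceType) -> Result<()>;
    // Now the hypervisor hands back a copy of the device, with fields
    // such as the PCI path filled in where it can provide them.
    async fn add_device(&self, device: DeviceType) -> Result<DeviceType>;
}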

This commit implements the PCI info update for virtio-blk devices on
cloud-hypervisor, with stubs provided for the other hypervisors.
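
For cloud-hypervisor, the inference amounts to parsing the bdf
("domain:bus:device.function") string returned by the device-add API.
A standalone sketch (the function name and example address are
illustrative; the real code returns a PciPath via
PciPath::convert_from_string, as in the diff below):

use anyhow::{anyhow, Result};

// Parse the device/slot number out of a guest PCI address such as
// "0000:00:06.0" (illustrative value). Only domain 0000, bus 00 and
// function 0 are expected for these hotplugged devices.
fn slot_from_bdf(bdf: &str) -> Result<u8> {
    let tokens: Vec<&str> = bdf.split(':').collect();
    if tokens.len() != 3 || tokens[0] != "0000" || tokens[1] != "00" {
        return Err(anyhow!("unexpected PCI address {:?}", bdf));
    }
    let dev_func: Vec<&str> = tokens[2].split('.').collect();
    if dev_func.len() != 2 || dev_func[1] != "0" {
        return Err(anyhow!("unexpected PCI address {:?}", bdf));
    }
    Ok(u8::from_str_radix(dev_func[0], 16)?)
}

// slot_from_bdf("0000:00:06.0") == Ok(0x06): that slot becomes the
// single element of the PCI path handed to the agent.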

Remove Vsock from the DeviceType enum, as Vsock currently does not
implement the Device trait: it lacks the attach and detach trait
functions, among others. Part of the reason is that these functions
require Vsock to implement the Clone trait, since they need cloned
copies to be passed down to the hypervisor.

The change introduced for returning a device copy from the add_device
hypervisor trait explicitly requires a device to implement the
Clone trait. Hence, remove Vsock from the DeviceType enum for now, as
its implementation is incomplete and not currently used.

Note: one of the blockers for adding the Clone trait to Vsock is that it
currently holds a file handle, which cannot be cloned. Implementing the
Clone and Device traits for Vsock will require a future implementation
change to make it cloneable.
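
To illustrate the blocker (struct and field names here are hypothetical,
not the actual Vsock definition):

use std::fs::File;

// Hypothetical stand-in for the Vsock device struct.
#[derive(Debug)]
struct VsockishDevice {
    // A raw file handle such as this is the blocker: std::fs::File does
    // not implement Clone, so #[derive(Clone)] on the struct would not
    // compile. Becoming cloneable means a future change, e.g. wrapping
    // the handle in Arc<File>, or a manual Clone impl built on the
    // fallible File::try_clone() (where a failure could only panic).
    vhost_fd: File,
}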

Fixes: #8283

Signed-off-by: Archana Shinde <archana.m.shinde@intel.com>


@@ -70,7 +70,7 @@ pub async fn cloud_hypervisor_vm_stop(mut socket: UnixStream) -> Result<Option<S
.await?
}
#[derive(Serialize, Deserialize)]
#[derive(Deserialize, Debug)]
pub struct PciDeviceInfo {
pub id: String,
pub bdf: String,


@@ -7,16 +7,18 @@
use super::inner::CloudHypervisorInner;
use crate::device::DeviceType;
use crate::BlockDevice;
use crate::HybridVsockConfig;
use crate::HybridVsockDevice;
use crate::NetworkConfig;
use crate::PciPath;
use crate::ShareFsDevice;
use crate::ShareFsDeviceConfig;
use crate::VfioDevice;
use crate::VmmState;
use anyhow::{anyhow, Context, Result};
use ch_config::ch_api::cloud_hypervisor_vm_device_add;
use ch_config::ch_api::{
cloud_hypervisor_vm_blockdev_add, cloud_hypervisor_vm_device_add,
cloud_hypervisor_vm_device_remove, cloud_hypervisor_vm_fs_add, PciDeviceInfo,
VmRemoveDeviceData,
cloud_hypervisor_vm_blockdev_add, cloud_hypervisor_vm_device_remove,
cloud_hypervisor_vm_fs_add, PciDeviceInfo, VmRemoveDeviceData,
};
use ch_config::convert::{DEFAULT_DISK_QUEUES, DEFAULT_DISK_QUEUE_SIZE, DEFAULT_NUM_PCI_SEGMENTS};
use ch_config::DiskConfig;
@@ -31,7 +33,7 @@ pub const DEFAULT_FS_QUEUES: usize = 1;
const DEFAULT_FS_QUEUE_SIZE: u16 = 1024;
impl CloudHypervisorInner {
pub(crate) async fn add_device(&mut self, device: DeviceType) -> Result<()> {
pub(crate) async fn add_device(&mut self, device: DeviceType) -> Result<DeviceType> {
if self.state != VmmState::VmRunning {
// If the VM is not running, add the device to the pending list to
// be handled later.
@@ -55,8 +57,8 @@ impl CloudHypervisorInner {
// - Network details need to be saved for later application.
//
match device {
DeviceType::ShareFs(_) => self.pending_devices.insert(0, device),
DeviceType::Network(_) => self.pending_devices.insert(0, device),
DeviceType::ShareFs(_) => self.pending_devices.insert(0, device.clone()),
DeviceType::Network(_) => self.pending_devices.insert(0, device.clone()),
_ => {
debug!(
sl!(),
@@ -65,20 +67,18 @@ impl CloudHypervisorInner {
}
}
return Ok(());
return Ok(device);
}
self.handle_add_device(device).await?;
Ok(())
self.handle_add_device(device).await
}
async fn handle_add_device(&mut self, device: DeviceType) -> Result<()> {
async fn handle_add_device(&mut self, device: DeviceType) -> Result<DeviceType> {
match device {
DeviceType::ShareFs(sharefs) => self.handle_share_fs_device(sharefs.config).await,
DeviceType::HybridVsock(hvsock) => self.handle_hvsock_device(&hvsock.config).await,
DeviceType::ShareFs(sharefs) => self.handle_share_fs_device(sharefs).await,
DeviceType::HybridVsock(hvsock) => self.handle_hvsock_device(hvsock).await,
DeviceType::Block(block) => self.handle_block_device(block).await,
DeviceType::Vfio(vfiodev) => self.handle_vfio_device(&vfiodev).await,
DeviceType::Vfio(vfiodev) => self.handle_vfio_device(vfiodev).await,
_ => Err(anyhow!("unhandled device: {:?}", device)),
}
}
@@ -108,9 +108,13 @@ impl CloudHypervisorInner {
}
}
async fn handle_share_fs_device(&mut self, cfg: ShareFsDeviceConfig) -> Result<()> {
if cfg.fs_type != VIRTIO_FS {
return Err(anyhow!("cannot handle share fs type: {:?}", cfg.fs_type));
async fn handle_share_fs_device(&mut self, sharefs: ShareFsDevice) -> Result<DeviceType> {
let device: ShareFsDevice = sharefs.clone();
if device.config.fs_type != VIRTIO_FS {
return Err(anyhow!(
"cannot handle share fs type: {:?}",
device.config.fs_type
));
}
let socket = self
@@ -119,26 +123,26 @@ impl CloudHypervisorInner {
.ok_or("missing socket")
.map_err(|e| anyhow!(e))?;
let num_queues: usize = if cfg.queue_num > 0 {
cfg.queue_num as usize
let num_queues: usize = if device.config.queue_num > 0 {
device.config.queue_num as usize
} else {
DEFAULT_FS_QUEUES
};
let queue_size: u16 = if cfg.queue_num > 0 {
u16::try_from(cfg.queue_size)?
let queue_size: u16 = if device.config.queue_num > 0 {
u16::try_from(device.config.queue_size)?
} else {
DEFAULT_FS_QUEUE_SIZE
};
let socket_path = if cfg.sock_path.starts_with('/') {
PathBuf::from(cfg.sock_path)
let socket_path = if device.config.sock_path.starts_with('/') {
PathBuf::from(device.config.sock_path)
} else {
scoped_join(&self.vm_path, cfg.sock_path)?
scoped_join(&self.vm_path, device.config.sock_path)?
};
let fs_config = FsConfig {
tag: cfg.mount_tag,
tag: device.config.mount_tag,
socket: socket_path,
num_queues,
queue_size,
@@ -157,14 +161,16 @@ impl CloudHypervisorInner {
debug!(sl!(), "fs add response: {:?}", detail);
}
Ok(())
Ok(DeviceType::ShareFs(sharefs))
}
async fn handle_hvsock_device(&mut self, _cfg: &HybridVsockConfig) -> Result<()> {
Ok(())
async fn handle_hvsock_device(&mut self, device: HybridVsockDevice) -> Result<DeviceType> {
Ok(DeviceType::HybridVsock(device))
}
async fn handle_vfio_device(&mut self, device: &VfioDevice) -> Result<()> {
async fn handle_vfio_device(&mut self, device: VfioDevice) -> Result<DeviceType> {
let vfio_device: VfioDevice = device.clone();
// For a device with multiple functions, or an IOMMU group with one or
// more devices, the primary device is selected to be passed to the VM.
// The first device is the primary device.
@@ -206,7 +212,7 @@ impl CloudHypervisorInner {
.insert(device.device_id.clone(), dev_info.id);
}
Ok(())
Ok(DeviceType::Vfio(vfio_device))
}
async fn remove_vfio_device(&mut self, device: &VfioDevice) -> Result<()> {
@@ -242,7 +248,31 @@ impl CloudHypervisorInner {
Ok(())
}
async fn handle_block_device(&mut self, device: BlockDevice) -> Result<()> {
// Various cloud-hypervisor APIs report a PCI address in
// "DDDD:BB:DD.F" (domain:bus:device.function) form within the
// PciDeviceInfo struct, eg "0000:00:DD.F".
fn clh_pci_info_to_path(bdf: &str) -> Result<PciPath> {
let tokens: Vec<&str> = bdf.split(':').collect();
if tokens.len() != 3 || tokens[0] != "0000" || tokens[1] != "00" {
return Err(anyhow!(
"Unexpected PCI address {:?} for clh device add",
bdf
));
}
let toks: Vec<&str> = tokens[2].split('.').collect();
if toks.len() != 2 || toks[1] != "0" || toks[0].len() != 2 {
return Err(anyhow!(
"Unexpected PCI address {:?} for clh device add",
bdf
));
}
PciPath::convert_from_string(toks[0])
}
async fn handle_block_device(&mut self, device: BlockDevice) -> Result<DeviceType> {
let mut block_dev = device.clone();
let socket = self
.api_socket
.as_ref()
@@ -272,9 +302,10 @@ impl CloudHypervisorInner {
let dev_info: PciDeviceInfo =
serde_json::from_str(detail.as_str()).map_err(|e| anyhow!(e))?;
self.device_ids.insert(device.device_id, dev_info.id);
block_dev.config.pci_path = Some(Self::clh_pci_info_to_path(dev_info.bdf.as_str())?);
}
Ok(())
Ok(DeviceType::Block(block_dev))
}
pub(crate) async fn get_shared_devices(


@@ -79,7 +79,7 @@ impl Hypervisor for CloudHypervisor {
inner.save_vm().await
}
async fn add_device(&self, device: DeviceType) -> Result<()> {
async fn add_device(&self, device: DeviceType) -> Result<DeviceType> {
let mut inner = self.inner.write().await;
inner.add_device(device).await
}


@@ -7,6 +7,7 @@
use crate::device::Device;
use crate::device::DeviceType;
use crate::Hypervisor as hypervisor;
use crate::PciPath;
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
@@ -39,6 +40,9 @@ pub struct BlockConfig {
/// device path in guest
pub virt_path: String,
/// pci path is the slot at which the drive is attached
pub pci_path: Option<PciPath>,
/// device attach count
pub attach_count: u64,
@@ -78,11 +82,20 @@ impl Device for BlockDevice {
{
return Ok(());
}
if let Err(e) = h.add_device(DeviceType::Block(self.clone())).await {
match h.add_device(DeviceType::Block(self.clone())).await {
Ok(dev) => {
// Update device info with the one received from device attach
if let DeviceType::Block(blk) = dev {
self.config = blk.config;
}
Ok(())
}
Err(e) => {
self.decrease_attach_count().await?;
return Err(e);
}
return Ok(());
}
}
async fn detach(&mut self, h: &dyn hypervisor) -> Result<Option<u64>> {


@@ -10,7 +10,7 @@ use crate::device::driver::vhost_user_blk::VhostUserBlkDevice;
use crate::{
BlockConfig, BlockDevice, HybridVsockConfig, HybridVsockDevice, Hypervisor as hypervisor,
NetworkConfig, NetworkDevice, ShareFsDevice, ShareFsDeviceConfig, ShareFsMountConfig,
ShareFsMountDevice, VfioConfig, VfioDevice, VhostUserConfig, VsockConfig, VsockDevice,
ShareFsMountDevice, VfioConfig, VfioDevice, VhostUserConfig, VsockConfig,
};
use anyhow::Result;
use async_trait::async_trait;
@@ -31,7 +31,7 @@ pub enum DeviceConfig {
HybridVsockCfg(HybridVsockConfig),
}
#[derive(Debug)]
#[derive(Debug, Clone)]
pub enum DeviceType {
Block(BlockDevice),
VhostUserBlk(VhostUserBlkDevice),
@@ -40,7 +40,6 @@ pub enum DeviceType {
ShareFs(ShareFsDevice),
ShareFsMount(ShareFsMountDevice),
HybridVsock(HybridVsockDevice),
Vsock(VsockDevice),
}
impl fmt::Display for DeviceType {


@@ -74,9 +74,6 @@ impl DragonballInner {
DeviceType::ShareFsMount(sharefs_mount) => self
.add_share_fs_mount(&sharefs_mount.config)
.context("add share fs mount"),
DeviceType::Vsock(_) => {
todo!()
}
}
}


@@ -92,9 +92,12 @@ impl Hypervisor for Dragonball {
inner.resize_vcpu(old_vcpus, new_vcpus).await
}
async fn add_device(&self, device: DeviceType) -> Result<()> {
async fn add_device(&self, device: DeviceType) -> Result<DeviceType> {
let mut inner = self.inner.write().await;
inner.add_device(device).await
match inner.add_device(device.clone()).await {
Ok(_) => Ok(device),
Err(err) => Err(err),
}
}
async fn remove_device(&self, device: DeviceType) -> Result<()> {


@@ -85,7 +85,7 @@ pub trait Hypervisor: std::fmt::Debug + Send + Sync {
async fn resize_vcpu(&self, old_vcpus: u32, new_vcpus: u32) -> Result<(u32, u32)>; // returns (old_vcpus, new_vcpus)
// device manager
async fn add_device(&self, device: DeviceType) -> Result<()>;
async fn add_device(&self, device: DeviceType) -> Result<DeviceType>;
async fn remove_device(&self, device: DeviceType) -> Result<()>;
// utils


@@ -146,9 +146,9 @@ use crate::device::DeviceType;
// device manager part of Hypervisor
impl QemuInner {
pub(crate) async fn add_device(&mut self, device: DeviceType) -> Result<()> {
pub(crate) async fn add_device(&mut self, device: DeviceType) -> Result<DeviceType> {
info!(sl!(), "QemuInner::add_device() {}", device);
todo!()
Ok(device)
}
pub(crate) async fn remove_device(&mut self, device: DeviceType) -> Result<()> {


@@ -74,7 +74,7 @@ impl Hypervisor for Qemu {
inner.save_vm().await
}
async fn add_device(&self, device: DeviceType) -> Result<()> {
async fn add_device(&self, device: DeviceType) -> Result<DeviceType> {
let mut inner = self.inner.write().await;
inner.add_device(device).await
}


@@ -89,9 +89,16 @@ pub(crate) async fn setup_inline_virtiofs(id: &str, h: &dyn Hypervisor) -> Resul
prefetch_list_path: None,
},
};
h.add_device(DeviceType::ShareFsMount(virtio_fs))
let result = h
.add_device(DeviceType::ShareFsMount(virtio_fs))
.await
.with_context(|| format!("fail to attach passthrough fs {:?}", source))
.with_context(|| format!("fail to attach passthrough fs {:?}", source));
match result {
Ok(_) => Ok(()),
Err(e) => Err(e),
}
}
pub async fn rafs_mount(