runtime-rs: block volume

support block volume in runtime-rs

Fixes: #5375
Signed-off-by: Zhongtao Hu <zhongtaohu.tim@linux.alibaba.com>
Signed-off-by: alex.lyn <alex.lyn@antgroup.com>

commit fe9ec67644 (parent a8bfac90b1)
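At a high level, the change teaches runtime-rs to recognize an OCI bind mount whose source is a block device, attach that device through the hypervisor's DeviceManager, and describe it to the guest agent as a Storage entry instead of routing it through the shared filesystem. The helper below is a minimal, self-contained sketch of the host-side detection step (using the nix and anyhow crates); it is illustrative only and is not the commit's code, which lives in is_block_volume() and BlockVolume::new().

// Hypothetical helper: classify a bind-mount source and pull out the device
// numbers that a hypervisor block-device config (BlockConfig in the diff) needs.
use anyhow::{anyhow, Context, Result};
use nix::sys::stat::{self, SFlag};

fn block_device_numbers(source: &str) -> Result<(u64, u64)> {
    let st = stat::stat(source).context(format!("stat {}", source))?;
    // Keep only the file-type bits of st_mode and compare against S_IFBLK.
    if SFlag::from_bits_truncate(st.st_mode) != SFlag::S_IFBLK {
        return Err(anyhow!("{} is not a block device", source));
    }
    // st_rdev holds the device numbers for block/character special files.
    Ok((stat::major(st.st_rdev), stat::minor(st.st_rdev)))
}

fn main() -> Result<()> {
    // Illustrative path; on a real host this would be the volume's bind-mount source.
    let (major, minor) = block_device_numbers("/dev/vda")?;
    println!("major: {}, minor: {}", major, minor);
    Ok(())
}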
@@ -236,7 +236,13 @@ impl ResourceManagerInner {
         spec: &oci::Spec,
     ) -> Result<Vec<Arc<dyn Volume>>> {
         self.volume_resource
-            .handler_volumes(&self.share_fs, cid, spec)
+            .handler_volumes(
+                &self.share_fs,
+                cid,
+                spec,
+                self.device_manager.as_ref(),
+                &self.sid,
+            )
             .await
     }
@@ -6,27 +6,126 @@
 
 use anyhow::Result;
 use async_trait::async_trait;
+use std::{collections::HashMap, fs, path::Path};
 
-use super::Volume;
 use crate::share_fs::{do_get_guest_path, do_get_host_path};
 
+use super::{share_fs_volume::generate_mount_path, Volume};
+use agent::Storage;
+use anyhow::{anyhow, Context};
+use hypervisor::{device::DeviceManager, BlockConfig, DeviceConfig};
+use nix::sys::stat::{self, SFlag};
+use tokio::sync::RwLock;
+
 #[derive(Debug)]
-pub(crate) struct BlockVolume {}
+pub(crate) struct BlockVolume {
+    storage: Option<agent::Storage>,
+    mount: oci::Mount,
+    device_id: String,
+}
 
 /// BlockVolume: block device volume
 impl BlockVolume {
-    pub(crate) fn new(_m: &oci::Mount) -> Result<Self> {
-        Ok(Self {})
+    pub(crate) async fn new(
+        d: &RwLock<DeviceManager>,
+        m: &oci::Mount,
+        read_only: bool,
+        cid: &str,
+        sid: &str,
+    ) -> Result<Self> {
+        let fstat = stat::stat(m.source.as_str()).context(format!("stat {}", m.source))?;
+        info!(sl!(), "device stat: {:?}", fstat);
+        let mut options = HashMap::new();
+        if read_only {
+            options.insert("read_only".to_string(), "true".to_string());
+        }
+
+        let block_device_config = &mut BlockConfig {
+            major: stat::major(fstat.st_rdev) as i64,
+            minor: stat::minor(fstat.st_rdev) as i64,
+            ..Default::default()
+        };
+
+        let device_id = d
+            .write()
+            .await
+            .new_device(&DeviceConfig::Block(block_device_config.clone()))
+            .await
+            .context("failed to create device")?;
+
+        d.write()
+            .await
+            .try_add_device(device_id.as_str())
+            .await
+            .context("failed to add device")?;
+
+        let file_name = Path::new(&m.source).file_name().unwrap().to_str().unwrap();
+        let file_name = generate_mount_path(cid, file_name);
+        let guest_path = do_get_guest_path(&file_name, cid, true, false);
+        let host_path = do_get_host_path(&file_name, sid, cid, true, read_only);
+        fs::create_dir_all(&host_path)
+            .map_err(|e| anyhow!("failed to create rootfs dir {}: {:?}", host_path, e))?;
+
+        // get complete device information
+        let dev_info = d
+            .read()
+            .await
+            .get_device_info(&device_id)
+            .await
+            .context("failed to get device info")?;
+
+        // storage
+        let mut storage = Storage::default();
+
+        if let DeviceConfig::Block(config) = dev_info {
+            storage.driver = config.driver_option;
+            storage.source = config.virt_path;
+        }
+
+        storage.options = if read_only {
+            vec!["ro".to_string()]
+        } else {
+            Vec::new()
+        };
+
+        storage.mount_point = guest_path.clone();
+
+        // If the volume specified a filesystem type, use it. Otherwise, default to
+        // ext4, since that is the only filesystem supported right now.
+        if m.r#type != "bind" {
+            storage.fs_type = m.r#type.clone();
+        } else {
+            storage.fs_type = "ext4".to_string();
+        }
+
+        // mount
+        let mount = oci::Mount {
+            destination: m.destination.clone(),
+            r#type: m.r#type.clone(),
+            source: guest_path.clone(),
+            options: m.options.clone(),
+        };
+
+        Ok(Self {
+            storage: Some(storage),
+            mount,
+            device_id,
+        })
     }
 }
 
 #[async_trait]
 impl Volume for BlockVolume {
-    fn get_volume_mount(&self) -> anyhow::Result<Vec<oci::Mount>> {
-        todo!()
+    fn get_volume_mount(&self) -> Result<Vec<oci::Mount>> {
+        Ok(vec![self.mount.clone()])
     }
 
     fn get_storage(&self) -> Result<Vec<agent::Storage>> {
-        todo!()
+        let s = if let Some(s) = self.storage.as_ref() {
+            vec![s.clone()]
+        } else {
+            vec![]
+        };
+        Ok(s)
     }
 
     async fn cleanup(&self) -> Result<()> {
@@ -34,9 +133,19 @@ impl Volume for BlockVolume {
         warn!(sl!(), "Cleaning up BlockVolume is still unimplemented.");
         Ok(())
     }
+
+    fn get_device_id(&self) -> Result<Option<String>> {
+        Ok(Some(self.device_id.clone()))
+    }
 }
 
-pub(crate) fn is_block_volume(_m: &oci::Mount) -> bool {
+// attach block device
+pub(crate) fn is_block_volume(m: &oci::Mount) -> bool {
+    if m.r#type != "bind" {
+        return false;
+    }
+    if let Ok(fstat) = stat::stat(m.source.as_str()).context(format!("stat {}", m.source)) {
+        info!(sl!(), "device stat: {:?}", fstat);
+        return SFlag::from_bits_truncate(fstat.st_mode) == SFlag::S_IFBLK;
+    }
     false
 }
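For reference, the Storage record that BlockVolume::new() assembles is what tells the guest agent how to mount the device: the driver and virtual path come from the device manager's DeviceConfig::Block, the filesystem type defaults to ext4 for plain bind mounts, the mount point is the generated guest path, and "ro" is added when the OCI mount is read-only. The sketch below uses a local stand-in struct and placeholder values ("blk", "/dev/vdb", an invented guest path) rather than the real agent::Storage and a live DeviceManager:

// Stand-in sketch of the storage entry a read-only block volume produces.
// Field names mirror those used in the diff; the values are placeholders.
#[derive(Debug, Default)]
struct Storage {
    driver: String,
    source: String,
    fs_type: String,
    mount_point: String,
    options: Vec<String>,
}

fn example_block_storage(read_only: bool) -> Storage {
    let mut storage = Storage::default();
    storage.driver = "blk".to_string();       // from DeviceConfig::Block -> driver_option
    storage.source = "/dev/vdb".to_string();  // from DeviceConfig::Block -> virt_path
    storage.fs_type = "ext4".to_string();     // default when the OCI mount type is "bind"
    storage.mount_point = "/run/kata-containers/illustrative-guest-path".to_string();
    storage.options = if read_only { vec!["ro".to_string()] } else { Vec::new() };
    storage
}

fn main() {
    println!("{:?}", example_block_storage(true));
}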
@@ -38,4 +38,8 @@ impl Volume for DefaultVolume {
         warn!(sl!(), "Cleaning up DefaultVolume is still unimplemented.");
         Ok(())
     }
+
+    fn get_device_id(&self) -> Result<Option<String>> {
+        Ok(None)
+    }
 }
@@ -91,6 +91,10 @@ impl Volume for Hugepage {
     async fn cleanup(&self) -> Result<()> {
        Ok(())
     }
+
+    fn get_device_id(&self) -> Result<Option<String>> {
+        Ok(None)
+    }
 }
 
 pub(crate) fn get_huge_page_option(m: &oci::Mount) -> Result<Option<Vec<String>>> {
@@ -12,10 +12,11 @@ mod shm_volume;
 use async_trait::async_trait;
 
 use anyhow::{Context, Result};
+use hypervisor::device::DeviceManager;
 use std::{sync::Arc, vec::Vec};
+use tokio::sync::RwLock;
 
-use crate::share_fs::ShareFs;
+use crate::{share_fs::ShareFs, volume::block_volume::is_block_volume};
 
 use self::hugepage::{get_huge_page_limits_map, get_huge_page_option};
@@ -25,6 +26,7 @@ const BIND: &str = "bind";
 pub trait Volume: Send + Sync {
     fn get_volume_mount(&self) -> Result<Vec<oci::Mount>>;
     fn get_storage(&self) -> Result<Vec<agent::Storage>>;
+    fn get_device_id(&self) -> Result<Option<String>>;
     async fn cleanup(&self) -> Result<()>;
 }
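The hunk above extends the Volume trait with get_device_id(), so every volume type can report whether a hypervisor-managed device sits behind it (BlockVolume returns its device id, the other volume types return None). Below is a trimmed stand-in of the trait that sketches how a caller might collect the device ids of all device-backed volumes in a sandbox; the device_ids() helper is illustrative and not part of the commit:

// Trimmed stand-in for resource::volume::Volume, shown only to illustrate
// why get_device_id() is useful to callers.
use anyhow::Result;

trait Volume {
    fn get_device_id(&self) -> Result<Option<String>>;
}

struct BlockVolume {
    device_id: String,
}
struct ShmVolume;

impl Volume for BlockVolume {
    fn get_device_id(&self) -> Result<Option<String>> {
        Ok(Some(self.device_id.clone()))
    }
}

impl Volume for ShmVolume {
    fn get_device_id(&self) -> Result<Option<String>> {
        Ok(None)
    }
}

// Gather the ids of every device-backed volume in a sandbox.
fn device_ids(volumes: &[Box<dyn Volume>]) -> Result<Vec<String>> {
    let mut ids = Vec::new();
    for v in volumes {
        if let Some(id) = v.get_device_id()? {
            ids.push(id);
        }
    }
    Ok(ids)
}

fn main() -> Result<()> {
    let volumes: Vec<Box<dyn Volume>> = vec![
        Box::new(BlockVolume {
            device_id: "block-0001".to_string(), // hypothetical id for illustration
        }),
        Box::new(ShmVolume),
    ];
    println!("device ids: {:?}", device_ids(&volumes)?);
    Ok(())
}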
@@ -48,20 +50,25 @@ impl VolumeResource {
         share_fs: &Option<Arc<dyn ShareFs>>,
         cid: &str,
         spec: &oci::Spec,
+        d: &RwLock<DeviceManager>,
+        sid: &str,
     ) -> Result<Vec<Arc<dyn Volume>>> {
         let mut volumes: Vec<Arc<dyn Volume>> = vec![];
         let oci_mounts = &spec.mounts;
         info!(sl!(), " oci mount is : {:?}", oci_mounts.clone());
         // handle mounts
         for m in oci_mounts {
+            let read_only = m.options.iter().any(|opt| opt == "ro");
             let volume: Arc<dyn Volume> = if shm_volume::is_shim_volume(m) {
                 let shm_size = shm_volume::DEFAULT_SHM_SIZE;
                 Arc::new(
                     shm_volume::ShmVolume::new(m, shm_size)
                         .with_context(|| format!("new shm volume {:?}", m))?,
                 )
-            } else if share_fs_volume::is_share_fs_volume(m) {
+            } else if is_block_volume(m) {
+                // handle block volume
                 Arc::new(
-                    share_fs_volume::ShareFsVolume::new(share_fs, m, cid)
+                    block_volume::BlockVolume::new(d, m, read_only, cid, sid)
+                        .await
                         .with_context(|| format!("new share fs volume {:?}", m))?,
                 )
@@ -76,10 +83,11 @@ impl VolumeResource {
                     hugepage::Hugepage::new(m, hugepage_limits, options)
                         .with_context(|| format!("handle hugepages {:?}", m))?,
                 )
-            } else if block_volume::is_block_volume(m) {
+            } else if share_fs_volume::is_share_fs_volume(m) {
                 Arc::new(
-                    block_volume::BlockVolume::new(m)
-                        .with_context(|| format!("new block volume {:?}", m))?,
+                    share_fs_volume::ShareFsVolume::new(share_fs, m, cid, read_only)
+                        .await
+                        .with_context(|| format!("new share fs volume {:?}", m))?,
                 )
             } else if is_skip_volume(m) {
                 info!(sl!(), "skip volume {:?}", m);
@@ -36,6 +36,7 @@ impl ShareFsVolume {
         share_fs: &Option<Arc<dyn ShareFs>>,
         m: &oci::Mount,
         cid: &str,
+        readonly: bool,
     ) -> Result<Self> {
         // The file_name is in the format of "sandbox-{uuid}-{file_name}"
         let file_name = Path::new(&m.source).file_name().unwrap().to_str().unwrap();
@@ -69,8 +70,6 @@ impl ShareFsVolume {
                 }
             }
             Some(share_fs) => {
-                let readonly = m.options.iter().any(|opt| opt == "ro");
-
                 let share_fs_mount = share_fs.get_share_fs_mount();
                 let mounted_info_set = share_fs.mounted_info_set();
                 let mut mounted_info_set = mounted_info_set.lock().await;
@@ -226,6 +225,10 @@ impl Volume for ShareFsVolume {
 
         Ok(())
     }
+
+    fn get_device_id(&self) -> Result<Option<String>> {
+        Ok(None)
+    }
 }
 
 pub(crate) fn is_share_fs_volume(m: &oci::Mount) -> bool {
@@ -104,6 +104,10 @@ impl Volume for ShmVolume {
         warn!(sl!(), "Cleaning up ShmVolume is still unimplemented.");
         Ok(())
     }
+
+    fn get_device_id(&self) -> Result<Option<String>> {
+        Ok(None)
+    }
 }
 
 pub(crate) fn is_shim_volume(m: &oci::Mount) -> bool {