runtime-rs: support nydus v5 and v6

add nydus v5 and v6 support for container rootfs

Fixes: #5142
Signed-off-by: Zhongtao Hu <zhongtaohu.tim@linux.alibaba.com>
This commit is contained in:
Zhongtao Hu 2022-11-11 10:15:29 +08:00
parent 56641bc230
commit c46814b26a
17 changed files with 384 additions and 64 deletions

8
src/agent/Cargo.lock generated
View File

@ -86,6 +86,12 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
[[package]] [[package]]
name = "bincode" name = "bincode"
version = "1.3.3" version = "1.3.3"
@ -725,6 +731,8 @@ dependencies = [
name = "kata-types" name = "kata-types"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"anyhow",
"base64",
"bitmask-enum", "bitmask-enum",
"byte-unit", "byte-unit",
"glob", "glob",

View File

@ -12,6 +12,8 @@ edition = "2018"
[dependencies] [dependencies]
bitmask-enum = "2.1.0" bitmask-enum = "2.1.0"
anyhow = "1.0"
base64 = "0.13.0"
byte-unit = "3.1.4" byte-unit = "3.1.4"
glob = "0.3.0" glob = "0.3.0"
lazy_static = "1.4.0" lazy_static = "1.4.0"

View File

@ -4,6 +4,7 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
use anyhow::{anyhow, Context, Result};
use std::path::PathBuf; use std::path::PathBuf;
/// Prefix to mark a volume as Kata special. /// Prefix to mark a volume as Kata special.
@ -68,10 +69,46 @@ pub fn is_kata_host_dir_volume(ty: &str) -> bool {
ty == KATA_HOST_DIR_VOLUME_TYPE ty == KATA_HOST_DIR_VOLUME_TYPE
} }
/// Nydus extra options, deserialized from the JSON payload carried in a
/// rootfs mount option of the form `extraoption=<base64(JSON)>`
/// (see [`NydusExtraOptions::new`]).
#[derive(Debug, serde::Deserialize)]
pub struct NydusExtraOptions {
    /// source path (nydus image metadata, e.g. ".../fs/image/image.boot")
    pub source: String,
    /// nydus config (JSON string passed through to the nydus daemon)
    pub config: String,
    /// snapshotter directory; serialized as "snapshotdir" in the JSON payload
    #[serde(rename(deserialize = "snapshotdir"))]
    pub snapshot_dir: String,
    /// fs version, e.g. "v5" or "v6"
    pub fs_version: String,
}
impl NydusExtraOptions {
    /// Create Nydus extra options from a rootfs mount.
    ///
    /// Exactly one mount option of the form `extraoption=<base64(JSON)>` must
    /// be present; its payload is base64-decoded and deserialized from JSON.
    /// Any other count (zero or several) is rejected as invalid.
    pub fn new(mount: &Mount) -> Result<Self> {
        let mut extra_opts = mount
            .options
            .iter()
            .filter(|opt| opt.starts_with("extraoption="));
        // (Some, None) == exactly one match; anything else is an error.
        let raw_payload = match (extra_opts.next(), extra_opts.next()) {
            (Some(opt), None) => opt.trim_start_matches("extraoption="),
            _ => {
                return Err(anyhow!(
                    "get_nydus_extra_options: Invalid nydus options: {:?}",
                    &mount.options
                ))
            }
        };
        let decoded =
            base64::decode(raw_payload).context("decode the nydus's base64 extraoption")?;
        serde_json::from_slice(&decoded).context("deserialize nydus's extraoption")
    }
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
#[test] #[test]
fn test_is_kata_special_volume() { fn test_is_kata_special_volume() {
assert!(is_kata_special_volume("kata:guest-mount:nfs")); assert!(is_kata_special_volume("kata:guest-mount:nfs"));
@ -85,4 +122,38 @@ mod tests {
assert!(!is_kata_guest_mount_volume("kata:guest-moun")); assert!(!is_kata_guest_mount_volume("kata:guest-moun"));
assert!(!is_kata_guest_mount_volume("Kata:guest-mount:nfs")); assert!(!is_kata_guest_mount_volume("Kata:guest-mount:nfs"));
} }
#[test]
fn test_get_nydus_extra_options_v5() {
    // Build the mount with its options in the struct literal instead of
    // mutating a default-constructed value (clippy::field_reassign_with_default).
    let mount_info = Mount {
        options: vec!["extraoption=eyJzb3VyY2UiOiIvdmFyL2xpYi9jb250YWluZXJkL2lvLmNvbnRhaW5lcmQuc25hcHNob3R0ZXIudjEubnlkdXMvc25hcHNob3RzLzkvZnMvaW1hZ2UvaW1hZ2UuYm9vdCIsImNvbmZpZyI6IntcImRldmljZVwiOntcImJhY2tlbmRcIjp7XCJ0eXBlXCI6XCJyZWdpc3RyeVwiLFwiY29uZmlnXCI6e1wicmVhZGFoZWFkXCI6ZmFsc2UsXCJob3N0XCI6XCJsb2NhbGhvc3Q6NTAwMFwiLFwicmVwb1wiOlwidWJ1bnR1LW55ZHVzXCIsXCJzY2hlbWVcIjpcImh0dHBcIixcInNraXBfdmVyaWZ5XCI6dHJ1ZSxcInByb3h5XCI6e1wiZmFsbGJhY2tcIjpmYWxzZX0sXCJ0aW1lb3V0XCI6NSxcImNvbm5lY3RfdGltZW91dFwiOjUsXCJyZXRyeV9saW1pdFwiOjJ9fSxcImNhY2hlXCI6e1widHlwZVwiOlwiYmxvYmNhY2hlXCIsXCJjb25maWdcIjp7XCJ3b3JrX2RpclwiOlwiL3Zhci9saWIvbnlkdXMvY2FjaGVcIixcImRpc2FibGVfaW5kZXhlZF9tYXBcIjpmYWxzZX19fSxcIm1vZGVcIjpcImRpcmVjdFwiLFwiZGlnZXN0X3ZhbGlkYXRlXCI6ZmFsc2UsXCJlbmFibGVfeGF0dHJcIjp0cnVlLFwiZnNfcHJlZmV0Y2hcIjp7XCJlbmFibGVcIjp0cnVlLFwicHJlZmV0Y2hfYWxsXCI6ZmFsc2UsXCJ0aHJlYWRzX2NvdW50XCI6NCxcIm1lcmdpbmdfc2l6ZVwiOjAsXCJiYW5kd2lkdGhfcmF0ZVwiOjB9LFwidHlwZVwiOlwiXCIsXCJpZFwiOlwiXCIsXCJkb21haW5faWRcIjpcIlwiLFwiY29uZmlnXCI6e1wiaWRcIjpcIlwiLFwiYmFja2VuZF90eXBlXCI6XCJcIixcImJhY2tlbmRfY29uZmlnXCI6e1wicmVhZGFoZWFkXCI6ZmFsc2UsXCJwcm94eVwiOntcImZhbGxiYWNrXCI6ZmFsc2V9fSxcImNhY2hlX3R5cGVcIjpcIlwiLFwiY2FjaGVfY29uZmlnXCI6e1wid29ya19kaXJcIjpcIlwifSxcIm1ldGFkYXRhX3BhdGhcIjpcIlwifX0iLCJzbmFwc2hvdGRpciI6Ii92YXIvbGliL2NvbnRhaW5lcmQvaW8uY29udGFpbmVyZC5zbmFwc2hvdHRlci52MS5ueWR1cy9zbmFwc2hvdHMvMjU3IiwiZnNfdmVyc2lvbiI6InY1In0=".to_string()],
        ..Default::default()
    };
    let extra_option_result = NydusExtraOptions::new(&mount_info);
    assert!(extra_option_result.is_ok());
    let extra_option = extra_option_result.unwrap();
    assert_eq!(extra_option.source,"/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/9/fs/image/image.boot");
    assert_eq!(
        extra_option.snapshot_dir,
        "/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/257"
    );
    assert_eq!(extra_option.fs_version, "v5");
}
#[test]
fn test_get_nydus_extra_options_v6() {
    // Build the mount with its options in the struct literal instead of
    // mutating a default-constructed value (clippy::field_reassign_with_default).
    let mount_info = Mount {
        options: vec!["extraoption=eyJzb3VyY2UiOiIvdmFyL2xpYi9jb250YWluZXJkL2lvLmNvbnRhaW5lcmQuc25hcHNob3R0ZXIudjEubnlkdXMvc25hcHNob3RzLzIwMS9mcy9pbWFnZS9pbWFnZS5ib290IiwiY29uZmlnIjoie1wiZGV2aWNlXCI6e1wiYmFja2VuZFwiOntcInR5cGVcIjpcInJlZ2lzdHJ5XCIsXCJjb25maWdcIjp7XCJyZWFkYWhlYWRcIjpmYWxzZSxcImhvc3RcIjpcImxvY2FsaG9zdDo1MDAwXCIsXCJyZXBvXCI6XCJ1YnVudHUtbnlkdXMtdjZcIixcInNjaGVtZVwiOlwiaHR0cFwiLFwic2tpcF92ZXJpZnlcIjp0cnVlLFwicHJveHlcIjp7XCJmYWxsYmFja1wiOmZhbHNlfSxcInRpbWVvdXRcIjo1LFwiY29ubmVjdF90aW1lb3V0XCI6NSxcInJldHJ5X2xpbWl0XCI6Mn19LFwiY2FjaGVcIjp7XCJ0eXBlXCI6XCJibG9iY2FjaGVcIixcImNvbmZpZ1wiOntcIndvcmtfZGlyXCI6XCIvdmFyL2xpYi9ueWR1cy9jYWNoZVwiLFwiZGlzYWJsZV9pbmRleGVkX21hcFwiOmZhbHNlfX19LFwibW9kZVwiOlwiZGlyZWN0XCIsXCJkaWdlc3RfdmFsaWRhdGVcIjpmYWxzZSxcImVuYWJsZV94YXR0clwiOnRydWUsXCJmc19wcmVmZXRjaFwiOntcImVuYWJsZVwiOnRydWUsXCJwcmVmZXRjaF9hbGxcIjpmYWxzZSxcInRocmVhZHNfY291bnRcIjo0LFwibWVyZ2luZ19zaXplXCI6MCxcImJhbmR3aWR0aF9yYXRlXCI6MH0sXCJ0eXBlXCI6XCJcIixcImlkXCI6XCJcIixcImRvbWFpbl9pZFwiOlwiXCIsXCJjb25maWdcIjp7XCJpZFwiOlwiXCIsXCJiYWNrZW5kX3R5cGVcIjpcIlwiLFwiYmFja2VuZF9jb25maWdcIjp7XCJyZWFkYWhlYWRcIjpmYWxzZSxcInByb3h5XCI6e1wiZmFsbGJhY2tcIjpmYWxzZX19LFwiY2FjaGVfdHlwZVwiOlwiXCIsXCJjYWNoZV9jb25maWdcIjp7XCJ3b3JrX2RpclwiOlwiXCJ9LFwibWV0YWRhdGFfcGF0aFwiOlwiXCJ9fSIsInNuYXBzaG90ZGlyIjoiL3Zhci9saWIvY29udGFpbmVyZC9pby5jb250YWluZXJkLnNuYXBzaG90dGVyLnYxLm55ZHVzL3NuYXBzaG90cy8yNjEiLCJmc192ZXJzaW9uIjoidjYifQ==".to_string()],
        ..Default::default()
    };
    let extra_option_result = NydusExtraOptions::new(&mount_info);
    assert!(extra_option_result.is_ok());
    let extra_option = extra_option_result.unwrap();
    assert_eq!(extra_option.source,"/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/201/fs/image/image.boot");
    assert_eq!(
        extra_option.snapshot_dir,
        "/var/lib/containerd/io.containerd.snapshotter.v1.nydus/snapshots/261"
    );
    assert_eq!(extra_option.fs_version, "v6");
}
} }

View File

@ -223,6 +223,7 @@ impl DragonballInner {
}, },
cache_size: (self.config.shared_fs.virtio_fs_cache_size as u64) cache_size: (self.config.shared_fs.virtio_fs_cache_size as u64)
.saturating_mul(MB_TO_B as u64), .saturating_mul(MB_TO_B as u64),
xattr: true,
..Default::default() ..Default::default()
}; };
self.do_add_fs_device(&config.fs_type, &mut fs_cfg) self.do_add_fs_device(&config.fs_type, &mut fs_cfg)
@ -264,7 +265,7 @@ impl DragonballInner {
fstype: Some(fstype.to_string()), fstype: Some(fstype.to_string()),
source: Some(config.source.clone()), source: Some(config.source.clone()),
mountpoint: config.mount_point.clone(), mountpoint: config.mount_point.clone(),
config: None, config: config.config.clone(),
tag: config.tag.clone(), tag: config.tag.clone(),
prefetch_list_path: config.prefetch_list_path.clone(), prefetch_list_path: config.prefetch_list_path.clone(),
dax_threshold_size_kb: None, dax_threshold_size_kb: None,

View File

@ -23,6 +23,7 @@ rand = "^0.7.2"
rtnetlink = "0.11.0" rtnetlink = "0.11.0"
scopeguard = "1.0.0" scopeguard = "1.0.0"
serde = { version = "1.0.138", features = ["derive"] } serde = { version = "1.0.138", features = ["derive"] }
serde_json = "1.0.82"
slog = "2.5.2" slog = "2.5.2"
slog-scope = "4.4.0" slog-scope = "4.4.0"
tokio = { version = "1.8.0", features = ["process"] } tokio = { version = "1.8.0", features = ["process"] }

View File

@ -178,7 +178,14 @@ impl ResourceManagerInner {
rootfs_mounts: &[Mount], rootfs_mounts: &[Mount],
) -> Result<Arc<dyn Rootfs>> { ) -> Result<Arc<dyn Rootfs>> {
self.rootfs_resource self.rootfs_resource
.handler_rootfs(&self.share_fs, cid, bundle_path, rootfs_mounts) .handler_rootfs(
&self.share_fs,
self.hypervisor.as_ref(),
&self.sid,
cid,
bundle_path,
rootfs_mounts,
)
.await .await
} }

View File

@ -4,23 +4,29 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
mod nydus_rootfs;
mod share_fs_rootfs; mod share_fs_rootfs;
use std::{sync::Arc, vec::Vec}; use agent::Storage;
use anyhow::{anyhow, Context, Result}; use anyhow::{anyhow, Context, Result};
use async_trait::async_trait; use async_trait::async_trait;
use hypervisor::Hypervisor;
use kata_types::mount::Mount; use kata_types::mount::Mount;
use std::{sync::Arc, vec::Vec};
use tokio::sync::RwLock; use tokio::sync::RwLock;
use crate::share_fs::ShareFs; use crate::share_fs::ShareFs;
const ROOTFS: &str = "rootfs"; use self::nydus_rootfs::NYDUS_ROOTFS_TYPE;
const ROOTFS: &str = "rootfs";
const HYBRID_ROOTFS_LOWER_DIR: &str = "rootfs_lower";
const TYPE_OVERLAY_FS: &str = "overlay";
#[async_trait] #[async_trait]
pub trait Rootfs: Send + Sync { pub trait Rootfs: Send + Sync {
async fn get_guest_rootfs_path(&self) -> Result<String>; async fn get_guest_rootfs_path(&self) -> Result<String>;
async fn get_rootfs_mount(&self) -> Result<Vec<oci::Mount>>; async fn get_rootfs_mount(&self) -> Result<Vec<oci::Mount>>;
async fn get_storage(&self) -> Option<Storage>;
} }
#[derive(Default)] #[derive(Default)]
@ -48,6 +54,8 @@ impl RootFsResource {
pub async fn handler_rootfs( pub async fn handler_rootfs(
&self, &self,
share_fs: &Option<Arc<dyn ShareFs>>, share_fs: &Option<Arc<dyn ShareFs>>,
hypervisor: &dyn Hypervisor,
sid: &str,
cid: &str, cid: &str,
bundle_path: &str, bundle_path: &str,
rootfs_mounts: &[Mount], rootfs_mounts: &[Mount],
@ -56,21 +64,41 @@ impl RootFsResource {
mounts_vec if is_single_layer_rootfs(mounts_vec) => { mounts_vec if is_single_layer_rootfs(mounts_vec) => {
// Safe as single_layer_rootfs must have one layer // Safe as single_layer_rootfs must have one layer
let layer = &mounts_vec[0]; let layer = &mounts_vec[0];
let rootfs: Arc<dyn Rootfs> = if let Some(share_fs) = share_fs {
let rootfs = if let Some(share_fs) = share_fs {
// share fs rootfs
let share_fs_mount = share_fs.get_share_fs_mount(); let share_fs_mount = share_fs.get_share_fs_mount();
share_fs_rootfs::ShareFsRootfs::new(&share_fs_mount, cid, bundle_path, layer) // nydus rootfs
.await if layer.fs_type == NYDUS_ROOTFS_TYPE {
.context("new share fs rootfs")? Arc::new(
nydus_rootfs::NydusRootfs::new(
&share_fs_mount,
hypervisor,
sid,
cid,
layer,
)
.await
.context("new nydus rootfs")?,
)
} else {
// share fs rootfs
Arc::new(
share_fs_rootfs::ShareFsRootfs::new(
&share_fs_mount,
cid,
bundle_path,
layer,
)
.await
.context("new share fs rootfs")?,
)
}
} else { } else {
return Err(anyhow!("unsupported rootfs {:?}", &layer)); return Err(anyhow!("unsupported rootfs {:?}", &layer));
}; };
let mut inner = self.inner.write().await; let mut inner = self.inner.write().await;
let r = Arc::new(rootfs); inner.rootfs.push(Arc::clone(&rootfs));
inner.rootfs.push(r.clone()); Ok(rootfs)
Ok(r)
} }
_ => { _ => {
return Err(anyhow!( return Err(anyhow!(

View File

@ -0,0 +1,146 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::{fs, sync::Arc};
use super::{Rootfs, TYPE_OVERLAY_FS};
use crate::{
rootfs::{HYBRID_ROOTFS_LOWER_DIR, ROOTFS},
share_fs::{
do_get_guest_path, do_get_guest_share_path, get_host_rw_shared_path, rafs_mount,
ShareFsMount, ShareFsRootfsConfig, PASSTHROUGH_FS_DIR,
},
};
use agent::Storage;
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use hypervisor::Hypervisor;
use kata_types::mount::{Mount, NydusExtraOptions};
// Used for nydus rootfs
pub(crate) const NYDUS_ROOTFS_TYPE: &str = "fuse.nydus-overlayfs";
// Used for Nydus v5 rootfs version
const NYDUS_ROOTFS_V5: &str = "v5";
// Used for Nydus v6 rootfs version
const NYDUS_ROOTFS_V6: &str = "v6";
const SNAPSHOT_DIR: &str = "snapshotdir";
const KATA_OVERLAY_DEV_TYPE: &str = "overlayfs";
// A container rootfs backed by nydus: the image metadata is rafs-mounted as
// the overlay lower layer and the final rootfs is described by an overlayfs
// Storage entry (see `NydusRootfs::new`).
pub(crate) struct NydusRootfs {
    // rootfs mount point inside the guest
    guest_path: String,
    // overlayfs Storage entry returned by get_storage()
    rootfs: Storage,
}
impl NydusRootfs {
    /// Build a nydus rootfs for container `cid` in sandbox `sid`:
    /// rafs-mount the nydus metadata as the overlay lower layer, bind-share
    /// the snapshotter directory (upper/work dirs) into the guest, and
    /// assemble the overlayfs Storage entry describing the final rootfs.
    ///
    /// Fails if the mount's extra options cannot be parsed or if
    /// `fs_version` is neither "v5" nor "v6".
    pub async fn new(
        share_fs_mount: &Arc<dyn ShareFsMount>,
        h: &dyn Hypervisor,
        sid: &str,
        cid: &str,
        rootfs: &Mount,
    ) -> Result<Self> {
        let extra_options =
            NydusExtraOptions::new(rootfs).context("failed to parse nydus extra options")?;
        info!(sl!(), "extra_option {:?}", &extra_options);
        let rafs_meta = &extra_options.source;
        let (rootfs_storage, rootfs_guest_path) = match extra_options.fs_version.as_str() {
            // both nydus v5 and v6 can be handled by the builtin nydus in dragonball by using the rafs mode.
            // nydus v6 could also be handled by the guest kernel as well, but some kernel patch is not support in the upstream community. We will add an option to let runtime-rs handle nydus v6 in the guest kernel optionally once the patch is ready
            // see this issue (https://github.com/kata-containers/kata-containers/issues/5143)
            NYDUS_ROOTFS_V5 | NYDUS_ROOTFS_V6 => {
                // rafs mount the metadata of nydus rootfs; this becomes the
                // read-only lower layer of the guest overlayfs
                let rafs_mnt = do_get_guest_share_path(HYBRID_ROOTFS_LOWER_DIR, cid, true);
                rafs_mount(
                    h,
                    rafs_meta.to_string(),
                    rafs_mnt,
                    extra_options.config.clone(),
                    None,
                )
                .await
                .context("failed to do rafs mount")?;
                // create the rootfs directory under the host rw share
                // directory so the guest mount point exists
                let container_share_dir = get_host_rw_shared_path(sid)
                    .join(PASSTHROUGH_FS_DIR)
                    .join(cid);
                let rootfs_dir = container_share_dir.join(ROOTFS);
                fs::create_dir_all(rootfs_dir).context("failed to create directory")?;
                // mount point inside the guest
                let rootfs_guest_path = do_get_guest_path(ROOTFS, cid, false, false);
                // bind mount the snapshot dir under the share directory; it
                // provides the overlay upper ("fs") and work ("work") dirs
                share_fs_mount
                    .share_rootfs(ShareFsRootfsConfig {
                        cid: cid.to_string(),
                        source: extra_options.snapshot_dir.clone(),
                        target: SNAPSHOT_DIR.to_string(),
                        readonly: true,
                        is_rafs: false,
                    })
                    .await
                    .context("share nydus rootfs")?;
                // assemble the overlayfs mount options (guest-side paths)
                let mut options: Vec<String> = Vec::new();
                options.push(
                    "lowerdir=".to_string()
                        + &do_get_guest_path(HYBRID_ROOTFS_LOWER_DIR, cid, false, true),
                );
                options.push(
                    "workdir=".to_string()
                        + &do_get_guest_path(
                            format!("{}/{}", SNAPSHOT_DIR, "work").as_str(),
                            cid,
                            false,
                            false,
                        ),
                );
                options.push(
                    "upperdir=".to_string()
                        + &do_get_guest_path(
                            format!("{}/{}", SNAPSHOT_DIR, "fs").as_str(),
                            cid,
                            false,
                            false,
                        ),
                );
                options.push("index=off".to_string());
                Ok((
                    Storage {
                        driver: KATA_OVERLAY_DEV_TYPE.to_string(),
                        source: TYPE_OVERLAY_FS.to_string(),
                        fs_type: TYPE_OVERLAY_FS.to_string(),
                        options,
                        mount_point: rootfs_guest_path.clone(),
                        ..Default::default()
                    },
                    rootfs_guest_path,
                ))
            }
            // any other fs_version is unsupported
            _ => {
                let errstr: &str = "new_nydus_rootfs: invalid nydus rootfs type";
                error!(sl!(), "{}", errstr);
                Err(anyhow!(errstr))
            }
        }?;
        Ok(NydusRootfs {
            guest_path: rootfs_guest_path,
            rootfs: rootfs_storage,
        })
    }
}
#[async_trait]
impl Rootfs for NydusRootfs {
    // Guest-side mount point of the assembled rootfs.
    async fn get_guest_rootfs_path(&self) -> Result<String> {
        let path = self.guest_path.clone();
        Ok(path)
    }

    // No extra OCI mounts are needed; the rootfs is delivered as a storage.
    async fn get_rootfs_mount(&self) -> Result<Vec<oci::Mount>> {
        Ok(Vec::new())
    }

    // Overlayfs storage entry describing the rootfs.
    async fn get_storage(&self) -> Option<Storage> {
        Some(self.rootfs.clone())
    }
}

View File

@ -4,12 +4,12 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
use std::sync::Arc; use agent::Storage;
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use async_trait::async_trait; use async_trait::async_trait;
use kata_sys_util::mount::Mounter; use kata_sys_util::mount::Mounter;
use kata_types::mount::Mount; use kata_types::mount::Mount;
use std::sync::Arc;
use super::{Rootfs, ROOTFS}; use super::{Rootfs, ROOTFS};
use crate::share_fs::{ShareFsMount, ShareFsRootfsConfig}; use crate::share_fs::{ShareFsMount, ShareFsRootfsConfig};
@ -37,6 +37,7 @@ impl ShareFsRootfs {
source: bundle_rootfs.to_string(), source: bundle_rootfs.to_string(),
target: ROOTFS.to_string(), target: ROOTFS.to_string(),
readonly: false, readonly: false,
is_rafs: false,
}) })
.await .await
.context("share rootfs")?; .context("share rootfs")?;
@ -56,4 +57,8 @@ impl Rootfs for ShareFsRootfs {
async fn get_rootfs_mount(&self) -> Result<Vec<oci::Mount>> { async fn get_rootfs_mount(&self) -> Result<Vec<oci::Mount>> {
todo!() todo!()
} }
async fn get_storage(&self) -> Option<Storage> {
None
}
} }

View File

@ -5,11 +5,13 @@
// //
mod share_virtio_fs; mod share_virtio_fs;
pub use share_virtio_fs::rafs_mount;
mod share_virtio_fs_inline; mod share_virtio_fs_inline;
use share_virtio_fs_inline::ShareVirtioFsInline; use share_virtio_fs_inline::ShareVirtioFsInline;
mod share_virtio_fs_standalone; mod share_virtio_fs_standalone;
use share_virtio_fs_standalone::ShareVirtioFsStandalone; use share_virtio_fs_standalone::ShareVirtioFsStandalone;
mod utils; mod utils;
pub use utils::{do_get_guest_path, do_get_guest_share_path, get_host_rw_shared_path};
mod virtio_fs_share_mount; mod virtio_fs_share_mount;
use virtio_fs_share_mount::VirtiofsShareMount; use virtio_fs_share_mount::VirtiofsShareMount;
@ -22,6 +24,7 @@ use hypervisor::Hypervisor;
use kata_types::config::hypervisor::SharedFsInfo; use kata_types::config::hypervisor::SharedFsInfo;
const VIRTIO_FS: &str = "virtio-fs"; const VIRTIO_FS: &str = "virtio-fs";
const _VIRTIO_FS_NYDUS: &str = "virtio-fs-nydus";
const INLINE_VIRTIO_FS: &str = "inline-virtio-fs"; const INLINE_VIRTIO_FS: &str = "inline-virtio-fs";
const KATA_HOST_SHARED_DIR: &str = "/run/kata-containers/shared/sandboxes/"; const KATA_HOST_SHARED_DIR: &str = "/run/kata-containers/shared/sandboxes/";
@ -31,7 +34,8 @@ const KATA_GUEST_SHARE_DIR: &str = "/run/kata-containers/shared/containers/";
pub(crate) const DEFAULT_KATA_GUEST_SANDBOX_DIR: &str = "/run/kata-containers/sandbox/"; pub(crate) const DEFAULT_KATA_GUEST_SANDBOX_DIR: &str = "/run/kata-containers/sandbox/";
const PASSTHROUGH_FS_DIR: &str = "passthrough"; pub const PASSTHROUGH_FS_DIR: &str = "passthrough";
const RAFS_DIR: &str = "rafs";
#[async_trait] #[async_trait]
pub trait ShareFs: Send + Sync { pub trait ShareFs: Send + Sync {
@ -47,6 +51,7 @@ pub struct ShareFsRootfsConfig {
pub source: String, pub source: String,
pub target: String, pub target: String,
pub readonly: bool, pub readonly: bool,
pub is_rafs: bool,
} }
pub struct ShareFsVolumeConfig { pub struct ShareFsVolumeConfig {
@ -56,6 +61,7 @@ pub struct ShareFsVolumeConfig {
pub readonly: bool, pub readonly: bool,
pub mount_options: Vec<String>, pub mount_options: Vec<String>,
pub mount: oci::Mount, pub mount: oci::Mount,
pub is_rafs: bool,
} }
pub struct ShareFsMountResult { pub struct ShareFsMountResult {

View File

@ -7,10 +7,13 @@
use std::path::Path; use std::path::Path;
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use hypervisor::{device, Hypervisor}; use hypervisor::{
device::{Device as HypervisorDevice, ShareFsMountConfig, ShareFsMountType, ShareFsOperation},
Hypervisor, ShareFsDeviceConfig,
};
use kata_sys_util::mount; use kata_sys_util::mount;
use super::utils; use super::{utils, PASSTHROUGH_FS_DIR};
pub(crate) const MOUNT_GUEST_TAG: &str = "kataShared"; pub(crate) const MOUNT_GUEST_TAG: &str = "kataShared";
@ -39,7 +42,7 @@ pub(crate) async fn prepare_virtiofs(
mount::bind_mount_unchecked(&host_rw_dest, &host_ro_dest, true) mount::bind_mount_unchecked(&host_rw_dest, &host_ro_dest, true)
.context("bind mount shared_fs directory")?; .context("bind mount shared_fs directory")?;
let share_fs_device = device::Device::ShareFsDevice(device::ShareFsDeviceConfig { let share_fs_device = HypervisorDevice::ShareFsDevice(ShareFsDeviceConfig {
sock_path: generate_sock_path(root), sock_path: generate_sock_path(root),
mount_tag: String::from(MOUNT_GUEST_TAG), mount_tag: String::from(MOUNT_GUEST_TAG),
host_path: String::from(host_ro_dest.to_str().unwrap()), host_path: String::from(host_ro_dest.to_str().unwrap()),
@ -50,3 +53,55 @@ pub(crate) async fn prepare_virtiofs(
h.add_device(share_fs_device).await.context("add device")?; h.add_device(share_fs_device).await.context("add device")?;
Ok(()) Ok(())
} }
// Attach the host passthrough share directory to the built-in virtio-fs
// device so it becomes visible inside the guest.
// - source: absolute path of PASSTHROUGH_FS_DIR on the host, e.g.
//   /run/kata-containers/shared/sandboxes/<sid>/passthrough
// - mount point: path relative to KATA_GUEST_SHARE_DIR in the guest
pub(crate) async fn setup_inline_virtiofs(id: &str, h: &dyn Hypervisor) -> Result<()> {
    let guest_mount_point = format!("/{}", PASSTHROUGH_FS_DIR);

    // Make sure the rw side exists; the device is fed from the ro view.
    let host_rw_dir = utils::get_host_rw_shared_path(id).join(PASSTHROUGH_FS_DIR);
    utils::ensure_dir_exist(&host_rw_dir).context("ensure directory exist")?;
    let host_ro_dir = utils::get_host_ro_shared_path(id).join(PASSTHROUGH_FS_DIR);
    let source = String::from(host_ro_dir.to_str().unwrap());

    let device = HypervisorDevice::ShareFsMount(ShareFsMountConfig {
        source: source.clone(),
        fstype: ShareFsMountType::PASSTHROUGH,
        mount_point: guest_mount_point,
        config: None,
        tag: String::from(MOUNT_GUEST_TAG),
        op: ShareFsOperation::Mount,
        prefetch_list_path: None,
    });
    h.add_device(device)
        .await
        .with_context(|| format!("fail to attach passthrough fs {:?}", source))
}
// Attach a rafs metadata file to the virtio-fs device, mounting it at
// `rafs_mnt` inside the guest with the given nydus daemon configuration.
pub async fn rafs_mount(
    h: &dyn Hypervisor,
    rafs_meta: String,
    rafs_mnt: String,
    config_content: String,
    prefetch_list_path: Option<String>,
) -> Result<()> {
    info!(
        sl!(),
        "Attaching rafs meta file {} to virtio-fs device, rafs mount point {}", rafs_meta, rafs_mnt
    );
    let rafs_device = HypervisorDevice::ShareFsMount(ShareFsMountConfig {
        source: rafs_meta.clone(),
        fstype: ShareFsMountType::RAFS,
        mount_point: rafs_mnt,
        config: Some(config_content),
        tag: String::from(MOUNT_GUEST_TAG),
        op: ShareFsOperation::Mount,
        prefetch_list_path,
    });
    h.add_device(rafs_device)
        .await
        .with_context(|| format!("fail to attach rafs {:?}", rafs_meta))
}

View File

@ -7,17 +7,15 @@
use agent::Storage; use agent::Storage;
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use async_trait::async_trait; use async_trait::async_trait;
use hypervisor::{ use hypervisor::Hypervisor;
device::{Device as HypervisorDevice, ShareFsMountConfig, ShareFsMountType, ShareFsOperation},
Hypervisor,
};
use kata_types::config::hypervisor::SharedFsInfo; use kata_types::config::hypervisor::SharedFsInfo;
use super::{ use super::{
share_virtio_fs::{ share_virtio_fs::{
prepare_virtiofs, FS_TYPE_VIRTIO_FS, KATA_VIRTIO_FS_DEV_TYPE, MOUNT_GUEST_TAG, prepare_virtiofs, setup_inline_virtiofs, FS_TYPE_VIRTIO_FS, KATA_VIRTIO_FS_DEV_TYPE,
MOUNT_GUEST_TAG,
}, },
utils, ShareFs, PASSTHROUGH_FS_DIR, *, ShareFs, *,
}; };
lazy_static! { lazy_static! {
@ -80,29 +78,3 @@ impl ShareFs for ShareVirtioFsInline {
Ok(storages) Ok(storages)
} }
} }
async fn setup_inline_virtiofs(id: &str, h: &dyn Hypervisor) -> Result<()> {
// - source is the absolute path of PASSTHROUGH_FS_DIR on host, e.g.
// /run/kata-containers/shared/sandboxes/<sid>/passthrough
// - mount point is the path relative to KATA_GUEST_SHARE_DIR in guest
let mnt = format!("/{}", PASSTHROUGH_FS_DIR);
let rw_source = utils::get_host_rw_shared_path(id).join(PASSTHROUGH_FS_DIR);
utils::ensure_dir_exist(&rw_source)?;
let ro_source = utils::get_host_ro_shared_path(id).join(PASSTHROUGH_FS_DIR);
let source = String::from(ro_source.to_str().unwrap());
let virtio_fs = HypervisorDevice::ShareFsMount(ShareFsMountConfig {
source: source.clone(),
fstype: ShareFsMountType::PASSTHROUGH,
mount_point: mnt,
config: None,
tag: String::from(MOUNT_GUEST_TAG),
op: ShareFsOperation::Mount,
prefetch_list_path: None,
});
h.add_device(virtio_fs)
.await
.context(format!("fail to attach passthrough fs {:?}", source))
}

View File

@ -27,6 +27,7 @@ pub(crate) fn share_to_guest(
cid: &str, cid: &str,
readonly: bool, readonly: bool,
is_volume: bool, is_volume: bool,
is_rafs: bool,
) -> Result<String> { ) -> Result<String> {
let host_dest = do_get_host_path(target, sid, cid, is_volume, false); let host_dest = do_get_host_path(target, sid, cid, is_volume, false);
mount::bind_mount_unchecked(source, &host_dest, readonly) mount::bind_mount_unchecked(source, &host_dest, readonly)
@ -39,7 +40,7 @@ pub(crate) fn share_to_guest(
mount::bind_remount_read_only(&dst).context("bind remount readonly")?; mount::bind_remount_read_only(&dst).context("bind remount readonly")?;
} }
Ok(do_get_guest_path(target, cid, is_volume)) Ok(do_get_guest_path(target, cid, is_volume, is_rafs))
} }
// Shared path handling: // Shared path handling:
// 1. create two directories for each sandbox: // 1. create two directories for each sandbox:
@ -53,12 +54,22 @@ pub(crate) fn get_host_ro_shared_path(id: &str) -> PathBuf {
Path::new(KATA_HOST_SHARED_DIR).join(id).join("ro") Path::new(KATA_HOST_SHARED_DIR).join(id).join("ro")
} }
pub(crate) fn get_host_rw_shared_path(sid: &str) -> PathBuf { pub fn get_host_rw_shared_path(sid: &str) -> PathBuf {
Path::new(KATA_HOST_SHARED_DIR).join(sid).join("rw") Path::new(KATA_HOST_SHARED_DIR).join(sid).join("rw")
} }
fn do_get_guest_any_path(target: &str, cid: &str, is_volume: bool, is_virtiofs: bool) -> String { fn do_get_guest_any_path(
let dir = PASSTHROUGH_FS_DIR; target: &str,
cid: &str,
is_volume: bool,
is_rafs: bool,
is_virtiofs: bool,
) -> String {
let dir = if is_rafs {
RAFS_DIR
} else {
PASSTHROUGH_FS_DIR
};
let guest_share_dir = if is_virtiofs { let guest_share_dir = if is_virtiofs {
Path::new("/").to_path_buf() Path::new("/").to_path_buf()
} else { } else {
@ -73,8 +84,12 @@ fn do_get_guest_any_path(target: &str, cid: &str, is_volume: bool, is_virtiofs:
path.to_str().unwrap().to_string() path.to_str().unwrap().to_string()
} }
pub(crate) fn do_get_guest_path(target: &str, cid: &str, is_volume: bool) -> String { pub fn do_get_guest_path(target: &str, cid: &str, is_volume: bool, is_rafs: bool) -> String {
do_get_guest_any_path(target, cid, is_volume, false) do_get_guest_any_path(target, cid, is_volume, is_rafs, false)
}
pub fn do_get_guest_share_path(target: &str, cid: &str, is_rafs: bool) -> String {
do_get_guest_any_path(target, cid, false, is_rafs, true)
} }
pub(crate) fn do_get_host_path( pub(crate) fn do_get_host_path(

View File

@ -44,6 +44,7 @@ impl ShareFsMount for VirtiofsShareMount {
&config.cid, &config.cid,
config.readonly, config.readonly,
false, false,
config.is_rafs,
) )
.context("share to guest")?; .context("share to guest")?;
Ok(ShareFsMountResult { Ok(ShareFsMountResult {
@ -60,6 +61,7 @@ impl ShareFsMount for VirtiofsShareMount {
&config.cid, &config.cid,
config.readonly, config.readonly,
true, true,
config.is_rafs,
) )
.context("share to guest")?; .context("share to guest")?;

View File

@ -68,6 +68,7 @@ impl ShareFsVolume {
readonly: m.options.iter().any(|o| *o == "ro"), readonly: m.options.iter().any(|o| *o == "ro"),
mount_options: m.options.clone(), mount_options: m.options.clone(),
mount: m.clone(), mount: m.clone(),
is_rafs: false,
}) })
.await .await
.context("share fs volume")?; .context("share fs volume")?;

View File

@ -85,9 +85,7 @@ async fn generic_ip_table_handler(
let body = Body::from(data); let body = Body::from(data);
Response::builder().body(body).map_err(|e| anyhow!(e)) Response::builder().body(body).map_err(|e| anyhow!(e))
} }
_ => { _ => Err(anyhow!("Failed to get iptable")),
Err(anyhow!("Failed to get iptable"))
}
}, },
Method::PUT => { Method::PUT => {

View File

@ -101,6 +101,10 @@ impl Container {
} }
None => return Err(anyhow!("spec miss root field")), None => return Err(anyhow!("spec miss root field")),
}; };
let mut storages = vec![];
if let Some(storage) = rootfs.get_storage().await {
storages.push(storage);
}
inner.rootfs.push(rootfs); inner.rootfs.push(rootfs);
// handler volumes // handler volumes
@ -110,8 +114,6 @@ impl Container {
.await .await
.context("handler volumes")?; .context("handler volumes")?;
let mut oci_mounts = vec![]; let mut oci_mounts = vec![];
let mut storages = vec![];
for v in volumes { for v in volumes {
let mut volume_mounts = v.get_volume_mount().context("get volume mount")?; let mut volume_mounts = v.get_volume_mount().context("get volume mount")?;
if !volume_mounts.is_empty() { if !volume_mounts.is_empty() {