runtime-rs: Clean up mount points shared to guest

Fixed issues where shared volumes couldn't be unmounted correctly.

The rootfs of each container is now cleaned up after the container is killed, except
for `NydusRootfs`, whose cleanup is still unimplemented. `ShareFsRootfs::cleanup()`
calls `VirtiofsShareMount::umount_rootfs()` to unmount the mount points shared to the
guest, and then unmounts the bundle rootfs.

Fixes: #5898

Signed-off-by: Xuewei Niu <niuxuewei.nxw@antgroup.com>
Xuewei Niu 2022-12-14 11:32:00 +08:00
parent e4645642d0
commit 0e69207909
7 changed files with 112 additions and 47 deletions
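
As a rough orientation for the diffs below, here is a standalone sketch of the cleanup flow described in the commit message. It uses synchronous stand-ins with simplified signatures (the real `Rootfs` and `ShareFsMount` traits are async and take a `ShareFsRootfsConfig`), and the bundle path is purely illustrative.

use std::sync::Arc;

// Simplified stand-in for the share-fs mount half (VirtiofsShareMount in the diff).
trait ShareFsMount {
    fn umount_rootfs(&self, target: &str) -> Result<(), String>;
}

// Simplified stand-in for the Rootfs trait that gains cleanup() in this commit.
trait Rootfs {
    fn cleanup(&self) -> Result<(), String>;
}

struct ShareFsRootfs {
    share_fs_mount: Arc<dyn ShareFsMount>,
    // Host path of the bundle rootfs that was shared to the guest (illustrative).
    bundle_source: String,
}

impl Rootfs for ShareFsRootfs {
    fn cleanup(&self) -> Result<(), String> {
        // 1. Unmount the mount point shared to the guest.
        self.share_fs_mount.umount_rootfs(&self.bundle_source)?;
        // 2. Unmount the bundle rootfs itself (umount_timeout in the real code).
        println!("umount {}", self.bundle_source);
        Ok(())
    }
}

struct FakeShareFsMount;

impl ShareFsMount for FakeShareFsMount {
    fn umount_rootfs(&self, target: &str) -> Result<(), String> {
        println!("umount guest-shared path for {}", target);
        Ok(())
    }
}

fn main() {
    let rootfs = ShareFsRootfs {
        share_fs_mount: Arc::new(FakeShareFsMount),
        bundle_source: "/path/to/bundle/rootfs".to_string(),
    };
    // Called from the container teardown path (clean_rootfs() in the last diff).
    rootfs.cleanup().expect("cleanup rootfs");
}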

View File

@@ -27,6 +27,7 @@ pub trait Rootfs: Send + Sync {
async fn get_guest_rootfs_path(&self) -> Result<String>;
async fn get_rootfs_mount(&self) -> Result<Vec<oci::Mount>>;
async fn get_storage(&self) -> Option<Storage>;
async fn cleanup(&self) -> Result<()>;
}
#[derive(Default)]
@@ -66,11 +67,10 @@ impl RootFsResource {
// if rootfs_mounts is empty
mounts_vec if mounts_vec.is_empty() => {
if let Some(share_fs) = share_fs {
let share_fs_mount = share_fs.get_share_fs_mount();
// share fs rootfs
Ok(Arc::new(
share_fs_rootfs::ShareFsRootfs::new(
&share_fs_mount,
share_fs,
cid,
root.path.as_str(),
None,
@@ -86,17 +86,10 @@ impl RootFsResource {
// Safe as single_layer_rootfs must have one layer
let layer = &mounts_vec[0];
let rootfs: Arc<dyn Rootfs> = if let Some(share_fs) = share_fs {
let share_fs_mount = share_fs.get_share_fs_mount();
// nydus rootfs
if layer.fs_type == NYDUS_ROOTFS_TYPE {
Arc::new(
nydus_rootfs::NydusRootfs::new(
&share_fs_mount,
hypervisor,
sid,
cid,
layer,
)
nydus_rootfs::NydusRootfs::new(share_fs, hypervisor, sid, cid, layer)
.await
.context("new nydus rootfs")?,
)
@@ -104,7 +97,7 @@ impl RootFsResource {
// share fs rootfs
Arc::new(
share_fs_rootfs::ShareFsRootfs::new(
&share_fs_mount,
share_fs,
cid,
bundle_path,
Some(layer),

View File

@@ -9,8 +9,8 @@ use super::{Rootfs, TYPE_OVERLAY_FS};
use crate::{
rootfs::{HYBRID_ROOTFS_LOWER_DIR, ROOTFS},
share_fs::{
do_get_guest_path, do_get_guest_share_path, get_host_rw_shared_path, rafs_mount,
ShareFsMount, ShareFsRootfsConfig, PASSTHROUGH_FS_DIR,
do_get_guest_path, do_get_guest_share_path, get_host_rw_shared_path, rafs_mount, ShareFs,
ShareFsRootfsConfig, PASSTHROUGH_FS_DIR,
},
};
use agent::Storage;
@@ -36,16 +36,25 @@ pub(crate) struct NydusRootfs {
impl NydusRootfs {
pub async fn new(
share_fs_mount: &Arc<dyn ShareFsMount>,
share_fs: &Arc<dyn ShareFs>,
h: &dyn Hypervisor,
sid: &str,
cid: &str,
rootfs: &Mount,
) -> Result<Self> {
let share_fs = Arc::clone(share_fs);
let share_fs_mount = share_fs.get_share_fs_mount();
let extra_options =
NydusExtraOptions::new(rootfs).context("failed to parse nydus extra options")?;
info!(sl!(), "extra_option {:?}", &extra_options);
let rafs_meta = &extra_options.source;
let config = ShareFsRootfsConfig {
cid: cid.to_string(),
source: extra_options.snapshot_dir.clone(),
target: SNAPSHOT_DIR.to_string(),
readonly: true,
is_rafs: false,
};
let (rootfs_storage, rootfs_guest_path) = match extra_options.fs_version.as_str() {
// both nydus v5 and v6 can be handled by the builtin nydus in dragonball by using the rafs mode.
// nydus v6 could also be handled by the guest kernel, but the required kernel patch has not been accepted upstream yet. We will add an option to let runtime-rs handle nydus v6 in the guest kernel once the patch is ready
@@ -72,13 +81,7 @@ impl NydusRootfs {
let rootfs_guest_path = do_get_guest_path(ROOTFS, cid, false, false);
// bind mount the snapshot dir under the share directory
share_fs_mount
.share_rootfs(ShareFsRootfsConfig {
cid: cid.to_string(),
source: extra_options.snapshot_dir.clone(),
target: SNAPSHOT_DIR.to_string(),
readonly: true,
is_rafs: false,
})
.share_rootfs(config.clone())
.await
.context("share nydus rootfs")?;
let mut options: Vec<String> = Vec::new();
@@ -143,4 +146,9 @@ impl Rootfs for NydusRootfs {
async fn get_storage(&self) -> Option<Storage> {
Some(self.rootfs.clone())
}
async fn cleanup(&self) -> Result<()> {
warn!(sl!(), "Cleaning up Nydus Rootfs is still unimplemented.");
Ok(())
}
}

View File

@@ -7,20 +7,22 @@
use agent::Storage;
use anyhow::{Context, Result};
use async_trait::async_trait;
use kata_sys_util::mount::Mounter;
use kata_sys_util::mount::{umount_timeout, Mounter};
use kata_types::mount::Mount;
use std::sync::Arc;
use super::{Rootfs, ROOTFS};
use crate::share_fs::{ShareFsMount, ShareFsRootfsConfig};
use crate::share_fs::{ShareFs, ShareFsRootfsConfig};
pub(crate) struct ShareFsRootfs {
guest_path: String,
share_fs: Arc<dyn ShareFs>,
config: ShareFsRootfsConfig,
}
impl ShareFsRootfs {
pub async fn new(
share_fs_mount: &Arc<dyn ShareFsMount>,
share_fs: &Arc<dyn ShareFs>,
cid: &str,
bundle_path: &str,
rootfs: Option<&Mount>,
@@ -35,19 +37,25 @@ impl ShareFsRootfs {
} else {
bundle_path.to_string()
};
let mount_result = share_fs_mount
.share_rootfs(ShareFsRootfsConfig {
let share_fs_mount = share_fs.get_share_fs_mount();
let config = ShareFsRootfsConfig {
cid: cid.to_string(),
source: bundle_rootfs.to_string(),
target: ROOTFS.to_string(),
readonly: false,
is_rafs: false,
})
};
let mount_result = share_fs_mount
.share_rootfs(config.clone())
.await
.context("share rootfs")?;
Ok(ShareFsRootfs {
guest_path: mount_result.guest_path,
share_fs: Arc::clone(share_fs),
config,
})
}
}
@@ -65,4 +73,17 @@ impl Rootfs for ShareFsRootfs {
async fn get_storage(&self) -> Option<Storage> {
None
}
async fn cleanup(&self) -> Result<()> {
// Umount the mount point shared to guest
let share_fs_mount = self.share_fs.get_share_fs_mount();
share_fs_mount
.umount_rootfs(self.config.clone())
.await
.context("umount shared rootfs")?;
// Umount the bundle rootfs
umount_timeout(&self.config.source, 0).context("umount bundle rootfs")?;
Ok(())
}
}

View File

@@ -47,7 +47,7 @@ pub trait ShareFs: Send + Sync {
fn mounted_info_set(&self) -> Arc<Mutex<HashMap<String, MountedInfo>>>;
}
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct ShareFsRootfsConfig {
// TODO: for nydus v5/v6 need to update ShareFsMount
pub cid: String,
@@ -127,7 +127,9 @@ pub trait ShareFsMount: Send + Sync {
/// Downgrade to readonly permission
async fn downgrade_to_ro(&self, file_name: &str) -> Result<()>;
/// Umount the volume
async fn umount(&self, file_name: &str) -> Result<()>;
async fn umount_volume(&self, file_name: &str) -> Result<()>;
/// Umount the rootfs
async fn umount_rootfs(&self, config: ShareFsRootfsConfig) -> Result<()>;
}
pub fn new(id: &str, config: &SharedFsInfo) -> Result<Arc<dyn ShareFs>> {

View File

@@ -194,10 +194,34 @@ impl ShareFsMount for VirtiofsShareMount {
Ok(())
}
async fn umount(&self, file_name: &str) -> Result<()> {
let host_dest = do_get_host_path(file_name, &self.id, "", true, true);
umount_timeout(&host_dest, 0).context("Umount readwrite host dest")?;
async fn umount_volume(&self, file_name: &str) -> Result<()> {
let host_dest = do_get_host_path(file_name, &self.id, "", true, false);
umount_timeout(&host_dest, 0).context("umount volume")?;
// The umount event will be propagated to the ro directory
// Remove the directory of the mount point
if let Ok(md) = fs::metadata(&host_dest) {
if md.is_file() {
fs::remove_file(&host_dest).context("remove the volume mount point as a file")?;
}
if md.is_dir() {
fs::remove_dir(&host_dest).context("remove the volume mount point as a dir")?;
}
}
Ok(())
}
async fn umount_rootfs(&self, config: ShareFsRootfsConfig) -> Result<()> {
let host_dest = do_get_host_path(&config.target, &self.id, &config.cid, false, false);
umount_timeout(&host_dest, 0).context("umount rootfs")?;
// Remove the directory of the mount point
if let Ok(md) = fs::metadata(&host_dest) {
if md.is_dir() {
fs::remove_dir(&host_dest).context("remove the rootfs mount point as a dir")?;
}
}
Ok(())
}
}

View File

@@ -7,7 +7,7 @@
use std::{
path::{Path, PathBuf},
str::FromStr,
sync::{Arc, Weak},
sync::Arc,
};
use anyhow::{anyhow, Context, Result};
@@ -24,7 +24,7 @@ use kata_types::mount;
// device nodes to the guest.
// skip the volumes whose source had already set to guest share dir.
pub(crate) struct ShareFsVolume {
share_fs: Option<Weak<dyn ShareFs>>,
share_fs: Option<Arc<dyn ShareFs>>,
mounts: Vec<oci::Mount>,
storages: Vec<agent::Storage>,
}
@@ -40,7 +40,7 @@ impl ShareFsVolume {
let file_name = generate_mount_path("sandbox", file_name);
let mut volume = Self {
share_fs: share_fs.as_ref().map(Arc::downgrade),
share_fs: share_fs.as_ref().map(Arc::clone),
mounts: vec![],
storages: vec![],
};
@@ -161,10 +161,7 @@ impl Volume for ShareFsVolume {
if self.share_fs.is_none() {
return Ok(());
}
let share_fs = match self.share_fs.as_ref().unwrap().upgrade() {
Some(share_fs) => share_fs,
None => return Err(anyhow!("The share_fs was released unexpectedly")),
};
let share_fs = self.share_fs.as_ref().unwrap();
let mounted_info_set = share_fs.mounted_info_set();
let mut mounted_info_set = mounted_info_set.lock().await;
@@ -219,7 +216,7 @@ impl Volume for ShareFsVolume {
mounted_info_set.remove(&host_source);
// Umount the volume
share_fs_mount
.umount(&file_name)
.umount_volume(&file_name)
.await
.context("Umount volume")?
}
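
For context on the `Weak` → `Arc` change above: a `Weak` handle has to be upgraded before every use, and the upgrade fails once the last strong reference is dropped, which is exactly the "released unexpectedly" error path the old code had to handle; keeping a strong `Arc` clone guarantees the `ShareFs` outlives the volume. A minimal illustration in plain Rust (not kata types):

use std::sync::{Arc, Weak};

fn main() {
    // Old approach: hold only a Weak handle and upgrade it at cleanup time.
    let owner = Arc::new(String::from("share_fs"));
    let weak: Weak<String> = Arc::downgrade(&owner);
    drop(owner); // the last strong reference goes away first
    // upgrade() now fails -- the "released unexpectedly" error path.
    assert!(weak.upgrade().is_none());

    // New approach: hold a strong clone, so the value stays alive for cleanup.
    let owner = Arc::new(String::from("share_fs"));
    let strong: Arc<String> = Arc::clone(&owner);
    drop(owner);
    assert_eq!(strong.as_str(), "share_fs");
}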

View File

@@ -249,6 +249,7 @@ impl ContainerInner {
.await?;
self.clean_volumes().await.context("clean volumes")?;
self.clean_rootfs().await.context("clean rootfs")?;
Ok(())
}
@@ -279,7 +280,7 @@ impl ContainerInner {
unhandled.push(Arc::clone(v));
warn!(
sl!(),
"Failed to clean volume {:?}, error = {:?}",
"Failed to clean the volume = {:?}, error = {:?}",
v.get_volume_mount(),
err
);
@@ -290,4 +291,23 @@ impl ContainerInner {
}
Ok(())
}
async fn clean_rootfs(&mut self) -> Result<()> {
let mut unhandled = Vec::new();
for rootfs in self.rootfs.iter() {
if let Err(err) = rootfs.cleanup().await {
unhandled.push(Arc::clone(rootfs));
warn!(
sl!(),
"Failed to umount rootfs, cid = {:?}, error = {:?}",
self.container_id(),
err
);
}
}
if !unhandled.is_empty() {
self.rootfs = unhandled;
}
Ok(())
}
}
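
The new `clean_rootfs()` mirrors `clean_volumes()`: entries whose cleanup fails are kept on the container so a later teardown attempt can retry them. A small standalone sketch of that retain-for-retry pattern, using an illustrative `Cleanable` trait in place of the async rootfs/volume types:

use std::sync::Arc;

// Illustrative stand-in for the rootfs/volume objects that expose cleanup().
trait Cleanable {
    fn cleanup(&self) -> Result<(), String>;
}

struct FakeMount {
    busy: bool,
}

impl Cleanable for FakeMount {
    fn cleanup(&self) -> Result<(), String> {
        if self.busy {
            Err(String::from("device or resource busy"))
        } else {
            Ok(())
        }
    }
}

// Entries that fail to clean up stay in the list for the next attempt.
fn clean(resources: &mut Vec<Arc<dyn Cleanable>>) {
    let mut unhandled = Vec::new();
    for r in resources.iter() {
        if let Err(err) = r.cleanup() {
            eprintln!("failed to clean up resource, error = {:?}", err);
            unhandled.push(Arc::clone(r));
        }
    }
    if !unhandled.is_empty() {
        *resources = unhandled;
    }
}

fn main() {
    let mut resources: Vec<Arc<dyn Cleanable>> = vec![
        Arc::new(FakeMount { busy: false }),
        Arc::new(FakeMount { busy: true }),
    ];
    clean(&mut resources);
    assert_eq!(resources.len(), 1); // only the busy mount is retained for a retry
}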