runtime-rs: Clean up mount points shared to guest

Fixed an issue where shared volumes couldn't be unmounted correctly.

The rootfs of each container is now cleaned up after the container is killed,
except for `NydusRootfs`, whose cleanup is not yet implemented.
`ShareFsRootfs::cleanup()` calls `VirtiofsShareMount::umount_rootfs()` to umount
the mount points shared to the guest, and then umounts the bundle rootfs.
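
In sketch form, the new cleanup path looks roughly like the following. This is a
simplified, synchronous mock for illustration only: `RootfsConfig`,
`MockShareFsMount`, and the printed paths are hypothetical stand-ins for
`ShareFsRootfsConfig`, `VirtiofsShareMount`, and the real `umount_timeout()`
calls in the diff below.

// Simplified sketch of the cleanup flow added by this commit. The real code is
// async and lives in runtime-rs; here the share-fs layer is mocked and the
// umount calls are replaced by prints so the example is self-contained.
use std::io::Result;

// Stand-in for ShareFsRootfsConfig: describes what was shared to the guest.
#[derive(Clone)]
struct RootfsConfig {
    cid: String,
    source: String, // bundle rootfs path on the host
    target: String, // target under the host/guest shared directory
}

// Stand-in for the ShareFsMount side (VirtiofsShareMount in the real code).
struct MockShareFsMount;

impl MockShareFsMount {
    // Mirrors umount_rootfs(): umount the host-side shared mount point,
    // then remove the now-empty mount point directory.
    fn umount_rootfs(&self, config: &RootfsConfig) -> Result<()> {
        println!("umount shared rootfs of {} at {}", config.cid, config.target);
        Ok(())
    }
}

// Stand-in for the Rootfs trait, which now carries a cleanup() method.
trait Rootfs {
    fn cleanup(&self) -> Result<()>;
}

struct ShareFsRootfs {
    share_fs_mount: MockShareFsMount,
    config: RootfsConfig,
}

impl Rootfs for ShareFsRootfs {
    fn cleanup(&self) -> Result<()> {
        // 1. Umount the mount point shared to the guest.
        self.share_fs_mount.umount_rootfs(&self.config)?;
        // 2. Umount the bundle rootfs on the host
        //    (umount_timeout(&self.config.source, 0) in the real code).
        println!("umount bundle rootfs at {}", self.config.source);
        Ok(())
    }
}

fn main() -> Result<()> {
    // Hypothetical values; in runtime-rs the config is built in ShareFsRootfs::new().
    let rootfs = ShareFsRootfs {
        share_fs_mount: MockShareFsMount,
        config: RootfsConfig {
            cid: "container-1".to_string(),
            source: "/run/kata/bundle/rootfs".to_string(),
            target: "rootfs".to_string(),
        },
    };
    // ContainerInner::clean_rootfs() calls this for every rootfs after the
    // container is killed.
    rootfs.cleanup()
}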

Fixes: #5898

Signed-off-by: Xuewei Niu <niuxuewei.nxw@antgroup.com>
Author: Xuewei Niu
Date:   2022-12-14 11:32:00 +08:00
Parent: e4645642d0
Commit: 0e69207909

7 changed files with 112 additions and 47 deletions

@@ -27,6 +27,7 @@ pub trait Rootfs: Send + Sync {
     async fn get_guest_rootfs_path(&self) -> Result<String>;
     async fn get_rootfs_mount(&self) -> Result<Vec<oci::Mount>>;
     async fn get_storage(&self) -> Option<Storage>;
+    async fn cleanup(&self) -> Result<()>;
 }
 
 #[derive(Default)]
@@ -66,11 +67,10 @@ impl RootFsResource {
             // if rootfs_mounts is empty
             mounts_vec if mounts_vec.is_empty() => {
                 if let Some(share_fs) = share_fs {
-                    let share_fs_mount = share_fs.get_share_fs_mount();
                     // share fs rootfs
                     Ok(Arc::new(
                         share_fs_rootfs::ShareFsRootfs::new(
-                            &share_fs_mount,
+                            share_fs,
                             cid,
                             root.path.as_str(),
                             None,
@@ -86,25 +86,18 @@ impl RootFsResource {
                 // Safe as single_layer_rootfs must have one layer
                 let layer = &mounts_vec[0];
                 let rootfs: Arc<dyn Rootfs> = if let Some(share_fs) = share_fs {
-                    let share_fs_mount = share_fs.get_share_fs_mount();
                     // nydus rootfs
                     if layer.fs_type == NYDUS_ROOTFS_TYPE {
                         Arc::new(
-                            nydus_rootfs::NydusRootfs::new(
-                                &share_fs_mount,
-                                hypervisor,
-                                sid,
-                                cid,
-                                layer,
-                            )
-                            .await
-                            .context("new nydus rootfs")?,
+                            nydus_rootfs::NydusRootfs::new(share_fs, hypervisor, sid, cid, layer)
+                                .await
+                                .context("new nydus rootfs")?,
                         )
                     } else {
                         // share fs rootfs
                         Arc::new(
                             share_fs_rootfs::ShareFsRootfs::new(
-                                &share_fs_mount,
+                                share_fs,
                                 cid,
                                 bundle_path,
                                 Some(layer),

@@ -9,8 +9,8 @@ use super::{Rootfs, TYPE_OVERLAY_FS};
 use crate::{
     rootfs::{HYBRID_ROOTFS_LOWER_DIR, ROOTFS},
     share_fs::{
-        do_get_guest_path, do_get_guest_share_path, get_host_rw_shared_path, rafs_mount,
-        ShareFsMount, ShareFsRootfsConfig, PASSTHROUGH_FS_DIR,
+        do_get_guest_path, do_get_guest_share_path, get_host_rw_shared_path, rafs_mount, ShareFs,
+        ShareFsRootfsConfig, PASSTHROUGH_FS_DIR,
     },
 };
 use agent::Storage;
@@ -36,16 +36,25 @@ pub(crate) struct NydusRootfs {
 }
 
 impl NydusRootfs {
     pub async fn new(
-        share_fs_mount: &Arc<dyn ShareFsMount>,
+        share_fs: &Arc<dyn ShareFs>,
         h: &dyn Hypervisor,
         sid: &str,
         cid: &str,
         rootfs: &Mount,
     ) -> Result<Self> {
+        let share_fs = Arc::clone(share_fs);
+        let share_fs_mount = share_fs.get_share_fs_mount();
         let extra_options =
             NydusExtraOptions::new(rootfs).context("failed to parse nydus extra options")?;
         info!(sl!(), "extra_option {:?}", &extra_options);
         let rafs_meta = &extra_options.source;
+        let config = ShareFsRootfsConfig {
+            cid: cid.to_string(),
+            source: extra_options.snapshot_dir.clone(),
+            target: SNAPSHOT_DIR.to_string(),
+            readonly: true,
+            is_rafs: false,
+        };
         let (rootfs_storage, rootfs_guest_path) = match extra_options.fs_version.as_str() {
             // both nydus v5 and v6 can be handled by the builtin nydus in dragonball by using the rafs mode.
             // nydus v6 could also be handled by the guest kernel as well, but some kernel patch is not support in the upstream community. We will add an option to let runtime-rs handle nydus v6 in the guest kernel optionally once the patch is ready
@@ -72,13 +81,7 @@ impl NydusRootfs {
                 let rootfs_guest_path = do_get_guest_path(ROOTFS, cid, false, false);
                 // bind mount the snapshot dir under the share directory
                 share_fs_mount
-                    .share_rootfs(ShareFsRootfsConfig {
-                        cid: cid.to_string(),
-                        source: extra_options.snapshot_dir.clone(),
-                        target: SNAPSHOT_DIR.to_string(),
-                        readonly: true,
-                        is_rafs: false,
-                    })
+                    .share_rootfs(config.clone())
                     .await
                     .context("share nydus rootfs")?;
                 let mut options: Vec<String> = Vec::new();
@@ -143,4 +146,9 @@ impl Rootfs for NydusRootfs {
     async fn get_storage(&self) -> Option<Storage> {
         Some(self.rootfs.clone())
     }
+
+    async fn cleanup(&self) -> Result<()> {
+        warn!(sl!(), "Cleaning up Nydus Rootfs is still unimplemented.");
+        Ok(())
+    }
 }

@@ -7,20 +7,22 @@
 use agent::Storage;
 use anyhow::{Context, Result};
 use async_trait::async_trait;
-use kata_sys_util::mount::Mounter;
+use kata_sys_util::mount::{umount_timeout, Mounter};
 use kata_types::mount::Mount;
 use std::sync::Arc;
 
 use super::{Rootfs, ROOTFS};
-use crate::share_fs::{ShareFsMount, ShareFsRootfsConfig};
+use crate::share_fs::{ShareFs, ShareFsRootfsConfig};
 
 pub(crate) struct ShareFsRootfs {
     guest_path: String,
+    share_fs: Arc<dyn ShareFs>,
+    config: ShareFsRootfsConfig,
 }
 
 impl ShareFsRootfs {
     pub async fn new(
-        share_fs_mount: &Arc<dyn ShareFsMount>,
+        share_fs: &Arc<dyn ShareFs>,
         cid: &str,
         bundle_path: &str,
         rootfs: Option<&Mount>,
@@ -35,19 +37,25 @@ impl ShareFsRootfs {
         } else {
             bundle_path.to_string()
         };
+
+        let share_fs_mount = share_fs.get_share_fs_mount();
+        let config = ShareFsRootfsConfig {
+            cid: cid.to_string(),
+            source: bundle_rootfs.to_string(),
+            target: ROOTFS.to_string(),
+            readonly: false,
+            is_rafs: false,
+        };
+
         let mount_result = share_fs_mount
-            .share_rootfs(ShareFsRootfsConfig {
-                cid: cid.to_string(),
-                source: bundle_rootfs.to_string(),
-                target: ROOTFS.to_string(),
-                readonly: false,
-                is_rafs: false,
-            })
+            .share_rootfs(config.clone())
             .await
             .context("share rootfs")?;
 
         Ok(ShareFsRootfs {
             guest_path: mount_result.guest_path,
+            share_fs: Arc::clone(share_fs),
+            config,
         })
     }
 }
@@ -65,4 +73,17 @@ impl Rootfs for ShareFsRootfs {
     async fn get_storage(&self) -> Option<Storage> {
         None
     }
+
+    async fn cleanup(&self) -> Result<()> {
+        // Umount the mount point shared to guest
+        let share_fs_mount = self.share_fs.get_share_fs_mount();
+        share_fs_mount
+            .umount_rootfs(self.config.clone())
+            .await
+            .context("umount shared rootfs")?;
+
+        // Umount the bundle rootfs
+        umount_timeout(&self.config.source, 0).context("umount bundle rootfs")?;
+        Ok(())
+    }
 }

@@ -47,7 +47,7 @@ pub trait ShareFs: Send + Sync {
     fn mounted_info_set(&self) -> Arc<Mutex<HashMap<String, MountedInfo>>>;
 }
 
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct ShareFsRootfsConfig {
     // TODO: for nydus v5/v6 need to update ShareFsMount
     pub cid: String,
@@ -127,7 +127,9 @@ pub trait ShareFsMount: Send + Sync {
     /// Downgrade to readonly permission
     async fn downgrade_to_ro(&self, file_name: &str) -> Result<()>;
     /// Umount the volume
-    async fn umount(&self, file_name: &str) -> Result<()>;
+    async fn umount_volume(&self, file_name: &str) -> Result<()>;
+    /// Umount the rootfs
+    async fn umount_rootfs(&self, config: ShareFsRootfsConfig) -> Result<()>;
 }
 
 pub fn new(id: &str, config: &SharedFsInfo) -> Result<Arc<dyn ShareFs>> {

@@ -194,10 +194,34 @@ impl ShareFsMount for VirtiofsShareMount {
         Ok(())
     }
 
-    async fn umount(&self, file_name: &str) -> Result<()> {
-        let host_dest = do_get_host_path(file_name, &self.id, "", true, true);
-        umount_timeout(&host_dest, 0).context("Umount readwrite host dest")?;
+    async fn umount_volume(&self, file_name: &str) -> Result<()> {
+        let host_dest = do_get_host_path(file_name, &self.id, "", true, false);
+        umount_timeout(&host_dest, 0).context("umount volume")?;
         // Umount event will be propagated to ro directory
+
+        // Remove the directory of mointpoint
+        if let Ok(md) = fs::metadata(&host_dest) {
+            if md.is_file() {
+                fs::remove_file(&host_dest).context("remove the volume mount point as a file")?;
+            }
+            if md.is_dir() {
+                fs::remove_dir(&host_dest).context("remove the volume mount point as a dir")?;
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn umount_rootfs(&self, config: ShareFsRootfsConfig) -> Result<()> {
+        let host_dest = do_get_host_path(&config.target, &self.id, &config.cid, false, false);
+        umount_timeout(&host_dest, 0).context("umount rootfs")?;
+        // Remove the directory of mointpoint
+        if let Ok(md) = fs::metadata(&host_dest) {
+            if md.is_dir() {
+                fs::remove_dir(&host_dest).context("remove the rootfs mount point as a dir")?;
+            }
+        }
         Ok(())
     }
 }

@@ -7,7 +7,7 @@
 use std::{
     path::{Path, PathBuf},
     str::FromStr,
-    sync::{Arc, Weak},
+    sync::Arc,
 };
 
 use anyhow::{anyhow, Context, Result};
@@ -24,7 +24,7 @@ use kata_types::mount;
 // device nodes to the guest.
 // skip the volumes whose source had already set to guest share dir.
 pub(crate) struct ShareFsVolume {
-    share_fs: Option<Weak<dyn ShareFs>>,
+    share_fs: Option<Arc<dyn ShareFs>>,
     mounts: Vec<oci::Mount>,
     storages: Vec<agent::Storage>,
 }
@@ -40,7 +40,7 @@ impl ShareFsVolume {
         let file_name = generate_mount_path("sandbox", file_name);
         let mut volume = Self {
-            share_fs: share_fs.as_ref().map(Arc::downgrade),
+            share_fs: share_fs.as_ref().map(Arc::clone),
            mounts: vec![],
            storages: vec![],
         };
@@ -161,10 +161,7 @@ impl Volume for ShareFsVolume {
         if self.share_fs.is_none() {
             return Ok(());
         }
-        let share_fs = match self.share_fs.as_ref().unwrap().upgrade() {
-            Some(share_fs) => share_fs,
-            None => return Err(anyhow!("The share_fs was released unexpectedly")),
-        };
+        let share_fs = self.share_fs.as_ref().unwrap();
 
         let mounted_info_set = share_fs.mounted_info_set();
         let mut mounted_info_set = mounted_info_set.lock().await;
@@ -219,7 +216,7 @@ impl Volume for ShareFsVolume {
                     mounted_info_set.remove(&host_source);
                     // Umount the volume
                     share_fs_mount
-                        .umount(&file_name)
+                        .umount_volume(&file_name)
                         .await
                         .context("Umount volume")?
                 }

@@ -249,6 +249,7 @@ impl ContainerInner {
             .await?;
 
         self.clean_volumes().await.context("clean volumes")?;
+        self.clean_rootfs().await.context("clean rootfs")?;
 
         Ok(())
     }
@@ -279,7 +280,7 @@ impl ContainerInner {
                 unhandled.push(Arc::clone(v));
                 warn!(
                     sl!(),
-                    "Failed to clean volume {:?}, error = {:?}",
+                    "Failed to clean the volume = {:?}, error = {:?}",
                     v.get_volume_mount(),
                     err
                 );
@@ -290,4 +291,23 @@ impl ContainerInner {
         }
         Ok(())
     }
+
+    async fn clean_rootfs(&mut self) -> Result<()> {
+        let mut unhandled = Vec::new();
+        for rootfs in self.rootfs.iter() {
+            if let Err(err) = rootfs.cleanup().await {
+                unhandled.push(Arc::clone(rootfs));
+                warn!(
+                    sl!(),
+                    "Failed to umount rootfs, cid = {:?}, error = {:?}",
+                    self.container_id(),
+                    err
+                );
+            }
+        }
+        if !unhandled.is_empty() {
+            self.rootfs = unhandled;
+        }
+        Ok(())
+    }
 }