runtime-rs: cleanup kata host share path

Clean up the /run/kata-containers/shared/sandboxes/pid path on the host.

Fixes: #5975
Signed-off-by: Zhongtao Hu <zhongtaohu.tim@linux.alibaba.com>
Author: Zhongtao Hu
Date:   2023-01-16 11:18:57 +08:00
Parent: 20196048bf
Commit: 2dd2421ad0
8 changed files with 58 additions and 23 deletions

@@ -101,9 +101,9 @@ impl ResourceManager {
         inner.update_cgroups(cid, linux_resources).await
     }
 
-    pub async fn delete_cgroups(&self) -> Result<()> {
+    pub async fn cleanup(&self) -> Result<()> {
         let inner = self.inner.read().await;
-        inner.delete_cgroups().await
+        inner.cleanup().await
     }
 }

@@ -8,7 +8,7 @@ use std::{sync::Arc, thread};
 use crate::resource_persist::ResourceState;
 use agent::{Agent, Storage};
-use anyhow::{anyhow, Context, Result};
+use anyhow::{anyhow, Context, Ok, Result};
 use async_trait::async_trait;
 use hypervisor::Hypervisor;
 use kata_types::config::TomlConfig;
@@ -233,8 +233,22 @@ impl ResourceManagerInner {
         .await
     }
 
-    pub async fn delete_cgroups(&self) -> Result<()> {
-        self.cgroups_resource.delete().await
+    pub async fn cleanup(&self) -> Result<()> {
+        // clean up cgroup
+        self.cgroups_resource
+            .delete()
+            .await
+            .context("delete cgroup")?;
+        // clean up share fs mount
+        if let Some(share_fs) = &self.share_fs {
+            share_fs
+                .get_share_fs_mount()
+                .cleanup(&self.sid)
+                .await
+                .context("failed to cleanup host path")?;
+        }
+        // TODO cleanup other resources
+        Ok(())
     }
 
     pub async fn dump(&self) {

@@ -131,6 +131,8 @@ pub trait ShareFsMount: Send + Sync {
     async fn umount_volume(&self, file_name: &str) -> Result<()>;
     /// Umount the rootfs
     async fn umount_rootfs(&self, config: &ShareFsRootfsConfig) -> Result<()>;
+    /// Clean up share fs mount
+    async fn cleanup(&self, sid: &str) -> Result<()>;
 }
 
 pub fn new(id: &str, config: &SharedFsInfo) -> Result<Arc<dyn ShareFs>> {
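Every ShareFsMount backend now has to provide this hook so that ResourceManagerInner::cleanup can call it unconditionally. Below is a minimal, self-contained sketch of the shape of the new method, using a locally defined trait that mirrors only cleanup() and a hypothetical backend with no host-side state; the names are illustrative and not part of this change.

// Sketch only: the real trait is ShareFsMount; this mirrors just the new method.
use anyhow::Result;
use async_trait::async_trait;

#[async_trait]
trait CleanupHook: Send + Sync {
    /// Remove any per-sandbox host state keyed by the sandbox id.
    async fn cleanup(&self, sid: &str) -> Result<()>;
}

// Hypothetical backend with nothing mounted on the host.
struct NoopShareMount;

#[async_trait]
impl CleanupHook for NoopShareMount {
    async fn cleanup(&self, _sid: &str) -> Result<()> {
        // Nothing to umount or remove for this backend.
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let mount: Box<dyn CleanupHook> = Box::new(NoopShareMount);
    mount.cleanup("sandbox-id").await
}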

@@ -59,6 +59,10 @@ pub fn get_host_rw_shared_path(sid: &str) -> PathBuf {
     Path::new(KATA_HOST_SHARED_DIR).join(sid).join("rw")
 }
 
+pub fn get_host_shared_path(sid: &str) -> PathBuf {
+    Path::new(KATA_HOST_SHARED_DIR).join(sid)
+}
+
 fn do_get_guest_any_path(
     target: &str,
     cid: &str,
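For context, these helpers compose the per-sandbox host layout that the new cleanup path removes. A standalone sketch follows, assuming KATA_HOST_SHARED_DIR is the /run/kata-containers/shared/sandboxes directory named in the commit message and that the "ro" subdirectory mirrors the existing "rw" helper; both are assumptions, not taken from this diff.

// Standalone sketch of the host share path layout (assumed values; the real
// constant and helpers live in the share_fs module).
use std::path::{Path, PathBuf};

// Assumption: matches the path named in the commit message.
const KATA_HOST_SHARED_DIR: &str = "/run/kata-containers/shared/sandboxes";

fn get_host_shared_path(sid: &str) -> PathBuf {
    Path::new(KATA_HOST_SHARED_DIR).join(sid)
}

fn get_host_ro_shared_path(sid: &str) -> PathBuf {
    // Assumption: "ro" mirrors the existing "rw" helper.
    get_host_shared_path(sid).join("ro")
}

fn get_host_rw_shared_path(sid: &str) -> PathBuf {
    get_host_shared_path(sid).join("rw")
}

fn main() {
    let sid = "0123abcd";
    // cleanup() umounts and removes these, deepest first:
    println!("{}", get_host_ro_shared_path(sid).display()); // .../0123abcd/ro
    println!("{}", get_host_rw_shared_path(sid).display()); // .../0123abcd/rw
    println!("{}", get_host_shared_path(sid).display());    // .../0123abcd
}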

@@ -7,7 +7,7 @@
 use agent::Storage;
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;
-use kata_sys_util::mount::{bind_remount, umount_timeout};
+use kata_sys_util::mount::{bind_remount, umount_all, umount_timeout};
 use kata_types::k8s::is_watchable_mount;
 use kata_types::mount;
 use nix::sys::stat::stat;
@@ -20,7 +20,8 @@ const WATCHABLE_BIND_DEV_TYPE: &str = "watchable-bind";
 pub const EPHEMERAL_PATH: &str = "/run/kata-containers/sandbox/ephemeral";
 
 use super::{
-    utils::{self, do_get_host_path},
+    get_host_rw_shared_path,
+    utils::{self, do_get_host_path, get_host_ro_shared_path, get_host_shared_path},
     ShareFsMount, ShareFsMountResult, ShareFsRootfsConfig, ShareFsVolumeConfig,
     KATA_GUEST_SHARE_DIR, PASSTHROUGH_FS_DIR,
 };
@@ -224,4 +225,18 @@ impl ShareFsMount for VirtiofsShareMount {
         Ok(())
     }
 
+    async fn cleanup(&self, sid: &str) -> Result<()> {
+        // Unmount the ro path
+        let host_ro_dest = get_host_ro_shared_path(sid);
+        umount_all(host_ro_dest.clone(), true).context("failed to umount ro path")?;
+        fs::remove_dir_all(host_ro_dest).context("failed to remove ro path")?;
+        // The rootfs and volumes have already been umounted before this function
+        // is called, so just remove the rw dir directly.
+        let host_rw_dest = get_host_rw_shared_path(sid);
+        fs::remove_dir_all(host_rw_dest).context("failed to remove rw path")?;
+        // remove the host share directory
+        let host_path = get_host_shared_path(sid);
+        fs::remove_dir_all(host_path).context("failed to remove host shared path")?;
+        Ok(())
+    }
 }
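The ordering here matters: the read-only share is umounted (umount_all) before anything is deleted, so remove_dir_all never walks into a live mount; the rw directory and then the per-sandbox parent are removed afterwards. Below is a self-contained sketch of the same deepest-first removal order, exercised on a scratch directory tree instead of real mounts; the paths and names are illustrative only.

// Sketch of the removal order cleanup() follows, without real mounts.
use std::fs;
use std::path::Path;

use anyhow::{Context, Result};

fn remove_sandbox_dirs(base: &Path, sid: &str) -> Result<()> {
    let host_path = base.join(sid);
    // ro first (in the real code this directory is umounted before removal) ...
    fs::remove_dir_all(host_path.join("ro")).context("failed to remove ro path")?;
    // ... then rw ...
    fs::remove_dir_all(host_path.join("rw")).context("failed to remove rw path")?;
    // ... then the per-sandbox parent directory itself.
    fs::remove_dir_all(&host_path).context("failed to remove host shared path")?;
    Ok(())
}

fn main() -> Result<()> {
    let base = std::env::temp_dir().join("kata-share-sketch");
    let sid = "0123abcd";
    fs::create_dir_all(base.join(sid).join("ro"))?;
    fs::create_dir_all(base.join(sid).join("rw"))?;
    remove_sandbox_dirs(&base, sid)?;
    fs::remove_dir_all(&base)?; // tidy up the sketch's scratch space
    Ok(())
}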

@@ -11,7 +11,7 @@ use async_trait::async_trait;
 pub trait Sandbox: Send + Sync {
     async fn start(&self, netns: Option<String>) -> Result<()>;
     async fn stop(&self) -> Result<()>;
-    async fn cleanup(&self, container_id: &str) -> Result<()>;
+    async fn cleanup(&self) -> Result<()>;
     async fn shutdown(&self) -> Result<()>;
 
     // agent function

@@ -174,7 +174,7 @@ impl RuntimeHandlerManager {
             .await
             .context("failed to restore the sandbox")?;
         sandbox
-            .cleanup(&inner.id)
+            .cleanup()
             .await
             .context("failed to cleanup the resource")?;
     }

@@ -242,17 +242,7 @@ impl Sandbox for VirtSandbox {
         self.stop().await.context("stop")?;
 
-        info!(sl!(), "delete cgroup");
-        self.resource_manager
-            .delete_cgroups()
-            .await
-            .context("delete cgroups")?;
-
-        info!(sl!(), "delete hypervisor");
-        self.hypervisor
-            .cleanup()
-            .await
-            .context("delete hypervisor")?;
+        self.cleanup().await.context("do the clean up")?;
 
         info!(sl!(), "stop monitor");
         self.monitor.stop().await;
@@ -269,9 +259,19 @@ impl Sandbox for VirtSandbox {
         Ok(())
     }
 
-    async fn cleanup(&self, _id: &str) -> Result<()> {
-        self.resource_manager.delete_cgroups().await?;
-        self.hypervisor.cleanup().await?;
+    async fn cleanup(&self) -> Result<()> {
+        info!(sl!(), "delete hypervisor");
+        self.hypervisor
+            .cleanup()
+            .await
+            .context("delete hypervisor")?;
+
+        info!(sl!(), "resource clean up");
+        self.resource_manager
+            .cleanup()
+            .await
+            .context("resource clean up")?;
         // TODO: cleanup other sandbox resources
         Ok(())
     }