mirror of
https://github.com/kata-containers/kata-containers.git
synced 2025-04-29 12:14:48 +00:00
runtime-rs: cleanup kata host share path
Clean up the /run/kata-containers/shared/sandboxes/pid path. Fixes: #5975. Signed-off-by: Zhongtao Hu <zhongtaohu.tim@linux.alibaba.com>
This commit is contained in:
parent
20196048bf
commit
2dd2421ad0
@ -101,9 +101,9 @@ impl ResourceManager {
|
||||
inner.update_cgroups(cid, linux_resources).await
|
||||
}
|
||||
|
||||
pub async fn delete_cgroups(&self) -> Result<()> {
|
||||
pub async fn cleanup(&self) -> Result<()> {
|
||||
let inner = self.inner.read().await;
|
||||
inner.delete_cgroups().await
|
||||
inner.cleanup().await
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@ use std::{sync::Arc, thread};
|
||||
|
||||
use crate::resource_persist::ResourceState;
|
||||
use agent::{Agent, Storage};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use anyhow::{anyhow, Context, Ok, Result};
|
||||
use async_trait::async_trait;
|
||||
use hypervisor::Hypervisor;
|
||||
use kata_types::config::TomlConfig;
|
||||
@ -233,8 +233,22 @@ impl ResourceManagerInner {
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn delete_cgroups(&self) -> Result<()> {
|
||||
self.cgroups_resource.delete().await
|
||||
pub async fn cleanup(&self) -> Result<()> {
|
||||
// clean up cgroup
|
||||
self.cgroups_resource
|
||||
.delete()
|
||||
.await
|
||||
.context("delete cgroup")?;
|
||||
// clean up share fs mount
|
||||
if let Some(share_fs) = &self.share_fs {
|
||||
share_fs
|
||||
.get_share_fs_mount()
|
||||
.cleanup(&self.sid)
|
||||
.await
|
||||
.context("failed to cleanup host path")?;
|
||||
}
|
||||
// TODO cleanup other resources
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn dump(&self) {
|
||||
|
@ -131,6 +131,8 @@ pub trait ShareFsMount: Send + Sync {
|
||||
async fn umount_volume(&self, file_name: &str) -> Result<()>;
|
||||
/// Umount the rootfs
|
||||
async fn umount_rootfs(&self, config: &ShareFsRootfsConfig) -> Result<()>;
|
||||
/// Clean up share fs mount
|
||||
async fn cleanup(&self, sid: &str) -> Result<()>;
|
||||
}
|
||||
|
||||
pub fn new(id: &str, config: &SharedFsInfo) -> Result<Arc<dyn ShareFs>> {
|
||||
|
@ -59,6 +59,10 @@ pub fn get_host_rw_shared_path(sid: &str) -> PathBuf {
|
||||
Path::new(KATA_HOST_SHARED_DIR).join(sid).join("rw")
|
||||
}
|
||||
|
||||
pub fn get_host_shared_path(sid: &str) -> PathBuf {
|
||||
Path::new(KATA_HOST_SHARED_DIR).join(sid)
|
||||
}
|
||||
|
||||
fn do_get_guest_any_path(
|
||||
target: &str,
|
||||
cid: &str,
|
||||
|
@ -7,7 +7,7 @@
|
||||
use agent::Storage;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use async_trait::async_trait;
|
||||
use kata_sys_util::mount::{bind_remount, umount_timeout};
|
||||
use kata_sys_util::mount::{bind_remount, umount_all, umount_timeout};
|
||||
use kata_types::k8s::is_watchable_mount;
|
||||
use kata_types::mount;
|
||||
use nix::sys::stat::stat;
|
||||
@ -20,7 +20,8 @@ const WATCHABLE_BIND_DEV_TYPE: &str = "watchable-bind";
|
||||
pub const EPHEMERAL_PATH: &str = "/run/kata-containers/sandbox/ephemeral";
|
||||
|
||||
use super::{
|
||||
utils::{self, do_get_host_path},
|
||||
get_host_rw_shared_path,
|
||||
utils::{self, do_get_host_path, get_host_ro_shared_path, get_host_shared_path},
|
||||
ShareFsMount, ShareFsMountResult, ShareFsRootfsConfig, ShareFsVolumeConfig,
|
||||
KATA_GUEST_SHARE_DIR, PASSTHROUGH_FS_DIR,
|
||||
};
|
||||
@ -224,4 +225,18 @@ impl ShareFsMount for VirtiofsShareMount {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn cleanup(&self, sid: &str) -> Result<()> {
|
||||
// Unmount ro path
|
||||
let host_ro_dest = get_host_ro_shared_path(sid);
|
||||
umount_all(host_ro_dest.clone(), true).context("failed to umount ro path")?;
|
||||
fs::remove_dir_all(host_ro_dest).context("failed to remove ro path")?;
|
||||
// As the rootfs and volume have already been unmounted before calling this function, just remove the rw dir directly
|
||||
let host_rw_dest = get_host_rw_shared_path(sid);
|
||||
fs::remove_dir_all(host_rw_dest).context("failed to remove rw path")?;
|
||||
// remove the host share directory
|
||||
let host_path = get_host_shared_path(sid);
|
||||
fs::remove_dir_all(host_path).context("failed to remove host shared path")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ use async_trait::async_trait;
|
||||
pub trait Sandbox: Send + Sync {
|
||||
async fn start(&self, netns: Option<String>) -> Result<()>;
|
||||
async fn stop(&self) -> Result<()>;
|
||||
async fn cleanup(&self, container_id: &str) -> Result<()>;
|
||||
async fn cleanup(&self) -> Result<()>;
|
||||
async fn shutdown(&self) -> Result<()>;
|
||||
|
||||
// agent function
|
||||
|
@ -174,7 +174,7 @@ impl RuntimeHandlerManager {
|
||||
.await
|
||||
.context("failed to restore the sandbox")?;
|
||||
sandbox
|
||||
.cleanup(&inner.id)
|
||||
.cleanup()
|
||||
.await
|
||||
.context("failed to cleanup the resource")?;
|
||||
}
|
||||
|
@ -242,17 +242,7 @@ impl Sandbox for VirtSandbox {
|
||||
|
||||
self.stop().await.context("stop")?;
|
||||
|
||||
info!(sl!(), "delete cgroup");
|
||||
self.resource_manager
|
||||
.delete_cgroups()
|
||||
.await
|
||||
.context("delete cgroups")?;
|
||||
|
||||
info!(sl!(), "delete hypervisor");
|
||||
self.hypervisor
|
||||
.cleanup()
|
||||
.await
|
||||
.context("delete hypervisor")?;
|
||||
self.cleanup().await.context("do the clean up")?;
|
||||
|
||||
info!(sl!(), "stop monitor");
|
||||
self.monitor.stop().await;
|
||||
@ -269,9 +259,19 @@ impl Sandbox for VirtSandbox {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn cleanup(&self, _id: &str) -> Result<()> {
|
||||
self.resource_manager.delete_cgroups().await?;
|
||||
self.hypervisor.cleanup().await?;
|
||||
async fn cleanup(&self) -> Result<()> {
|
||||
info!(sl!(), "delete hypervisor");
|
||||
self.hypervisor
|
||||
.cleanup()
|
||||
.await
|
||||
.context("delete hypervisor")?;
|
||||
|
||||
info!(sl!(), "resource clean up");
|
||||
self.resource_manager
|
||||
.cleanup()
|
||||
.await
|
||||
.context("resource clean up")?;
|
||||
|
||||
// TODO: cleanup other sandbox resource
|
||||
Ok(())
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user