Mirror of https://github.com/kata-containers/kata-containers.git
runtime-rs: Integrate VolumeManager into ShareFsVolume lifecycle
This commit integrates the new `VolumeManager` into the `ShareFsVolume` lifecycle. Instead of unconditionally copying files into the guest, `ShareFsVolume::new` now asks the `VolumeManager` for the guest path and for whether the volume still needs to be copied. The `cleanup` function now releases the container's reference on the volume, allowing the `VolumeManager` to track shared state and clean up resources once the volume is no longer in use.

Signed-off-by: Alex Lyn <alex.lyn@antgroup.com>
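For readers of the diff below: the `VolumeManager` itself is introduced elsewhere in this series, so only its call sites (`get_or_create_volume`, `mark_as_copied`, `release_volume`) appear in these hunks. The following is a minimal, synchronous sketch of the reference-counting contract those call sites imply, not the actual implementation; the type name `VolumeEntry`, the guest-path prefix, and the simplified signatures (the real methods are async, take the mount destination, and hand the watcher's monitor task to `mark_as_copied`) are assumptions made for illustration.

    // Illustrative sketch only (not the code under review): a synchronous, stripped-down
    // VolumeManager showing the reference-counting contract the diff relies on.
    // `VolumeEntry` and the guest-path derivation are assumptions; the real manager is
    // async and keeps a `VolumeState` (guest path, copied flag, monitor task) per source.
    use std::collections::{HashMap, HashSet};
    use std::path::Path;
    use std::sync::Mutex;

    struct VolumeEntry {
        guest_path: String,
        copied: bool,
        refs: HashSet<String>, // container IDs currently using this volume
    }

    #[derive(Default)]
    struct VolumeManager {
        volumes: Mutex<HashMap<String, VolumeEntry>>,
    }

    impl VolumeManager {
        /// Returns the guest path for `source_path` and whether the caller still has
        /// to copy the data in (true only for the first container registering it).
        fn get_or_create_volume(&self, source_path: &str, cid: &str) -> (String, bool) {
            let mut volumes = self.volumes.lock().unwrap();
            let entry = volumes
                .entry(source_path.to_string())
                .or_insert_with(|| VolumeEntry {
                    // Placeholder derivation; the real code builds a sandbox-scoped path.
                    guest_path: format!(
                        "/run/kata-containers/shared/{}",
                        Path::new(source_path)
                            .file_name()
                            .map(|n| n.to_string_lossy().into_owned())
                            .unwrap_or_default()
                    ),
                    copied: false,
                    refs: HashSet::new(),
                });
            entry.refs.insert(cid.to_string());
            (entry.guest_path.clone(), !entry.copied)
        }

        /// Records that the copy into the guest has completed.
        fn mark_as_copied(&self, source_path: &str) {
            if let Some(entry) = self.volumes.lock().unwrap().get_mut(source_path) {
                entry.copied = true;
            }
        }

        /// Drops one container reference; returns true once no references remain,
        /// i.e. the volume's guest-side resources may be reclaimed.
        fn release_volume(&self, source_path: &str, cid: &str) -> bool {
            let mut volumes = self.volumes.lock().unwrap();
            let no_refs_left = match volumes.get_mut(source_path) {
                Some(entry) => {
                    entry.refs.remove(cid);
                    entry.refs.is_empty()
                }
                None => false,
            };
            if no_refs_left {
                volumes.remove(source_path);
            }
            no_refs_left
        }
    }

    fn main() {
        let mgr = VolumeManager::default();

        // Two containers share the same host file; only the first copy is needed.
        let (guest, need_copy) = mgr.get_or_create_volume("/etc/resolv.conf", "container-a");
        assert!(need_copy);
        mgr.mark_as_copied("/etc/resolv.conf");

        let (_, need_copy_again) = mgr.get_or_create_volume("/etc/resolv.conf", "container-b");
        assert!(!need_copy_again);

        // Cleanup releases references; only the last release reports "safe to clean up".
        assert!(!mgr.release_volume("/etc/resolv.conf", "container-a"));
        assert!(mgr.release_volume("/etc/resolv.conf", "container-b"));
        println!("guest path: {guest}");
    }

The point of this contract is visible in `cleanup()` below: each container releases its reference, and only the release that drops the count to zero reports that guest-side resources could be reclaimed.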
@@ -53,7 +53,12 @@ pub(crate) struct ShareFsVolume {
     share_fs: Option<Arc<dyn ShareFs>>,
     mounts: Vec<oci::Mount>,
     storages: Vec<agent::Storage>,
-    monitor_task: Option<JoinHandle<()>>,
+    // Add volume manager reference
+    volume_manager: Option<Arc<VolumeManager>>,
+    // Record the source path for cleanup
+    source_path: Option<String>,
+    // Record the container ID
+    container_id: String,
 }

 /// Directory Monitor Config
@@ -291,7 +296,6 @@ struct VolumeState {
     monitor_task: Option<Arc<JoinHandle<()>>>,
 }

-#[allow(dead_code)]
 impl VolumeManager {
     pub fn new() -> Self {
         Self {
@@ -351,7 +355,9 @@ impl VolumeManager {

         info!(
             sl!(),
-            "Created new volume: source={:?}, guest={:?}", state.source_path, state.guest_path,
+            "Created new volume state: source={:?}, guest={:?}",
+            state.source_path,
+            state.guest_path,
         );

         // Return guest path and whether a copy is needed (true, as it's new)
@@ -429,6 +435,8 @@ impl ShareFsVolume {
         readonly: bool,
         agent: Arc<dyn Agent>,
     ) -> Result<Self> {
+        // TODO: The volume manager should be passed by ShareFsVolume::new(...,volume_manager)
+        let volume_manager: Arc<VolumeManager> = Arc::new(VolumeManager::new());
         // The file_name is in the format of "sandbox-{uuid}-{file_name}"
         let source_path = get_mount_path(m.source());
         let file_name = Path::new(&source_path)
@@ -442,59 +450,66 @@ impl ShareFsVolume {
             share_fs: share_fs.as_ref().map(Arc::clone),
             mounts: vec![],
             storages: vec![],
-            monitor_task: None,
+            volume_manager: Some(volume_manager.clone()),
+            source_path: Some(source_path.clone()),
+            container_id: cid.to_string(),
         };

         match share_fs {
             None => {
-                let src = match std::fs::canonicalize(&source_path) {
-                    Err(err) => {
-                        return Err(anyhow!(format!(
-                            "failed to canonicalize file {} {:?}",
-                            &source_path, err
-                        )))
+                // Get or create the guest path
+                let (guest_path, need_copy) = volume_manager
+                    .get_or_create_volume(&source_path, cid, m.destination())
+                    .await?;
+
+                let src = std::fs::canonicalize(&source_path)?;
+
+                // Only copy if needed (first time creating)
+                if need_copy {
+                    info!(
+                        sl!(),
+                        "First time creating volume, copying from {:?} to {:?}", src, guest_path
+                    );
+
+                    let mut monitor_task = None;
+
+                    if src.is_file() {
+                        // Copy a single file
+                        Self::copy_file_to_guest(&src, &guest_path, &agent).await?;
+                    } else if src.is_dir() {
+                        // Create directory
+                        Self::copy_directory_to_guest(&src, &guest_path, &agent).await?;
+
+                        // Start monitoring (only for watchable volumes)
+                        if is_watchable_volume(&src) {
+                            info!(sl!(), "Starting monitor for watchable volume: {:?}", src);
+                            let watcher = FsWatcher::new(&src).await?;
+                            let handle = watcher
+                                .start_monitor(
+                                    agent.clone(),
+                                    src.clone(),
+                                    PathBuf::from(&guest_path),
+                                )
+                                .await;
+                            monitor_task = Some(handle);
+                        }
+                    } else {
+                        warn!(
+                            sl!(),
+                            "Ignoring non-regular file {:?} as FS sharing not supported", src
+                        );
+                        return Ok(volume);
                     }
-                    Ok(src) => src,
-                };
-                // let mut monitor_task = None;
-                // This is where we set the value for the guest path
-                let guest_path = [
-                    DEFAULT_KATA_GUEST_SANDBOX_DIR,
-                    PASSTHROUGH_FS_DIR,
-                    file_name.clone().as_str(),
-                ]
-                .join("/");
-
-                debug!(
-                    sl!(),
-                    "copy local file {:?} to guest {:?}",
-                    &source_path,
-                    guest_path.clone()
-                );
-                // If the mount source is a file, we can copy it to the sandbox
-                if src.is_file() {
-                    // Copy a single file
-                    Self::copy_file_to_guest(&src, &guest_path, &agent).await?;
-                } else if src.is_dir() {
-                    // Create directory
-                    Self::copy_directory_to_guest(&src, &guest_path, &agent).await?;
-
-                    // Start monitoring (only for watchable volumes)
-                    if is_watchable_volume(&src) {
-                        info!(sl!(), "Starting monitor for watchable volume: {:?}", src);
-                        let watcher = FsWatcher::new(&src).await?;
-                        let monitor_task = watcher
-                            .start_monitor(agent.clone(), src.clone(), PathBuf::from(&guest_path))
-                            .await;
-                        volume.monitor_task = Some(monitor_task);
-                    }
+
+                    // Mark as copied
+                    volume_manager
+                        .mark_as_copied(&source_path, monitor_task)
+                        .await?;
                 } else {
-                    // If not, we can ignore it. Let's issue a warning so that the user knows.
                     warn!(
                         sl!(),
-                        "Ignoring non-regular file {:?} as FS sharing not supported", src
+                        "Volume already exists in guest, skipping copy: {:?}", guest_path
                     );
-                    return Ok(volume);
                 }

                 // Create mount configuration
@@ -512,6 +527,8 @@ impl ShareFsVolume {
                     options.push("rprivate".into());
                 }
                 oci_mount.set_options(Some(options));
+
+                volume.mounts.push(oci_mount);
             }
             Some(share_fs) => {
                 let share_fs_mount = share_fs.get_share_fs_mount();
@@ -590,6 +607,7 @@ impl ShareFsVolume {
                 }
             }
         }
+
         Ok(volume)
     }

@@ -664,7 +682,26 @@ impl Volume for ShareFsVolume {
     async fn cleanup(&self, _device_manager: &RwLock<DeviceManager>) -> Result<()> {
         let share_fs = match self.share_fs.as_ref() {
             Some(fs) => fs,
-            None => return Ok(()),
+            None => {
+                return {
+                    // Release volume reference
+                    if let (Some(manager), Some(source)) = (&self.volume_manager, &self.source_path)
+                    {
+                        let should_cleanup =
+                            manager.release_volume(source, &self.container_id).await?;
+
+                        if should_cleanup {
+                            info!(
+                                sl!(),
+                                "Volume {:?} has no more references, can be cleaned up", source
+                            );
+                            // NOTE: We cannot delete files from the guest because there is no corresponding API
+                            // Files will be cleaned up automatically when the sandbox is destroyed
+                        }
+                    }
+                    Ok(())
+                };
+            }
         };

         let mounted_info_set = share_fs.mounted_info_set();
@@ -736,7 +773,6 @@ impl Volume for ShareFsVolume {
     }
 }

-#[allow(dead_code)]
 async fn copy_dir_recursively<P: AsRef<Path>>(
     src_dir: P,
     dest_dir: &str,