Merge pull request #5607 from justxuewei/feat/sandbox-level-volume
runtime-rs: bind mount volumes in sandbox level
.gitignore vendored (2 changes)
@@ -4,6 +4,8 @@
 **/*.rej
 **/target
 **/.vscode
+**/.idea
+**/.fleet
 pkg/logging/Cargo.lock
 src/agent/src/version.rs
 src/agent/kata-agent.service
@@ -213,11 +213,11 @@ pub fn create_mount_destination<S: AsRef<Path>, D: AsRef<Path>, R: AsRef<Path>>(
     }
 }
 
-/// Remount a bind mount into readonly mode.
+/// Remount a bind mount
 ///
 /// # Safety
 /// Caller needs to ensure safety of the `dst` to avoid possible file path based attacks.
-pub fn bind_remount_read_only<P: AsRef<Path>>(dst: P) -> Result<()> {
+pub fn bind_remount<P: AsRef<Path>>(dst: P, readonly: bool) -> Result<()> {
     let dst = dst.as_ref();
     if dst.is_empty() {
         return Err(Error::NullMountPointPath);
@@ -226,7 +226,7 @@ pub fn bind_remount_read_only<P: AsRef<Path>>(dst: P) -> Result<()> {
         .canonicalize()
         .map_err(|_e| Error::InvalidPath(dst.to_path_buf()))?;
 
-    do_rebind_mount_read_only(dst, MsFlags::empty())
+    do_rebind_mount(dst, readonly, MsFlags::empty())
 }
 
 /// Bind mount `src` to `dst` in slave mode, optionally in readonly mode if `readonly` is true.
@@ -239,7 +239,7 @@ pub fn bind_remount_read_only<P: AsRef<Path>>(dst: P) -> Result<()> {
 pub fn bind_mount_unchecked<S: AsRef<Path>, D: AsRef<Path>>(
     src: S,
     dst: D,
-    read_only: bool,
+    readonly: bool,
 ) -> Result<()> {
     fail::fail_point!("bind_mount", |_| {
         Err(Error::FailureInject(
@@ -275,8 +275,8 @@ pub fn bind_mount_unchecked<S: AsRef<Path>, D: AsRef<Path>>(
     .map_err(|e| Error::Mount(PathBuf::new(), dst.to_path_buf(), e))?;
 
     // Optionally rebind into readonly mode.
-    if read_only {
-        do_rebind_mount_read_only(dst, MsFlags::empty())?;
+    if readonly {
+        do_rebind_mount(dst, readonly, MsFlags::empty())?;
     }
 
     Ok(())
@@ -356,7 +356,7 @@ impl Mounter for kata_types::mount::Mount {
         // Bind mount readonly.
         let bro_flag = MsFlags::MS_BIND | MsFlags::MS_RDONLY;
         if (o_flag & bro_flag) == bro_flag {
-            do_rebind_mount_read_only(target, o_flag)?;
+            do_rebind_mount(target, true, o_flag)?;
         }
 
         Ok(())
@@ -364,12 +364,16 @@ impl Mounter for kata_types::mount::Mount {
 }
 
 #[inline]
-fn do_rebind_mount_read_only<P: AsRef<Path>>(path: P, flags: MsFlags) -> Result<()> {
+fn do_rebind_mount<P: AsRef<Path>>(path: P, readonly: bool, flags: MsFlags) -> Result<()> {
     mount(
         Some(""),
         path.as_ref(),
         Some(""),
-        flags | MsFlags::MS_BIND | MsFlags::MS_REMOUNT | MsFlags::MS_RDONLY,
+        if readonly {
+            flags | MsFlags::MS_BIND | MsFlags::MS_REMOUNT | MsFlags::MS_RDONLY
+        } else {
+            flags | MsFlags::MS_BIND | MsFlags::MS_REMOUNT
+        },
         Some(""),
     )
     .map_err(|e| Error::Remount(path.as_ref().to_path_buf(), e))
@@ -820,21 +824,21 @@ mod tests {
 
     #[test]
     #[ignore]
-    fn test_bind_remount_read_only() {
+    fn test_bind_remount() {
        let tmpdir = tempfile::tempdir().unwrap();
        let tmpdir2 = tempfile::tempdir().unwrap();
 
        assert!(matches!(
-            bind_remount_read_only(&PathBuf::from("")),
+            bind_remount(&PathBuf::from(""), true),
            Err(Error::NullMountPointPath)
        ));
        assert!(matches!(
-            bind_remount_read_only(&PathBuf::from("../______doesn't____exist____nnn")),
+            bind_remount(&PathBuf::from("../______doesn't____exist____nnn"), true),
            Err(Error::InvalidPath(_))
        ));
 
        bind_mount_unchecked(tmpdir2.path(), tmpdir.path(), true).unwrap();
-        bind_remount_read_only(tmpdir.path()).unwrap();
+        bind_remount(tmpdir.path(), true).unwrap();
        umount_timeout(tmpdir.path().to_str().unwrap(), 0).unwrap();
     }
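
The renamed helper keeps the single mount(2) call but now chooses the flag set from the `readonly` argument, so the same entry point can both drop and restore write permission on an existing bind mount. A minimal standalone sketch of that flag logic, using nix directly; the `rebind` helper and the example path are illustrative and not part of kata-sys-util:

use nix::mount::{mount, MsFlags};

fn rebind(path: &str, readonly: bool) -> nix::Result<()> {
    // MS_BIND | MS_REMOUNT changes an existing bind mount in place;
    // MS_RDONLY is added only when the readonly view is wanted.
    let mut flags = MsFlags::MS_BIND | MsFlags::MS_REMOUNT;
    if readonly {
        flags |= MsFlags::MS_RDONLY;
    }
    mount(Some(""), path, Some(""), flags, Some(""))
}

fn main() -> nix::Result<()> {
    // Needs an existing bind mount at the path and root privileges.
    rebind("/tmp/mnt", true)?; // downgrade to readonly
    rebind("/tmp/mnt", false)  // upgrade back to readwrite
}
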
src/runtime-rs/Cargo.lock generated (9 changes)
@@ -249,6 +249,12 @@ dependencies = [
  "rustc-demangle",
 ]
 
+[[package]]
+name = "base64"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
+
 [[package]]
 name = "bitflags"
 version = "1.3.2"
@@ -1352,6 +1358,8 @@ dependencies = [
name = "kata-types"
version = "0.1.0"
dependencies = [
 "anyhow",
 "base64",
 "bitmask-enum",
 "byte-unit",
 "glob",
@@ -2288,6 +2296,7 @@ dependencies = [
 "rtnetlink",
 "scopeguard",
 "serde",
 "serde_json",
 "slog",
 "slog-scope",
 "test-utils",
@@ -11,14 +11,15 @@ use share_virtio_fs_inline::ShareVirtioFsInline;
 mod share_virtio_fs_standalone;
 use share_virtio_fs_standalone::ShareVirtioFsStandalone;
 mod utils;
+use tokio::sync::Mutex;
 pub use utils::{do_get_guest_path, do_get_guest_share_path, get_host_rw_shared_path};
 mod virtio_fs_share_mount;
 use virtio_fs_share_mount::VirtiofsShareMount;
 
-use std::sync::Arc;
+use std::{collections::HashMap, fmt::Debug, path::PathBuf, sync::Arc};
 
 use agent::Storage;
-use anyhow::{anyhow, Context, Result};
+use anyhow::{anyhow, Context, Ok, Result};
 use async_trait::async_trait;
 use hypervisor::Hypervisor;
 use kata_types::config::hypervisor::SharedFsInfo;
@@ -43,8 +44,10 @@ pub trait ShareFs: Send + Sync {
     async fn setup_device_before_start_vm(&self, h: &dyn Hypervisor) -> Result<()>;
     async fn setup_device_after_start_vm(&self, h: &dyn Hypervisor) -> Result<()>;
     async fn get_storages(&self) -> Result<Vec<Storage>>;
+    fn mounted_info_set(&self) -> Arc<Mutex<HashMap<String, MountedInfo>>>;
 }
 
+#[derive(Debug)]
 pub struct ShareFsRootfsConfig {
     // TODO: for nydus v5/v6 need to update ShareFsMount
     pub cid: String,
@@ -54,6 +57,7 @@ pub struct ShareFsRootfsConfig {
     pub is_rafs: bool,
 }
 
+#[derive(Debug)]
 pub struct ShareFsVolumeConfig {
     pub cid: String,
     pub source: String,
@@ -69,10 +73,61 @@ pub struct ShareFsMountResult {
     pub storages: Vec<agent::Storage>,
 }
 
+/// Save mounted info for sandbox-level shared files.
+#[derive(Clone, Debug)]
+pub struct MountedInfo {
+    // Guest path
+    pub guest_path: PathBuf,
+    // Ref count of containers that uses this volume with read only permission
+    pub ro_ref_count: usize,
+    // Ref count of containers that uses this volume with read write permission
+    pub rw_ref_count: usize,
+}
+
+impl MountedInfo {
+    pub fn new(guest_path: PathBuf, readonly: bool) -> Self {
+        Self {
+            guest_path,
+            ro_ref_count: if readonly { 1 } else { 0 },
+            rw_ref_count: if readonly { 0 } else { 1 },
+        }
+    }
+
+    /// Check if the mount has read only permission
+    pub fn readonly(&self) -> bool {
+        self.rw_ref_count == 0
+    }
+
+    /// Ref count for all permissions
+    pub fn ref_count(&self) -> usize {
+        self.ro_ref_count + self.rw_ref_count
+    }
+
+    // File/dir name in the form of "sandbox-<uuid>-<file/dir name>"
+    pub fn file_name(&self) -> Result<String> {
+        match self.guest_path.file_name() {
+            Some(file_name) => match file_name.to_str() {
+                Some(file_name) => Ok(file_name.to_owned()),
+                None => Err(anyhow!("failed to get string from {:?}", file_name)),
+            },
+            None => Err(anyhow!(
+                "failed to get file name from the guest_path {:?}",
+                self.guest_path
+            )),
+        }
+    }
+}
+
 #[async_trait]
 pub trait ShareFsMount: Send + Sync {
     async fn share_rootfs(&self, config: ShareFsRootfsConfig) -> Result<ShareFsMountResult>;
     async fn share_volume(&self, config: ShareFsVolumeConfig) -> Result<ShareFsMountResult>;
+    /// Upgrade to readwrite permission
+    async fn upgrade_to_rw(&self, file_name: &str) -> Result<()>;
+    /// Downgrade to readonly permission
+    async fn downgrade_to_ro(&self, file_name: &str) -> Result<()>;
+    /// Umount the volume
+    async fn umount(&self, file_name: &str) -> Result<()>;
 }
 
 pub fn new(id: &str, config: &SharedFsInfo) -> Result<Arc<dyn ShareFs>> {
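
MountedInfo is the bookkeeping record behind sandbox-level sharing: one entry per host source, with separate readonly and readwrite reference counts. A small self-contained walkthrough of how those counters drive upgrade, downgrade, and umount decisions; this is a trimmed copy of the struct above with guest_path and file_name omitted, for illustration only:

// Trimmed copy of MountedInfo from the hunk above (illustration only).
#[derive(Clone, Debug)]
struct MountedInfo {
    ro_ref_count: usize,
    rw_ref_count: usize,
}

impl MountedInfo {
    fn new(readonly: bool) -> Self {
        Self {
            ro_ref_count: if readonly { 1 } else { 0 },
            rw_ref_count: if readonly { 0 } else { 1 },
        }
    }
    // The shared mount only needs to stay readonly while nobody holds it readwrite.
    fn readonly(&self) -> bool {
        self.rw_ref_count == 0
    }
    fn ref_count(&self) -> usize {
        self.ro_ref_count + self.rw_ref_count
    }
}

fn main() {
    // First container in the sandbox mounts the volume readonly.
    let mut info = MountedInfo::new(true);
    assert!(info.readonly());

    // A second container asks for readwrite: the shared mount must be
    // upgraded (bind_remount(.., false)) and the rw count goes up.
    info.rw_ref_count += 1;
    assert!(!info.readonly());

    // The readwrite container exits: the mount can be downgraded again.
    info.rw_ref_count -= 1;
    assert!(info.readonly() && info.ref_count() == 1);

    // Last reference gone: the sandbox-level mount can be umounted.
    info.ro_ref_count -= 1;
    assert_eq!(info.ref_count(), 0);
}
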
@@ -4,11 +4,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+use std::collections::HashMap;
+
 use agent::Storage;
 use anyhow::{Context, Result};
 use async_trait::async_trait;
 use hypervisor::Hypervisor;
 use kata_types::config::hypervisor::SharedFsInfo;
+use tokio::sync::Mutex;
 
 use super::{
     share_virtio_fs::{
@@ -30,6 +33,7 @@ pub struct ShareVirtioFsInlineConfig {
 pub struct ShareVirtioFsInline {
     config: ShareVirtioFsInlineConfig,
     share_fs_mount: Arc<dyn ShareFsMount>,
+    mounted_info_set: Arc<Mutex<HashMap<String, MountedInfo>>>,
 }
 
 impl ShareVirtioFsInline {
@@ -37,6 +41,7 @@ impl ShareVirtioFsInline {
         Ok(Self {
             config: ShareVirtioFsInlineConfig { id: id.to_string() },
             share_fs_mount: Arc::new(VirtiofsShareMount::new(id)),
+            mounted_info_set: Arc::new(Mutex::new(HashMap::new())),
         })
     }
 }
@@ -77,4 +82,8 @@ impl ShareFs for ShareVirtioFsInline {
         storages.push(shared_volume);
         Ok(storages)
     }
+
+    fn mounted_info_set(&self) -> Arc<Mutex<HashMap<String, MountedInfo>>> {
+        self.mounted_info_set.clone()
+    }
 }
@@ -4,7 +4,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-use std::{process::Stdio, sync::Arc};
+use std::{collections::HashMap, process::Stdio, sync::Arc};
 
 use agent::Storage;
 use anyhow::{anyhow, Context, Result};
@@ -16,13 +16,13 @@ use tokio::{
     process::{Child, Command},
     sync::{
         mpsc::{channel, Receiver, Sender},
-        RwLock,
+        Mutex, RwLock,
     },
 };
 
 use super::{
     share_virtio_fs::generate_sock_path, utils::ensure_dir_exist, utils::get_host_ro_shared_path,
-    virtio_fs_share_mount::VirtiofsShareMount, ShareFs, ShareFsMount,
+    virtio_fs_share_mount::VirtiofsShareMount, MountedInfo, ShareFs, ShareFsMount,
 };
 
 #[derive(Debug, Clone)]
@@ -38,14 +38,16 @@ pub struct ShareVirtioFsStandaloneConfig {
     pub virtio_fs_extra_args: Vec<String>,
 }
 
-#[derive(Default)]
+#[derive(Default, Debug)]
 struct ShareVirtioFsStandaloneInner {
     pid: Option<u32>,
 }
 
 pub(crate) struct ShareVirtioFsStandalone {
     inner: Arc<RwLock<ShareVirtioFsStandaloneInner>>,
     config: ShareVirtioFsStandaloneConfig,
     share_fs_mount: Arc<dyn ShareFsMount>,
+    mounted_info_set: Arc<Mutex<HashMap<String, MountedInfo>>>,
 }
 
 impl ShareVirtioFsStandalone {
@@ -60,6 +62,7 @@ impl ShareVirtioFsStandalone {
                 virtio_fs_extra_args: config.virtio_fs_extra_args.clone(),
             },
             share_fs_mount: Arc::new(VirtiofsShareMount::new(id)),
+            mounted_info_set: Arc::new(Mutex::new(HashMap::new())),
         })
     }
 
@@ -172,4 +175,8 @@ impl ShareFs for ShareVirtioFsStandalone {
     async fn get_storages(&self) -> Result<Vec<Storage>> {
         Ok(vec![])
     }
+
+    fn mounted_info_set(&self) -> Arc<Mutex<HashMap<String, MountedInfo>>> {
+        self.mounted_info_set.clone()
+    }
 }
@@ -18,6 +18,7 @@ pub(crate) fn ensure_dir_exist(path: &Path) -> Result<()> {
     Ok(())
 }
 
+/// Bind mount the original path to the runtime directory.
 pub(crate) fn share_to_guest(
     // absolute path for source
     source: &str,
@@ -37,7 +38,7 @@ pub(crate) fn share_to_guest(
     // to remount the read only dir mount point directly.
     if readonly {
         let dst = do_get_host_path(target, sid, cid, is_volume, true);
-        mount::bind_remount_read_only(&dst).context("bind remount readonly")?;
+        mount::bind_remount(&dst, readonly).context("bind remount readonly")?;
     }
 
     Ok(do_get_guest_path(target, cid, is_volume, is_rafs))
@@ -7,6 +7,7 @@
 use agent::Storage;
 use anyhow::{anyhow, Context, Result};
 use async_trait::async_trait;
+use kata_sys_util::mount::{bind_remount, umount_timeout};
 use kata_types::k8s::is_watchable_mount;
 use kata_types::mount;
 use nix::sys::stat::stat;
@@ -19,10 +20,12 @@ const WATCHABLE_BIND_DEV_TYPE: &str = "watchable-bind";
 const EPHEMERAL_PATH: &str = "/run/kata-containers/sandbox/ephemeral";
 
 use super::{
-    utils, ShareFsMount, ShareFsMountResult, ShareFsRootfsConfig, ShareFsVolumeConfig,
+    utils::{self, do_get_host_path},
+    ShareFsMount, ShareFsMountResult, ShareFsRootfsConfig, ShareFsVolumeConfig,
     KATA_GUEST_SHARE_DIR, PASSTHROUGH_FS_DIR,
 };
 
+#[derive(Debug)]
 pub struct VirtiofsShareMount {
     id: String,
 }
@@ -166,4 +169,35 @@ impl ShareFsMount for VirtiofsShareMount {
             storages: vec![],
         })
     }
+
+    async fn upgrade_to_rw(&self, file_name: &str) -> Result<()> {
+        // Remount readonly directory with readwrite permission
+        let host_dest = do_get_host_path(file_name, &self.id, "", true, true);
+        bind_remount(&host_dest, false)
+            .context("remount readonly directory with readwrite permission")?;
+        // Remount readwrite directory with readwrite permission
+        let host_dest = do_get_host_path(file_name, &self.id, "", true, false);
+        bind_remount(&host_dest, false)
+            .context("remount readwrite directory with readwrite permission")?;
+        Ok(())
+    }
+
+    async fn downgrade_to_ro(&self, file_name: &str) -> Result<()> {
+        // Remount readwrite directory with readonly permission
+        let host_dest = do_get_host_path(file_name, &self.id, "", true, false);
+        bind_remount(&host_dest, true)
+            .context("remount readwrite directory with readonly permission")?;
+        // Remount readonly directory with readonly permission
+        let host_dest = do_get_host_path(file_name, &self.id, "", true, true);
+        bind_remount(&host_dest, true)
+            .context("remount readonly directory with readonly permission")?;
+        Ok(())
+    }
+
+    async fn umount(&self, file_name: &str) -> Result<()> {
+        let host_dest = do_get_host_path(file_name, &self.id, "", true, true);
+        umount_timeout(&host_dest, 0).context("Umount readwrite host dest")?;
+        // Umount event will be propagated to ro directory
+        Ok(())
+    }
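
upgrade_to_rw and downgrade_to_ro each remount two host-side paths because do_get_host_path resolves both a readonly and a readwrite view of the shared directory, and the two views must stay consistent. A hedged sketch of that pattern; the directory layout, the `rebind` helper, and `set_volume_readonly` below are assumptions for illustration, not the runtime-rs API:

use anyhow::{Context, Result};
use nix::mount::{mount, MsFlags};

fn rebind(path: &str, readonly: bool) -> Result<()> {
    let mut flags = MsFlags::MS_BIND | MsFlags::MS_REMOUNT;
    if readonly {
        flags |= MsFlags::MS_RDONLY;
    }
    mount(Some(""), path, Some(""), flags, Some(""))
        .with_context(|| format!("remount {path} (readonly = {readonly})"))
}

fn set_volume_readonly(sandbox_dir: &str, file_name: &str, readonly: bool) -> Result<()> {
    // Both host-side views get remounted, mirroring the two do_get_host_path calls.
    for view in ["ro", "rw"] {
        rebind(&format!("{sandbox_dir}/{view}/{file_name}"), readonly)?;
    }
    Ok(())
}

fn main() -> Result<()> {
    // Example only; needs existing bind mounts and root privileges.
    set_volume_readonly("/run/sandbox-shared", "sandbox-1234-data", false)
}
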
@@ -5,9 +5,11 @@
 //
 
 use anyhow::Result;
+use async_trait::async_trait;
 
 use super::Volume;
 
+#[derive(Debug)]
 pub(crate) struct BlockVolume {}
 
 /// BlockVolume: block device volume
@@ -17,6 +19,7 @@ impl BlockVolume {
     }
 }
 
+#[async_trait]
 impl Volume for BlockVolume {
     fn get_volume_mount(&self) -> anyhow::Result<Vec<oci::Mount>> {
         todo!()
@@ -26,8 +29,9 @@ impl Volume for BlockVolume {
         todo!()
     }
 
-    fn cleanup(&self) -> Result<()> {
-        todo!()
+    async fn cleanup(&self) -> Result<()> {
+        warn!(sl!(), "Cleaning up BlockVolume is still unimplemented.");
+        Ok(())
     }
 }
@@ -5,9 +5,11 @@
 //
 
 use anyhow::Result;
+use async_trait::async_trait;
 
 use super::Volume;
 
+#[derive(Debug)]
 pub(crate) struct DefaultVolume {
     mount: oci::Mount,
 }
@@ -21,6 +23,7 @@ impl DefaultVolume {
     }
 }
 
+#[async_trait]
 impl Volume for DefaultVolume {
     fn get_volume_mount(&self) -> anyhow::Result<Vec<oci::Mount>> {
         Ok(vec![self.mount.clone()])
@@ -30,7 +33,8 @@ impl Volume for DefaultVolume {
         Ok(vec![])
     }
 
-    fn cleanup(&self) -> Result<()> {
-        todo!()
+    async fn cleanup(&self) -> Result<()> {
+        warn!(sl!(), "Cleaning up DefaultVolume is still unimplemented.");
+        Ok(())
     }
 }
@@ -8,6 +8,7 @@ mod block_volume;
 mod default_volume;
 mod share_fs_volume;
 mod shm_volume;
+use async_trait::async_trait;
 
 use std::{sync::Arc, vec::Vec};
 
@@ -16,10 +17,11 @@ use tokio::sync::RwLock;
 
 use crate::share_fs::ShareFs;
 
+#[async_trait]
 pub trait Volume: Send + Sync {
     fn get_volume_mount(&self) -> Result<Vec<oci::Mount>>;
     fn get_storage(&self) -> Result<Vec<agent::Storage>>;
-    fn cleanup(&self) -> Result<()>;
+    async fn cleanup(&self) -> Result<()>;
 }
 
 #[derive(Default)]
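
Making cleanup() async (via async_trait) lets volume implementations await share-fs operations such as remounts and umounts instead of blocking, and it is what allows ContainerInner::clean_volumes() further down to drive cleanup from an async context. A minimal sketch of the pattern with a stand-in trait; NoopVolume and the main function are illustrative, not runtime-rs code:

use anyhow::Result;
use async_trait::async_trait;

#[async_trait]
trait Volume: Send + Sync {
    async fn cleanup(&self) -> Result<()>;
}

struct NoopVolume;

#[async_trait]
impl Volume for NoopVolume {
    async fn cleanup(&self) -> Result<()> {
        // A real volume would await share-fs downgrade/umount calls here.
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let volumes: Vec<Box<dyn Volume>> = vec![Box::new(NoopVolume)];
    for v in volumes.iter() {
        v.cleanup().await?; // call sites now need .await
    }
    Ok(())
}
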
@@ -4,12 +4,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-use std::{path::Path, sync::Arc};
+use std::{
+    path::{Path, PathBuf},
+    str::FromStr,
+    sync::{Arc, Weak},
+};
 
 use anyhow::{anyhow, Context, Result};
+use async_trait::async_trait;
 
 use super::Volume;
-use crate::share_fs::{ShareFs, ShareFsVolumeConfig};
+use crate::share_fs::{MountedInfo, ShareFs, ShareFsVolumeConfig};
 use kata_types::mount;
 
 // copy file to container's rootfs if filesystem sharing is not supported, otherwise
@@ -19,6 +24,7 @@ use kata_types::mount;
 // device nodes to the guest.
 // skip the volumes whose source had already set to guest share dir.
 pub(crate) struct ShareFsVolume {
+    share_fs: Option<Weak<dyn ShareFs>>,
     mounts: Vec<oci::Mount>,
     storages: Vec<agent::Storage>,
 }
@@ -29,10 +35,12 @@ impl ShareFsVolume {
         m: &oci::Mount,
         cid: &str,
     ) -> Result<Self> {
+        // The file_name is in the format of "sandbox-{uuid}-{file_name}"
         let file_name = Path::new(&m.source).file_name().unwrap().to_str().unwrap();
-        let file_name = generate_mount_path(cid, file_name);
+        let file_name = generate_mount_path("sandbox", file_name);
 
         let mut volume = Self {
+            share_fs: share_fs.as_ref().map(Arc::downgrade),
             mounts: vec![],
            storages: vec![],
         };
@@ -59,36 +67,87 @@ impl ShareFsVolume {
                 }
             }
             Some(share_fs) => {
+                let readonly = m.options.iter().any(|opt| opt == "ro");
+
                 let share_fs_mount = share_fs.get_share_fs_mount();
-                let mount_result = share_fs_mount
-                    .share_volume(ShareFsVolumeConfig {
-                        cid: cid.to_string(),
-                        source: m.source.clone(),
-                        target: file_name,
-                        readonly: m.options.iter().any(|o| *o == "ro"),
-                        mount_options: m.options.clone(),
-                        mount: m.clone(),
-                        is_rafs: false,
-                    })
-                    .await
-                    .context("share fs volume")?;
-
-                // set storages for the volume
-                volume.storages = mount_result.storages;
-
-                // set mount for the volume
-                volume.mounts.push(oci::Mount {
-                    destination: m.destination.clone(),
-                    r#type: "bind".to_string(),
-                    source: mount_result.guest_path,
-                    options: m.options.clone(),
-                });
+                let mounted_info_set = share_fs.mounted_info_set();
+                let mut mounted_info_set = mounted_info_set.lock().await;
+                if let Some(mut mounted_info) = mounted_info_set.get(&m.source).cloned() {
+                    // Mounted at least once
+                    let guest_path = mounted_info
+                        .guest_path
+                        .clone()
+                        .as_os_str()
+                        .to_str()
+                        .unwrap()
+                        .to_owned();
+                    if !readonly && mounted_info.readonly() {
+                        // The current mount should be upgraded to readwrite permission
+                        info!(
+                            sl!(),
+                            "The mount will be upgraded, mount = {:?}, cid = {}", m, cid
+                        );
+                        share_fs_mount
+                            .upgrade_to_rw(
+                                &mounted_info
+                                    .file_name()
+                                    .context("get name of mounted info")?,
+                            )
+                            .await
+                            .context("upgrade mount")?;
+                    }
+                    if readonly {
+                        mounted_info.ro_ref_count += 1;
+                    } else {
+                        mounted_info.rw_ref_count += 1;
+                    }
+                    mounted_info_set.insert(m.source.clone(), mounted_info);
+
+                    volume.mounts.push(oci::Mount {
+                        destination: m.destination.clone(),
+                        r#type: "bind".to_string(),
+                        source: guest_path,
+                        options: m.options.clone(),
+                    })
+                } else {
+                    // Not mounted ever
+                    let mount_result = share_fs_mount
+                        .share_volume(ShareFsVolumeConfig {
+                            // The scope of shared volume is sandbox
+                            cid: String::from(""),
+                            source: m.source.clone(),
+                            target: file_name.clone(),
+                            readonly,
+                            mount_options: m.options.clone(),
+                            mount: m.clone(),
+                            is_rafs: false,
+                        })
+                        .await
+                        .context("mount shared volume")?;
+                    let mounted_info = MountedInfo::new(
+                        PathBuf::from_str(&mount_result.guest_path)
+                            .context("convert guest path")?,
+                        readonly,
+                    );
+                    mounted_info_set.insert(m.source.clone(), mounted_info);
+                    // set storages for the volume
+                    volume.storages = mount_result.storages;
+
+                    // set mount for the volume
+                    volume.mounts.push(oci::Mount {
+                        destination: m.destination.clone(),
+                        r#type: "bind".to_string(),
+                        source: mount_result.guest_path,
+                        options: m.options.clone(),
+                    });
+                }
             }
         }
         Ok(volume)
     }
 }
 
+#[async_trait]
 impl Volume for ShareFsVolume {
     fn get_volume_mount(&self) -> anyhow::Result<Vec<oci::Mount>> {
         Ok(self.mounts.clone())
@@ -98,8 +157,75 @@ impl Volume for ShareFsVolume {
         Ok(self.storages.clone())
     }
 
-    fn cleanup(&self) -> Result<()> {
-        todo!()
+    async fn cleanup(&self) -> Result<()> {
+        if self.share_fs.is_none() {
+            return Ok(());
+        }
+        let share_fs = match self.share_fs.as_ref().unwrap().upgrade() {
+            Some(share_fs) => share_fs,
+            None => return Err(anyhow!("The share_fs was released unexpectedly")),
+        };
+
+        let mounted_info_set = share_fs.mounted_info_set();
+        let mut mounted_info_set = mounted_info_set.lock().await;
+        for m in self.mounts.iter() {
+            let (host_source, mut mounted_info) = match mounted_info_set
+                .iter()
+                .find(|entry| entry.1.guest_path.as_os_str().to_str().unwrap() == m.source)
+                .map(|entry| (entry.0.to_owned(), entry.1.clone()))
+            {
+                Some(entry) => entry,
+                None => {
+                    warn!(
+                        sl!(),
+                        "The mounted info for guest path {} not found", m.source
+                    );
+                    continue;
+                }
+            };
+
+            let old_readonly = mounted_info.readonly();
+
+            if m.options.iter().any(|opt| *opt == "ro") {
+                mounted_info.ro_ref_count -= 1;
+            } else {
+                mounted_info.rw_ref_count -= 1;
+            }
+
+            debug!(
+                sl!(),
+                "Ref count for {} was updated to {} due to volume cleanup",
+                host_source,
+                mounted_info.ref_count()
+            );
+            let share_fs_mount = share_fs.get_share_fs_mount();
+            let file_name = mounted_info.file_name()?;
+
+            if mounted_info.ref_count() > 0 {
+                // Downgrade to readonly if no container needs readwrite permission
+                if !old_readonly && mounted_info.readonly() {
+                    info!(sl!(), "Downgrade {} to readonly due to no container that needs readwrite permission", host_source);
+                    share_fs_mount
+                        .downgrade_to_ro(&file_name)
+                        .await
+                        .context("Downgrade volume")?;
+                }
+                mounted_info_set.insert(host_source.clone(), mounted_info);
+            } else {
+                info!(
+                    sl!(),
+                    "The path will be umounted due to no references, host_source = {}", host_source
+                );
+                mounted_info_set.remove(&host_source);
+                // Umount the volume
+                share_fs_mount
+                    .umount(&file_name)
+                    .await
+                    .context("Umount volume")?
+            }
+        }
+
+        Ok(())
     }
 }
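
Taken together, ShareFsVolume::new() and cleanup() implement a mount-once-per-sandbox policy: the first container to use a host source triggers share_volume(), later containers reuse the existing guest path and only adjust the reference counts, and a readwrite request against a currently readonly mount triggers an upgrade. A condensed, self-contained model of that decision; the types, the guest path, and the `decide` helper below are assumptions for illustration, not runtime-rs APIs:

use std::collections::HashMap;

struct MountedInfo {
    guest_path: String,
    ro_ref_count: usize,
    rw_ref_count: usize,
}

enum Action {
    Share,           // first user: perform share_volume()
    Reuse,           // reuse the existing guest path as-is
    UpgradeAndReuse, // remount readwrite, then reuse
}

fn decide(
    set: &mut HashMap<String, MountedInfo>,
    source: &str,
    readonly: bool,
) -> (Action, String) {
    match set.get_mut(source) {
        Some(info) => {
            let action = if !readonly && info.rw_ref_count == 0 {
                Action::UpgradeAndReuse
            } else {
                Action::Reuse
            };
            if readonly {
                info.ro_ref_count += 1;
            } else {
                info.rw_ref_count += 1;
            }
            (action, info.guest_path.clone())
        }
        None => {
            // Illustrative guest path only; the real one comes from share_volume().
            let guest_path = format!("/run/kata-containers/shared/{source}");
            set.insert(
                source.to_string(),
                MountedInfo {
                    guest_path: guest_path.clone(),
                    ro_ref_count: if readonly { 1 } else { 0 },
                    rw_ref_count: if readonly { 0 } else { 1 },
                },
            );
            (Action::Share, guest_path)
        }
    }
}

fn main() {
    let mut set = HashMap::new();
    let (first, _) = decide(&mut set, "/data", true);   // container A, readonly
    let (second, _) = decide(&mut set, "/data", false); // container B, readwrite
    assert!(matches!(first, Action::Share));
    assert!(matches!(second, Action::UpgradeAndReuse));
}
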
@@ -7,6 +7,7 @@
 use std::path::Path;
 
 use anyhow::Result;
+use async_trait::async_trait;
 
 use super::Volume;
 use crate::share_fs::DEFAULT_KATA_GUEST_SANDBOX_DIR;
@@ -19,6 +20,7 @@ pub const DEFAULT_SHM_SIZE: u64 = 65536 * 1024;
 // KATA_EPHEMERAL_DEV_TYPE creates a tmpfs backed volume for sharing files between containers.
 pub const KATA_EPHEMERAL_DEV_TYPE: &str = "ephemeral";
 
+#[derive(Debug)]
 pub(crate) struct ShmVolume {
     mount: oci::Mount,
     storage: Option<agent::Storage>,
@@ -82,6 +84,7 @@ impl ShmVolume {
     }
 }
 
+#[async_trait]
 impl Volume for ShmVolume {
     fn get_volume_mount(&self) -> anyhow::Result<Vec<oci::Mount>> {
         Ok(vec![self.mount.clone()])
@@ -96,8 +99,9 @@ impl Volume for ShmVolume {
         Ok(s)
     }
 
-    fn cleanup(&self) -> Result<()> {
-        todo!()
+    async fn cleanup(&self) -> Result<()> {
+        warn!(sl!(), "Cleaning up ShmVolume is still unimplemented.");
+        Ok(())
     }
 }
@@ -258,7 +258,7 @@ impl Container {
         signal: u32,
         all: bool,
     ) -> Result<()> {
-        let inner = self.inner.read().await;
+        let mut inner = self.inner.write().await;
         inner.signal_process(container_process, signal, all).await
     }
 
@@ -233,7 +233,7 @@ impl ContainerInner {
     }
 
     pub(crate) async fn signal_process(
-        &self,
+        &mut self,
         process: &ContainerProcess,
         signal: u32,
         all: bool,
@@ -247,6 +247,9 @@ impl ContainerInner {
         self.agent
             .signal_process(agent::SignalProcessRequest { process_id, signal })
             .await?;
+
+        self.clean_volumes().await.context("clean volumes")?;
+
         Ok(())
     }
 
@@ -268,4 +271,23 @@ impl ContainerInner {
 
         Ok(())
     }
+
+    async fn clean_volumes(&mut self) -> Result<()> {
+        let mut unhandled = Vec::new();
+        for v in self.volumes.iter() {
+            if let Err(err) = v.cleanup().await {
+                unhandled.push(Arc::clone(v));
+                warn!(
+                    sl!(),
+                    "Failed to clean volume {:?}, error = {:?}",
+                    v.get_volume_mount(),
+                    err
+                );
+            }
+        }
+        if !unhandled.is_empty() {
+            self.volumes = unhandled;
+        }
+        Ok(())
+    }
 }