agent: Make devcg_info optional for LinuxContainer::new()

runk is a standard OCI runtime that isn't aware of the concept of a sandbox.
Therefore, the `devcg_info` argument of `LinuxContainer::new()` is
unnecessary to provide.

Signed-off-by: Xuewei Niu <niuxuewei.nxw@antgroup.com>
This commit is contained in:
Xuewei Niu 2023-08-10 15:44:50 +08:00
parent ef4c3844a3
commit cec8044744
7 changed files with 87 additions and 90 deletions

View File

@ -65,7 +65,7 @@ pub struct Manager {
#[serde(skip)] #[serde(skip)]
cgroup: cgroups::Cgroup, cgroup: cgroups::Cgroup,
#[serde(skip)] #[serde(skip)]
pod_cgroup: cgroups::Cgroup, pod_cgroup: Option<cgroups::Cgroup>,
#[serde(skip)] #[serde(skip)]
devcg_whitelist: bool, devcg_whitelist: bool,
} }
@ -135,7 +135,9 @@ impl CgroupManager for Manager {
); );
// apply resources // apply resources
self.pod_cgroup.apply(pod_res)?; if let Some(pod_cg) = self.pod_cgroup.as_ref() {
pod_cg.apply(pod_res)?;
}
self.cgroup.apply(res)?; self.cgroup.apply(res)?;
Ok(()) Ok(())
@ -1034,64 +1036,70 @@ impl Manager {
pub fn new( pub fn new(
cpath: &str, cpath: &str,
spec: &Spec, spec: &Spec,
devcg_info: Arc<RwLock<DevicesCgroupInfo>>, devcg_info: Option<Arc<RwLock<DevicesCgroupInfo>>>,
) -> Result<Self> { ) -> Result<Self> {
let (paths, mounts) = Self::get_paths_and_mounts(cpath)?; let (paths, mounts) = Self::get_paths_and_mounts(cpath)?;
// Do not expect poisoning lock // Do not expect poisoning lock
let mut devices_group_info = devcg_info.write().unwrap(); let mut devices_group_info = devcg_info.as_ref().map(|i| i.write().unwrap());
let pod_cgroup: Option<Cgroup>;
// Cgroup path of parent of container if let Some(devices_group_info) = devices_group_info.as_mut() {
let pod_cpath = PathBuf::from(cpath) // Cgroup path of parent of container
.parent() let pod_cpath = PathBuf::from(cpath)
.context("Get parent of cgroup path")? .parent()
.display() .context("Get parent of cgroup path")?
.to_string(); .display()
.to_string();
// Create a cgroup for the pod if not exists. // Create a cgroup for the pod if not exists.
// Note that this step MUST be done before the pause container's // Note that this step MUST be done before the pause container's
// cgroup created, since children inherit upper nodes' rules, which // cgroup created, since children inherit upper nodes' rules, which
// have excessive permission. You'll feel painful to shrink upper // have excessive permission. You'll feel painful to shrink upper
// nodes' permission. // nodes' permission.
let pod_cg = load_cgroup(cgroups::hierarchies::auto(), &pod_cpath); pod_cgroup = Some(load_cgroup(cgroups::hierarchies::auto(), &pod_cpath));
let pod_cg = pod_cgroup.as_ref().unwrap();
// The default is blacklist mode. // The default is blacklist mode.
let is_whitelist_mode = Self::devcg_whitelist_rule(spec).unwrap_or(false); let is_whitelist_mode = Self::devcg_whitelist_rule(spec).unwrap_or(false);
if devices_group_info.inited { if devices_group_info.inited {
// Do nothing for whitelist except for updating whitelist // Do nothing for whitelist except for updating whitelist
// field of devices_group_info. // field of devices_group_info.
debug!(sl(), "Devices cgroup has been initialzied."); debug!(sl(), "Devices cgroup has been initialzied.");
// Set allowed all devices to pod cgroup // Set allowed all devices to pod cgroup
if !devices_group_info.whitelist && is_whitelist_mode { if !devices_group_info.whitelist && is_whitelist_mode {
info!( info!(
sl(), sl(),
"Pod devices cgroup is changed to whitelist mode, devices_group_info = {:?}", "Pod devices cgroup is changed to whitelist mode, devices_group_info = {:?}",
devices_group_info devices_group_info
); );
Self::setup_whitelist_mode(&pod_cg) Self::setup_whitelist_mode(pod_cg)
.with_context(|| format!("Setup whitelist mode for {}", pod_cpath))?; .with_context(|| format!("Setup whitelist mode for {}", pod_cpath))?;
devices_group_info.whitelist = true; devices_group_info.whitelist = true;
}
} else {
// This is the first container (aka pause container)
debug!(sl(), "Started to init devices cgroup");
pod_cg.create().context("Create pod cgroup")?;
// Setup blacklist for pod cgroup
if !is_whitelist_mode {
// Setup blacklist rule and allowed default devices for
// blacklist.
Self::setup_blacklist_mode(pod_cg)
.with_context(|| format!("Setup blacklist mode for {}", pod_cpath))?;
} else {
Self::setup_whitelist_mode(pod_cg)
.with_context(|| format!("Setup whitelist mode for {}", pod_cpath))?;
devices_group_info.whitelist = true;
}
devices_group_info.inited = true
} }
} else { } else {
// This is the first container (aka pause container) pod_cgroup = None;
debug!(sl(), "Started to init devices cgroup");
pod_cg.create().context("Create pod cgroup")?;
// Setup blacklist for pod cgroup
if !is_whitelist_mode {
// Setup blacklist rule and allowed default devices for
// blacklist.
Self::setup_blacklist_mode(&pod_cg)
.with_context(|| format!("Setup blacklist mode for {}", pod_cpath))?;
} else {
Self::setup_whitelist_mode(&pod_cg)
.with_context(|| format!("Setup whitelist mode for {}", pod_cpath))?;
devices_group_info.whitelist = true;
}
devices_group_info.inited = true
} }
// Create a cgroup for the container. // Create a cgroup for the container.
@ -1099,9 +1107,11 @@ impl Manager {
// The rules of container cgroup are copied from its parent, which // The rules of container cgroup are copied from its parent, which
// contains some permissions that the container doesn't need. // contains some permissions that the container doesn't need.
// Therefore, resetting the container's devices cgroup is required. // Therefore, resetting the container's devices cgroup is required.
if !devices_group_info.whitelist { if let Some(devices_group_info) = devices_group_info.as_ref() {
Self::setup_blacklist_mode(&cg) if !devices_group_info.whitelist {
.with_context(|| format!("Setup blacklist mode for {}", cpath))?; Self::setup_blacklist_mode(&cg)
.with_context(|| format!("Setup blacklist mode for {}", cpath))?;
}
} }
Ok(Self { Ok(Self {
@ -1110,8 +1120,10 @@ impl Manager {
// rels: paths, // rels: paths,
cpath: cpath.to_string(), cpath: cpath.to_string(),
cgroup: cg, cgroup: cg,
pod_cgroup: pod_cg, pod_cgroup,
devcg_whitelist: devices_group_info.whitelist, devcg_whitelist: devices_group_info
.map(|info| info.whitelist)
.unwrap_or(false),
}) })
} }
@ -1123,21 +1135,13 @@ impl Manager {
pub fn new_read_only(cpath: &str) -> Result<Self> { pub fn new_read_only(cpath: &str) -> Result<Self> {
let (paths, mounts) = Self::get_paths_and_mounts(cpath)?; let (paths, mounts) = Self::get_paths_and_mounts(cpath)?;
// Cgroup path of parent of container
let pod_cpath = PathBuf::from(cpath)
.parent()
.context("Get parent of cgroup path")?
.display()
.to_string();
let pod_cg = load_cgroup(cgroups::hierarchies::auto(), &pod_cpath);
let cg = load_cgroup(cgroups::hierarchies::auto(), cpath); let cg = load_cgroup(cgroups::hierarchies::auto(), cpath);
Ok(Self { Ok(Self {
paths, paths,
mounts, mounts,
cpath: cpath.to_string(), cpath: cpath.to_string(),
pod_cgroup: pod_cg, pod_cgroup: None,
cgroup: cg, cgroup: cg,
devcg_whitelist: false, devcg_whitelist: false,
}) })
@ -1489,8 +1493,9 @@ mod tests {
}), }),
..Default::default() ..Default::default()
}; };
managers managers.push(
.push(Manager::new(&tc.cpath[cid], &spec, sandbox.devcg_info.clone()).unwrap()); Manager::new(&tc.cpath[cid], &spec, Some(sandbox.devcg_info.clone())).unwrap(),
);
let devcg_info = sandbox.devcg_info.read().unwrap(); let devcg_info = sandbox.devcg_info.read().unwrap();
assert!(devcg_info.inited); assert!(devcg_info.inited);
@ -1501,7 +1506,6 @@ mod tests {
); );
drop(devcg_info); drop(devcg_info);
// TODO: Assertions go here
let pod_devices_list = Command::new("cat") let pod_devices_list = Command::new("cat")
.arg("/sys/fs/cgroup/devices/kata-agent-fs-manager-test/devices.list") .arg("/sys/fs/cgroup/devices/kata-agent-fs-manager-test/devices.list")
.output() .output()
@ -1526,7 +1530,8 @@ mod tests {
managers managers
.iter() .iter()
.for_each(|manager| manager.cgroup.delete().unwrap()); .for_each(|manager| manager.cgroup.delete().unwrap());
managers[0].pod_cgroup.delete().unwrap(); // The pod_cgroup must not be None
managers[0].pod_cgroup.as_ref().unwrap().delete().unwrap();
} }
} }
} }

View File

@ -78,7 +78,7 @@ impl Manager {
pub fn new( pub fn new(
cpath: &str, cpath: &str,
_spec: &Spec, _spec: &Spec,
_devcg_info: Arc<RwLock<DevicesCgroupInfo>>, _devcg_info: Option<Arc<RwLock<DevicesCgroupInfo>>>,
) -> Result<Self> { ) -> Result<Self> {
Ok(Self { Ok(Self {
paths: HashMap::new(), paths: HashMap::new(),

View File

@ -1494,7 +1494,7 @@ impl LinuxContainer {
pub fn new<T: Into<String> + Display + Clone>( pub fn new<T: Into<String> + Display + Clone>(
id: T, id: T,
base: T, base: T,
devcg_info: Arc<RwLock<DevicesCgroupInfo>>, devcg_info: Option<Arc<RwLock<DevicesCgroupInfo>>>,
config: Config, config: Config,
logger: &Logger, logger: &Logger,
) -> Result<Self> { ) -> Result<Self> {
@ -1775,7 +1775,7 @@ mod tests {
LinuxContainer::new( LinuxContainer::new(
"some_id", "some_id",
&dir.path().join("rootfs").to_str().unwrap(), &dir.path().join("rootfs").to_str().unwrap(),
Arc::new(RwLock::new(DevicesCgroupInfo::default())), None,
create_dummy_opts(), create_dummy_opts(),
&slog_scope::logger(), &slog_scope::logger(),
), ),
@ -1805,14 +1805,10 @@ mod tests {
#[test] #[test]
fn test_linuxcontainer_pause() { fn test_linuxcontainer_pause() {
let ret = new_linux_container_and_then(|mut c: LinuxContainer| { let ret = new_linux_container_and_then(|mut c: LinuxContainer| {
c.cgroup_manager = Box::new( c.cgroup_manager =
FsManager::new( Box::new(FsManager::new("", &Spec::default(), None).map_err(|e| {
"", anyhow!(format!("fail to create cgroup manager with path: {:}", e))
&Spec::default(), })?);
Arc::new(RwLock::new(DevicesCgroupInfo::default())),
)
.map_err(|e| anyhow!(format!("fail to create cgroup manager with path: {:}", e)))?,
);
c.pause().map_err(|e| anyhow!(e)) c.pause().map_err(|e| anyhow!(e))
}); });
@ -1834,14 +1830,10 @@ mod tests {
#[test] #[test]
fn test_linuxcontainer_resume() { fn test_linuxcontainer_resume() {
let ret = new_linux_container_and_then(|mut c: LinuxContainer| { let ret = new_linux_container_and_then(|mut c: LinuxContainer| {
c.cgroup_manager = Box::new( c.cgroup_manager =
FsManager::new( Box::new(FsManager::new("", &Spec::default(), None).map_err(|e| {
"", anyhow!(format!("fail to create cgroup manager with path: {:}", e))
&Spec::default(), })?);
Arc::new(RwLock::new(DevicesCgroupInfo::default())),
)
.map_err(|e| anyhow!(format!("fail to create cgroup manager with path: {:}", e)))?,
);
// Change status to paused, this way we can resume it // Change status to paused, this way we can resume it
c.status.transition(ContainerState::Paused); c.status.transition(ContainerState::Paused);
c.resume().map_err(|e| anyhow!(e)) c.resume().map_err(|e| anyhow!(e))

View File

@ -277,7 +277,7 @@ impl AgentService {
let mut ctr: LinuxContainer = LinuxContainer::new( let mut ctr: LinuxContainer = LinuxContainer::new(
cid.as_str(), cid.as_str(),
CONTAINER_BASE, CONTAINER_BASE,
s.devcg_info.clone(), Some(s.devcg_info.clone()),
opts, opts,
&sl(), &sl(),
)?; )?;
@ -1988,7 +1988,6 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::sync::RwLock;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use super::*; use super::*;
@ -1996,7 +1995,6 @@ mod tests {
use nix::mount; use nix::mount;
use nix::sched::{unshare, CloneFlags}; use nix::sched::{unshare, CloneFlags};
use oci::{Hook, Hooks, Linux, LinuxDeviceCgroup, LinuxNamespace, LinuxResources}; use oci::{Hook, Hooks, Linux, LinuxDeviceCgroup, LinuxNamespace, LinuxResources};
use rustjail::cgroups::DevicesCgroupInfo;
use tempfile::{tempdir, TempDir}; use tempfile::{tempdir, TempDir};
use test_utils::{assert_result, skip_if_not_root}; use test_utils::{assert_result, skip_if_not_root};
use ttrpc::{r#async::TtrpcContext, MessageHeader}; use ttrpc::{r#async::TtrpcContext, MessageHeader};
@ -2074,7 +2072,7 @@ mod tests {
LinuxContainer::new( LinuxContainer::new(
"some_id", "some_id",
dir.path().join("rootfs").to_str().unwrap(), dir.path().join("rootfs").to_str().unwrap(),
Arc::new(RwLock::new(DevicesCgroupInfo::default())), None,
create_dummy_opts(), create_dummy_opts(),
&slog_scope::logger(), &slog_scope::logger(),
) )

View File

@ -887,7 +887,7 @@ mod tests {
let container = LinuxContainer::new( let container = LinuxContainer::new(
"some_id", "some_id",
dir.path().join("rootfs").to_str().unwrap(), dir.path().join("rootfs").to_str().unwrap(),
Arc::new(RwLock::new(DevicesCgroupInfo::default())), None,
create_dummy_opts(), create_dummy_opts(),
&slog_scope::logger(), &slog_scope::logger(),
) )

View File

@ -292,9 +292,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]] [[package]]
name = "cgroups-rs" name = "cgroups-rs"
version = "0.3.2" version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b098e7c3a70d03c288fa0a96ccf13e770eb3d78c4cc0e1549b3c13215d5f965" checksum = "1fb3af90c8d48ad5f432d8afb521b5b40c2a2fce46dd60e05912de51c47fba64"
dependencies = [ dependencies = [
"libc", "libc",
"log", "log",

View File

@ -361,6 +361,7 @@ pub fn create_linux_container(
.map(|s| s.to_string()) .map(|s| s.to_string())
.ok_or_else(|| anyhow!("failed to convert bundle path"))? .ok_or_else(|| anyhow!("failed to convert bundle path"))?
.as_str(), .as_str(),
None,
config, config,
logger, logger,
)?; )?;
@ -384,6 +385,7 @@ pub fn load_linux_container(
.to_str() .to_str()
.map(|s| s.to_string()) .map(|s| s.to_string())
.ok_or_else(|| anyhow!("failed to convert a root path"))?, .ok_or_else(|| anyhow!("failed to convert a root path"))?,
None,
status.config.clone(), status.config.clone(),
logger, logger,
)?; )?;