Merge pull request #6331 from jepio/jepio/fix-agent-init-cgroups

rustjail: fix cgroup handling in agent-init mode
Fabiano Fidêncio 2023-03-05 20:29:40 +01:00 committed by GitHub
commit df35f8f885
7 changed files with 46 additions and 21 deletions

View File

@@ -267,6 +267,10 @@ impl CgroupManager for Manager {
     fn as_any(&self) -> Result<&dyn Any> {
         Ok(self)
     }
+
+    fn name(&self) -> &str {
+        "cgroupfs"
+    }
 }

 fn set_network_resources(

View File

@@ -66,6 +66,10 @@ impl CgroupManager for Manager {
     fn as_any(&self) -> Result<&dyn Any> {
         Ok(self)
     }
+
+    fn name(&self) -> &str {
+        "mock"
+    }
 }

 impl Manager {

View File

@@ -52,10 +52,12 @@ pub trait Manager {
     fn as_any(&self) -> Result<&dyn Any> {
         Err(anyhow!("not supported!"))
     }
+
+    fn name(&self) -> &str;
 }

 impl Debug for dyn Manager + Send + Sync {
     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-        write!(f, "CgroupManager")
+        write!(f, "{}", self.name())
     }
 }
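The hunk above is the heart of the refactor: name() becomes a required method on the Manager trait, and the Debug impl for the trait object now reports which backend is in use instead of a fixed string. A minimal, self-contained sketch of the same pattern (not the actual kata-agent code, which also carries the cgroup-management methods on the trait):

    use std::fmt::{self, Debug, Formatter};

    // Trimmed-down stand-in for rustjail's Manager trait: only the new
    // required name() method is kept here.
    trait Manager {
        fn name(&self) -> &str;
    }

    // Debug for the trait object delegates to name(), so a logged manager
    // prints its backend ("cgroupfs", "systemd", "mock") instead of the old
    // fixed "CgroupManager" string.
    impl Debug for dyn Manager + Send + Sync {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            write!(f, "{}", self.name())
        }
    }

    struct FsManager;

    impl Manager for FsManager {
        fn name(&self) -> &str {
            "cgroupfs"
        }
    }

    fn main() {
        let m: Box<dyn Manager + Send + Sync> = Box::new(FsManager);
        println!("{:?}", m); // prints: cgroupfs
    }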

View File

@@ -101,6 +101,10 @@ impl CgroupManager for Manager {
     fn as_any(&self) -> Result<&dyn Any> {
         Ok(self)
     }
+
+    fn name(&self) -> &str {
+        "systemd"
+    }
 }

 impl Manager {

View File

@@ -1473,7 +1473,7 @@ impl LinuxContainer {
     pub fn new<T: Into<String> + Display + Clone>(
         id: T,
         base: T,
-        mut config: Config,
+        config: Config,
         logger: &Logger,
     ) -> Result<Self> {
         let base = base.into();
@@ -1499,26 +1499,18 @@ impl LinuxContainer {
             .context(format!("Cannot change owner of container {} root", id))?;

         let spec = config.spec.as_ref().unwrap();
         let linux = spec.linux.as_ref().unwrap();
-        // determine which cgroup driver to take and then assign to config.use_systemd_cgroup
-        // systemd: "[slice]:[prefix]:[name]"
-        // fs: "/path_a/path_b"
-        let cpath = if SYSTEMD_CGROUP_PATH_FORMAT.is_match(linux.cgroups_path.as_str()) {
-            config.use_systemd_cgroup = true;
+        let cpath = if config.use_systemd_cgroup {
             if linux.cgroups_path.len() == 2 {
                 format!("system.slice:kata_agent:{}", id.as_str())
             } else {
                 linux.cgroups_path.clone()
             }
+        } else if linux.cgroups_path.is_empty() {
+            format!("/{}", id.as_str())
         } else {
-            config.use_systemd_cgroup = false;
-            if linux.cgroups_path.is_empty() {
-                format!("/{}", id.as_str())
-            } else {
-                linux.cgroups_path.clone()
-            }
+            // if we have a systemd cgroup path we need to convert it to a fs cgroup path
+            linux.cgroups_path.replace(':', "/")
         };

         let cgroup_manager: Box<dyn Manager + Send + Sync> = if config.use_systemd_cgroup {
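For readers skimming the diff, the new cpath logic reduces to three cases: trust the caller's use_systemd_cgroup decision, default an empty path to "/<id>", and flatten a systemd-style "[slice]:[prefix]:[name]" path into a cgroupfs path. A simplified, hedged sketch of that selection as a free function (the real code lives inside LinuxContainer::new and reads linux.cgroups_path from the OCI spec):

    // Simplified sketch of the cpath selection above; cgroups_path stands in
    // for linux.cgroups_path from the OCI spec.
    fn cgroup_path(use_systemd_cgroup: bool, cgroups_path: &str, id: &str) -> String {
        if use_systemd_cgroup {
            if cgroups_path.len() == 2 {
                // Presumably a bare "::"-style path: fall back to a default slice.
                format!("system.slice:kata_agent:{}", id)
            } else {
                cgroups_path.to_string()
            }
        } else if cgroups_path.is_empty() {
            format!("/{}", id)
        } else {
            // A systemd-style "[slice]:[prefix]:[name]" path handed to the
            // fs driver is converted into a filesystem-style path; plain fs
            // paths contain no ':' and pass through unchanged.
            cgroups_path.replace(':', "/")
        }
    }

    fn main() {
        assert_eq!(cgroup_path(false, "", "c1"), "/c1");
        assert_eq!(cgroup_path(false, "/kata/c1", "c1"), "/kata/c1");
        assert_eq!(
            cgroup_path(false, "system.slice:kata_agent:c1", "c1"),
            "system.slice/kata_agent/c1"
        );
        assert_eq!(cgroup_path(true, "::", "c1"), "system.slice:kata_agent:c1");
    }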

View File

@@ -339,7 +339,7 @@ async fn start_sandbox(
     sandbox.lock().await.sender = Some(tx);

     // vsock:///dev/vsock, port
-    let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str())?;
+    let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode)?;
     server.start().await?;

     rx.await?;
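The only change on the main.rs side is passing init_mode through to rpc::start. How that flag is derived is not part of this diff; a plausible, hedged sketch is simply checking whether the agent runs as the guest's PID 1:

    // Hedged sketch: detect whether the agent is the guest's init process.
    // The real detection in main.rs is outside this diff; PID 1 is the
    // obvious signal, but treat this as illustrative only.
    fn detect_init_mode() -> bool {
        std::process::id() == 1
    }

    fn main() {
        let init_mode = detect_init_mode();
        println!("running as init: {}", init_mode);
    }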

View File

@@ -36,7 +36,7 @@ use protocols::health::{
 use protocols::types::Interface;
 use protocols::{agent_ttrpc_async as agent_ttrpc, health_ttrpc_async as health_ttrpc};
 use rustjail::cgroups::notifier;
-use rustjail::container::{BaseContainer, Container, LinuxContainer};
+use rustjail::container::{BaseContainer, Container, LinuxContainer, SYSTEMD_CGROUP_PATH_FORMAT};
 use rustjail::process::Process;
 use rustjail::specconv::CreateOpts;
@@ -137,6 +137,7 @@ macro_rules! is_allowed {
 #[derive(Clone, Debug)]
 pub struct AgentService {
     sandbox: Arc<Mutex<Sandbox>>,
+    init_mode: bool,
 }

 impl AgentService {
@@ -210,9 +211,20 @@ impl AgentService {
         // restore the cwd for kata-agent process.
         defer!(unistd::chdir(&olddir).unwrap());

+        // determine which cgroup driver to take and then assign to use_systemd_cgroup
+        // systemd: "[slice]:[prefix]:[name]"
+        // fs: "/path_a/path_b"
+        // If agent is init we can't use systemd cgroup mode, no matter what the host tells us
+        let cgroups_path = oci.linux.as_ref().map_or("", |linux| &linux.cgroups_path);
+        let use_systemd_cgroup = if self.init_mode {
+            false
+        } else {
+            SYSTEMD_CGROUP_PATH_FORMAT.is_match(cgroups_path)
+        };
+
         let opts = CreateOpts {
             cgroup_name: "".to_string(),
-            use_systemd_cgroup: false,
+            use_systemd_cgroup,
             no_pivot_root: s.no_pivot_root,
             no_new_keyring: false,
             spec: Some(oci.clone()),
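This hunk is where the fix lands: the cgroup driver is now chosen in the agent's container-creation path rather than deep inside rustjail, and init mode forces the fs driver because, with the agent as init, no systemd is available in the guest. A hedged sketch of the selection logic (the real SYSTEMD_CGROUP_PATH_FORMAT regex is defined in rustjail and is not shown in this diff; the pattern below is an illustrative stand-in for the "[slice]:[prefix]:[name]" shape and assumes the regex crate):

    use regex::Regex;

    fn select_systemd_cgroup(init_mode: bool, cgroups_path: &str) -> bool {
        // Illustrative stand-in for rustjail's SYSTEMD_CGROUP_PATH_FORMAT:
        // three colon-separated segments, e.g. "system.slice:kata_agent:<id>".
        let systemd_format = Regex::new(r"^[\w.\-]*:[\w.\-]*:[\w.\-]*$").unwrap();

        if init_mode {
            // Agent is PID 1: never use the systemd cgroup driver,
            // whatever path format the host sent.
            false
        } else {
            systemd_format.is_match(cgroups_path)
        }
    }

    fn main() {
        assert!(!select_systemd_cgroup(true, "system.slice:kata_agent:c1"));
        assert!(select_systemd_cgroup(false, "system.slice:kata_agent:c1"));
        assert!(!select_systemd_cgroup(false, "/kata/c1"));
    }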
@@ -1673,9 +1685,11 @@ async fn read_stream(reader: Arc<Mutex<ReadHalf<PipeStream>>>, l: usize) -> Resu
     Ok(content)
 }

-pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str) -> Result<TtrpcServer> {
-    let agent_service =
-        Box::new(AgentService { sandbox: s }) as Box<dyn agent_ttrpc::AgentService + Send + Sync>;
+pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str, init_mode: bool) -> Result<TtrpcServer> {
+    let agent_service = Box::new(AgentService {
+        sandbox: s,
+        init_mode,
+    }) as Box<dyn agent_ttrpc::AgentService + Send + Sync>;

     let agent_worker = Arc::new(agent_service);
@@ -2146,6 +2160,7 @@ mod tests {
         let agent_service = Box::new(AgentService {
             sandbox: Arc::new(Mutex::new(sandbox)),
+            init_mode: true,
         });

         let req = protocols::agent::UpdateInterfaceRequest::default();
@@ -2163,6 +2178,7 @@ mod tests {
         let agent_service = Box::new(AgentService {
             sandbox: Arc::new(Mutex::new(sandbox)),
+            init_mode: true,
         });

         let req = protocols::agent::UpdateRoutesRequest::default();
@@ -2180,6 +2196,7 @@ mod tests {
         let agent_service = Box::new(AgentService {
             sandbox: Arc::new(Mutex::new(sandbox)),
+            init_mode: true,
         });

         let req = protocols::agent::AddARPNeighborsRequest::default();
@@ -2313,6 +2330,7 @@ mod tests {
         let agent_service = Box::new(AgentService {
             sandbox: Arc::new(Mutex::new(sandbox)),
+            init_mode: true,
         });

         let result = agent_service
@@ -2793,6 +2811,7 @@ OtherField:other
         let sandbox = Sandbox::new(&logger).unwrap();
         let agent_service = Box::new(AgentService {
             sandbox: Arc::new(Mutex::new(sandbox)),
+            init_mode: true,
         });

         let ctx = mk_ttrpc_context();