Merge pull request #7202 from wedsonaf/macros

Convert `is_allowed`, `ttrpc_error` and `sl` to functions
Bin Liu
2023-07-04 14:23:08 +08:00
committed by GitHub
9 changed files with 245 additions and 266 deletions

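The change itself is mechanical: each module's `sl!` convenience macro becomes an ordinary `sl()` function with the same body, and every call site drops the `!`. The `is_allowed` and `ttrpc_error` conversions in the suppressed `rpc.rs` diff apply the same macro-to-function idea. A minimal sketch of the pattern, using the `cgroups` subsystem tag from the first hunk (the `demo` caller is illustrative only and assumes a global `slog_scope` logger has already been installed):

```rust
use slog::o;

// Before: a declarative macro, expanded textually at every call site.
macro_rules! sl {
    () => {
        slog_scope::logger().new(o!("subsystem" => "cgroups"))
    };
}

// After: a plain function with an explicit return type; call sites use `sl()`.
fn sl() -> slog::Logger {
    slog_scope::logger().new(o!("subsystem" => "cgroups"))
}

// Illustrative caller: the log output is identical before and after, but `sl`
// is now type-checked once at its definition and documented like any other item.
fn demo() {
    slog::info!(sl(), "cgroup manager set resources");
}
```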
View File

@@ -39,11 +39,9 @@ use std::path::Path;
const GUEST_CPUS_PATH: &str = "/sys/devices/system/cpu/online";
// Convenience macro to obtain the scope logger
macro_rules! sl {
() => {
slog_scope::logger().new(o!("subsystem" => "cgroups"))
};
// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
slog_scope::logger().new(o!("subsystem" => "cgroups"))
}
macro_rules! get_controller_or_return_singular_none {
@@ -82,7 +80,7 @@ impl CgroupManager for Manager {
fn set(&self, r: &LinuxResources, update: bool) -> Result<()> {
info!(
sl!(),
sl(),
"cgroup manager set resources for container. Resources input {:?}", r
);
@@ -120,7 +118,7 @@ impl CgroupManager for Manager {
// set devices resources
set_devices_resources(&self.cgroup, &r.devices, res);
info!(sl!(), "resources after processed {:?}", res);
info!(sl(), "resources after processed {:?}", res);
// apply resources
self.cgroup.apply(res)?;
@@ -197,7 +195,7 @@ impl CgroupManager for Manager {
if guest_cpuset.is_empty() {
return Ok(());
}
info!(sl!(), "update_cpuset_path to: {}", guest_cpuset);
info!(sl(), "update_cpuset_path to: {}", guest_cpuset);
let h = cgroups::hierarchies::auto();
let root_cg = h.root_control_group();
@@ -205,12 +203,12 @@ impl CgroupManager for Manager {
let root_cpuset_controller: &CpuSetController = root_cg.controller_of().unwrap();
let path = root_cpuset_controller.path();
let root_path = Path::new(path);
info!(sl!(), "root cpuset path: {:?}", &path);
info!(sl(), "root cpuset path: {:?}", &path);
let container_cpuset_controller: &CpuSetController = self.cgroup.controller_of().unwrap();
let path = container_cpuset_controller.path();
let container_path = Path::new(path);
info!(sl!(), "container cpuset path: {:?}", &path);
info!(sl(), "container cpuset path: {:?}", &path);
let mut paths = vec![];
for ancestor in container_path.ancestors() {
@@ -219,7 +217,7 @@ impl CgroupManager for Manager {
}
paths.push(ancestor);
}
info!(sl!(), "parent paths to update cpuset: {:?}", &paths);
info!(sl(), "parent paths to update cpuset: {:?}", &paths);
let mut i = paths.len();
loop {
@@ -233,7 +231,7 @@ impl CgroupManager for Manager {
.to_str()
.unwrap()
.trim_start_matches(root_path.to_str().unwrap());
info!(sl!(), "updating cpuset for parent path {:?}", &r_path);
info!(sl(), "updating cpuset for parent path {:?}", &r_path);
let cg = new_cgroup(cgroups::hierarchies::auto(), r_path)?;
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
cpuset_controller.set_cpus(guest_cpuset)?;
@@ -241,7 +239,7 @@ impl CgroupManager for Manager {
if !container_cpuset.is_empty() {
info!(
sl!(),
sl(),
"updating cpuset for container path: {:?} cpuset: {}",
&container_path,
container_cpuset
@@ -276,7 +274,7 @@ fn set_network_resources(
network: &LinuxNetwork,
res: &mut cgroups::Resources,
) {
info!(sl!(), "cgroup manager set network");
info!(sl(), "cgroup manager set network");
// set classid
// description can be found at https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/net_cls.html
@@ -303,7 +301,7 @@ fn set_devices_resources(
device_resources: &[LinuxDeviceCgroup],
res: &mut cgroups::Resources,
) {
info!(sl!(), "cgroup manager set devices");
info!(sl(), "cgroup manager set devices");
let mut devices = vec![];
for d in device_resources.iter() {
@@ -332,7 +330,7 @@ fn set_hugepages_resources(
hugepage_limits: &[LinuxHugepageLimit],
res: &mut cgroups::Resources,
) {
info!(sl!(), "cgroup manager set hugepage");
info!(sl(), "cgroup manager set hugepage");
let mut limits = vec![];
let hugetlb_controller = cg.controller_of::<HugeTlbController>();
@@ -346,7 +344,7 @@ fn set_hugepages_resources(
limits.push(hr);
} else {
warn!(
sl!(),
sl(),
"{} page size support cannot be verified, dropping requested limit", l.page_size
);
}
@@ -359,7 +357,7 @@ fn set_block_io_resources(
blkio: &LinuxBlockIo,
res: &mut cgroups::Resources,
) {
info!(sl!(), "cgroup manager set block io");
info!(sl(), "cgroup manager set block io");
res.blkio.weight = blkio.weight;
res.blkio.leaf_weight = blkio.leaf_weight;
@@ -387,13 +385,13 @@ fn set_block_io_resources(
}
fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCpu) -> Result<()> {
info!(sl!(), "cgroup manager set cpu");
info!(sl(), "cgroup manager set cpu");
let cpuset_controller: &CpuSetController = cg.controller_of().unwrap();
if !cpu.cpus.is_empty() {
if let Err(e) = cpuset_controller.set_cpus(&cpu.cpus) {
warn!(sl!(), "write cpuset failed: {:?}", e);
warn!(sl(), "write cpuset failed: {:?}", e);
}
}
@@ -424,7 +422,7 @@ fn set_cpu_resources(cg: &cgroups::Cgroup, cpu: &LinuxCpu) -> Result<()> {
}
fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool) -> Result<()> {
info!(sl!(), "cgroup manager set memory");
info!(sl(), "cgroup manager set memory");
let mem_controller: &MemController = cg.controller_of().unwrap();
if !update {
@@ -493,7 +491,7 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
}
fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
info!(sl!(), "cgroup manager set pids");
info!(sl(), "cgroup manager set pids");
let pid_controller: &PidController = cg.controller_of().unwrap();
let v = if pids.limit > 0 {
MaxValue::Value(pids.limit)
@@ -962,7 +960,7 @@ pub fn get_paths() -> Result<HashMap<String, String>> {
for l in fs::read_to_string(PATHS)?.lines() {
let fl: Vec<&str> = l.split(':').collect();
if fl.len() != 3 {
info!(sl!(), "Corrupted cgroup data!");
info!(sl(), "Corrupted cgroup data!");
continue;
}
@@ -983,7 +981,7 @@ pub fn get_mounts(paths: &HashMap<String, String>) -> Result<HashMap<String, Str
let post: Vec<&str> = p[1].split(' ').collect();
if post.len() != 3 {
warn!(sl!(), "can't parse {} line {:?}", MOUNTS, l);
warn!(sl(), "can't parse {} line {:?}", MOUNTS, l);
continue;
}

View File

@@ -16,11 +16,9 @@ use inotify::{Inotify, WatchMask};
use tokio::io::AsyncReadExt;
use tokio::sync::mpsc::{channel, Receiver};
// Convenience macro to obtain the scope logger
macro_rules! sl {
() => {
slog_scope::logger().new(o!("subsystem" => "cgroups_notifier"))
};
// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
slog_scope::logger().new(o!("subsystem" => "cgroups_notifier"))
}
pub async fn notify_oom(cid: &str, cg_dir: String) -> Result<Receiver<String>> {
@@ -38,7 +36,7 @@ pub async fn notify_oom(cid: &str, cg_dir: String) -> Result<Receiver<String>> {
fn get_value_from_cgroup(path: &Path, key: &str) -> Result<i64> {
let content = fs::read_to_string(path)?;
info!(
sl!(),
sl(),
"get_value_from_cgroup file: {:?}, content: {}", &path, &content
);
@@ -67,11 +65,11 @@ async fn register_memory_event_v2(
let event_control_path = Path::new(&cg_dir).join(memory_event_name);
let cgroup_event_control_path = Path::new(&cg_dir).join(cgroup_event_name);
info!(
sl!(),
sl(),
"register_memory_event_v2 event_control_path: {:?}", &event_control_path
);
info!(
sl!(),
sl(),
"register_memory_event_v2 cgroup_event_control_path: {:?}", &cgroup_event_control_path
);
@@ -82,8 +80,8 @@ async fn register_memory_event_v2(
// Because no `unix.IN_DELETE|unix.IN_DELETE_SELF` event for cgroup file system, so watching all process exited
let cg_wd = inotify.add_watch(&cgroup_event_control_path, WatchMask::MODIFY)?;
info!(sl!(), "ev_wd: {:?}", ev_wd);
info!(sl!(), "cg_wd: {:?}", cg_wd);
info!(sl(), "ev_wd: {:?}", ev_wd);
info!(sl(), "cg_wd: {:?}", cg_wd);
let (sender, receiver) = channel(100);
let containere_id = containere_id.to_string();
@@ -97,17 +95,17 @@ async fn register_memory_event_v2(
while let Some(event_or_error) = stream.next().await {
let event = event_or_error.unwrap();
info!(
sl!(),
sl(),
"container[{}] get event for container: {:?}", &containere_id, &event
);
// info!("is1: {}", event.wd == wd1);
info!(sl!(), "event.wd: {:?}", event.wd);
info!(sl(), "event.wd: {:?}", event.wd);
if event.wd == ev_wd {
let oom = get_value_from_cgroup(&event_control_path, "oom_kill");
if oom.unwrap_or(0) > 0 {
let _ = sender.send(containere_id.clone()).await.map_err(|e| {
error!(sl!(), "send containere_id failed, error: {:?}", e);
error!(sl(), "send containere_id failed, error: {:?}", e);
});
return;
}
@@ -171,13 +169,13 @@ async fn register_memory_event(
let mut buf = [0u8; 8];
match eventfd_stream.read(&mut buf).await {
Err(err) => {
warn!(sl!(), "failed to read from eventfd: {:?}", err);
warn!(sl(), "failed to read from eventfd: {:?}", err);
return;
}
Ok(_) => {
let content = fs::read_to_string(path.clone());
info!(
sl!(),
sl(),
"cgroup event for container: {}, path: {:?}, content: {:?}",
&containere_id,
&path,
@@ -193,7 +191,7 @@ async fn register_memory_event(
}
let _ = sender.send(containere_id.clone()).await.map_err(|e| {
error!(sl!(), "send containere_id failed, error: {:?}", e);
error!(sl(), "send containere_id failed, error: {:?}", e);
});
}
});

View File

@@ -1596,10 +1596,8 @@ mod tests {
use tempfile::tempdir;
use test_utils::skip_if_not_root;
macro_rules! sl {
() => {
slog_scope::logger()
};
fn sl() -> slog::Logger {
slog_scope::logger()
}
#[test]
@@ -1854,7 +1852,7 @@ mod tests {
let _ = new_linux_container_and_then(|mut c: LinuxContainer| {
c.processes.insert(
1,
Process::new(&sl!(), &oci::Process::default(), "123", true, 1).unwrap(),
Process::new(&sl(), &oci::Process::default(), "123", true, 1).unwrap(),
);
let p = c.get_process("123");
assert!(p.is_ok(), "Expecting Ok, Got {:?}", p);
@@ -1881,7 +1879,7 @@ mod tests {
let (c, _dir) = new_linux_container();
let ret = c
.unwrap()
.start(Process::new(&sl!(), &oci::Process::default(), "123", true, 1).unwrap())
.start(Process::new(&sl(), &oci::Process::default(), "123", true, 1).unwrap())
.await;
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
}
@@ -1891,7 +1889,7 @@ mod tests {
let (c, _dir) = new_linux_container();
let ret = c
.unwrap()
.run(Process::new(&sl!(), &oci::Process::default(), "123", true, 1).unwrap())
.run(Process::new(&sl(), &oci::Process::default(), "123", true, 1).unwrap())
.await;
assert!(ret.is_err(), "Expecting Err, Got {:?}", ret);
}

View File

@@ -26,11 +26,9 @@ use oci::{LinuxDeviceCgroup, LinuxResources, Spec};
use protocols::agent::Device;
use tracing::instrument;
// Convenience macro to obtain the scope logger
macro_rules! sl {
() => {
slog_scope::logger().new(o!("subsystem" => "device"))
};
// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
slog_scope::logger().new(o!("subsystem" => "device"))
}
const VM_ROOTFS: &str = "/";
@@ -78,7 +76,7 @@ where
{
let syspci = Path::new(&syspci);
let drv = drv.as_ref();
info!(sl!(), "rebind_pci_driver: {} => {:?}", dev, drv);
info!(sl(), "rebind_pci_driver: {} => {:?}", dev, drv);
let devpath = syspci.join("devices").join(dev.to_string());
let overridepath = &devpath.join("driver_override");
@@ -606,7 +604,7 @@ fn update_spec_devices(spec: &mut Spec, mut updates: HashMap<&str, DevUpdate>) -
let host_minor = specdev.minor;
info!(
sl!(),
sl(),
"update_spec_devices() updating device";
"container_path" => &specdev.path,
"type" => &specdev.r#type,
@@ -657,7 +655,7 @@ fn update_spec_devices(spec: &mut Spec, mut updates: HashMap<&str, DevUpdate>) -
if let Some(update) = res_updates.get(&(r.r#type.as_str(), host_major, host_minor))
{
info!(
sl!(),
sl(),
"update_spec_devices() updating resource";
"type" => &r.r#type,
"host_major" => host_major,
@@ -921,7 +919,7 @@ pub async fn add_devices(
#[instrument]
async fn add_device(device: &Device, sandbox: &Arc<Mutex<Sandbox>>) -> Result<SpecUpdate> {
// log before validation to help with debugging gRPC protocol version differences.
info!(sl!(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}",
info!(sl(), "device-id: {}, device-type: {}, device-vm-path: {}, device-container-path: {}, device-options: {:?}",
device.id, device.type_, device.vm_path, device.container_path, device.options);
if device.type_.is_empty() {

View File

@@ -65,7 +65,7 @@ use tokio::{
io::AsyncWrite,
sync::{
watch::{channel, Receiver},
Mutex, RwLock,
Mutex,
},
task::JoinHandle,
};
@@ -83,12 +83,11 @@ cfg_if! {
const NAME: &str = "kata-agent";
lazy_static! {
static ref AGENT_CONFIG: Arc<RwLock<AgentConfig>> = Arc::new(RwLock::new(
static ref AGENT_CONFIG: AgentConfig =
// Note: We can't do AgentOpts.parse() here to send through the processed arguments to AgentConfig
// clap::Parser::parse() greedily process all command line input including cargo test parameters,
// so should only be used inside main.
AgentConfig::from_cmdline("/proc/cmdline", env::args().collect()).unwrap()
));
AgentConfig::from_cmdline("/proc/cmdline", env::args().collect()).unwrap();
}
#[derive(Parser)]
@@ -181,13 +180,13 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
lazy_static::initialize(&AGENT_CONFIG);
init_agent_as_init(&logger, AGENT_CONFIG.read().await.unified_cgroup_hierarchy)?;
init_agent_as_init(&logger, AGENT_CONFIG.unified_cgroup_hierarchy)?;
drop(logger_async_guard);
} else {
lazy_static::initialize(&AGENT_CONFIG);
}
let config = AGENT_CONFIG.read().await;
let config = &AGENT_CONFIG;
let log_vport = config.log_vport as u32;
let log_handle = tokio::spawn(create_logger_task(rfd, log_vport, shutdown_rx.clone()));
@@ -200,7 +199,7 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
let (logger, logger_async_guard) =
logging::create_logger(NAME, "agent", config.log_level, writer);
announce(&logger, &config);
announce(&logger, config);
// This variable is required as it enables the global (and crucially static) logger,
// which is required to satisfy the lifetime constraints of the auto-generated gRPC code.
@@ -228,7 +227,7 @@ async fn real_main() -> std::result::Result<(), Box<dyn std::error::Error>> {
let span_guard = root_span.enter();
// Start the sandbox and wait for its ttRPC server to end
start_sandbox(&logger, &config, init_mode, &mut tasks, shutdown_rx.clone()).await?;
start_sandbox(&logger, config, init_mode, &mut tasks, shutdown_rx.clone()).await?;
// Install a NOP logger for the remainder of the shutdown sequence
// to ensure any log calls made by local crates using the scope logger

View File

@@ -15,11 +15,9 @@ use tracing::instrument;
const NAMESPACE_KATA_AGENT: &str = "kata_agent";
const NAMESPACE_KATA_GUEST: &str = "kata_guest";
// Convenience macro to obtain the scope logger
macro_rules! sl {
() => {
slog_scope::logger().new(o!("subsystem" => "metrics"))
};
// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
slog_scope::logger().new(o!("subsystem" => "metrics"))
}
lazy_static! {
@@ -139,7 +137,7 @@ fn update_agent_metrics() -> Result<()> {
Ok(p) => p,
Err(e) => {
// FIXME: return Ok for all errors?
warn!(sl!(), "failed to create process instance: {:?}", e);
warn!(sl(), "failed to create process instance: {:?}", e);
return Ok(());
}
@@ -160,7 +158,7 @@ fn update_agent_metrics() -> Result<()> {
// io
match me.io() {
Err(err) => {
info!(sl!(), "failed to get process io stat: {:?}", err);
info!(sl(), "failed to get process io stat: {:?}", err);
}
Ok(io) => {
set_gauge_vec_proc_io(&AGENT_IO_STAT, &io);
@@ -169,7 +167,7 @@ fn update_agent_metrics() -> Result<()> {
match me.stat() {
Err(err) => {
info!(sl!(), "failed to get process stat: {:?}", err);
info!(sl(), "failed to get process stat: {:?}", err);
}
Ok(stat) => {
set_gauge_vec_proc_stat(&AGENT_PROC_STAT, &stat);
@@ -177,7 +175,7 @@ fn update_agent_metrics() -> Result<()> {
}
match me.status() {
Err(err) => error!(sl!(), "failed to get process status: {:?}", err),
Err(err) => error!(sl(), "failed to get process status: {:?}", err),
Ok(status) => set_gauge_vec_proc_status(&AGENT_PROC_STATUS, &status),
}
@@ -189,7 +187,7 @@ fn update_guest_metrics() {
// try get load and task info
match procfs::LoadAverage::new() {
Err(err) => {
info!(sl!(), "failed to get guest LoadAverage: {:?}", err);
info!(sl(), "failed to get guest LoadAverage: {:?}", err);
}
Ok(load) => {
GUEST_LOAD
@@ -209,7 +207,7 @@ fn update_guest_metrics() {
// try to get disk stats
match procfs::diskstats() {
Err(err) => {
info!(sl!(), "failed to get guest diskstats: {:?}", err);
info!(sl(), "failed to get guest diskstats: {:?}", err);
}
Ok(diskstats) => {
for diskstat in diskstats {
@@ -221,7 +219,7 @@ fn update_guest_metrics() {
// try to get vm stats
match procfs::vmstat() {
Err(err) => {
info!(sl!(), "failed to get guest vmstat: {:?}", err);
info!(sl(), "failed to get guest vmstat: {:?}", err);
}
Ok(vmstat) => {
for (k, v) in vmstat {
@@ -233,7 +231,7 @@ fn update_guest_metrics() {
// cpu stat
match procfs::KernelStats::new() {
Err(err) => {
info!(sl!(), "failed to get guest KernelStats: {:?}", err);
info!(sl(), "failed to get guest KernelStats: {:?}", err);
}
Ok(kernel_stats) => {
set_gauge_vec_cpu_time(&GUEST_CPU_TIME, "total", &kernel_stats.total);
@@ -246,7 +244,7 @@ fn update_guest_metrics() {
// try to get net device stats
match procfs::net::dev_status() {
Err(err) => {
info!(sl!(), "failed to get guest net::dev_status: {:?}", err);
info!(sl(), "failed to get guest net::dev_status: {:?}", err);
}
Ok(devs) => {
// netdev: map[string]procfs::net::DeviceStatus
@@ -259,7 +257,7 @@ fn update_guest_metrics() {
// get statistics about memory from /proc/meminfo
match procfs::Meminfo::new() {
Err(err) => {
info!(sl!(), "failed to get guest Meminfo: {:?}", err);
info!(sl(), "failed to get guest Meminfo: {:?}", err);
}
Ok(meminfo) => {
set_gauge_vec_meminfo(&GUEST_MEMINFO, &meminfo);

File diff suppressed because it is too large

View File

@@ -69,7 +69,7 @@ macro_rules! trace_rpc_call {
propagator.extract(&extract_carrier_from_ttrpc($ctx))
});
info!(sl!(), "rpc call from shim to agent: {:?}", $name);
info!(sl(), "rpc call from shim to agent: {:?}", $name);
// generate tracing span
let rpc_span = span!(tracing::Level::INFO, $name, "mod"="rpc.rs", req=?$req);

View File

@@ -19,11 +19,9 @@ use tokio::sync::watch::Receiver;
use tokio::sync::Mutex;
use tracing::instrument;
// Convenience macro to obtain the scope logger
macro_rules! sl {
() => {
slog_scope::logger().new(o!("subsystem" => "uevent"))
};
// Convenience function to obtain the scope logger.
fn sl() -> slog::Logger {
slog_scope::logger().new(o!("subsystem" => "uevent"))
}
#[derive(Debug, Default, Clone, PartialEq, Eq)]
@@ -120,11 +118,11 @@ pub async fn wait_for_uevent(
) -> Result<Uevent> {
let logprefix = format!("Waiting for {:?}", &matcher);
info!(sl!(), "{}", logprefix);
info!(sl(), "{}", logprefix);
let mut sb = sandbox.lock().await;
for uev in sb.uevent_map.values() {
if matcher.is_match(uev) {
info!(sl!(), "{}: found {:?} in uevent map", logprefix, &uev);
info!(sl(), "{}: found {:?} in uevent map", logprefix, &uev);
return Ok(uev.clone());
}
}
@@ -139,9 +137,9 @@ pub async fn wait_for_uevent(
sb.uevent_watchers.push(Some((Box::new(matcher), tx)));
drop(sb); // unlock
info!(sl!(), "{}: waiting on channel", logprefix);
info!(sl(), "{}: waiting on channel", logprefix);
let hotplug_timeout = AGENT_CONFIG.read().await.hotplug_timeout;
let hotplug_timeout = AGENT_CONFIG.hotplug_timeout;
let uev = match tokio::time::timeout(hotplug_timeout, rx).await {
Ok(v) => v?,
@@ -157,7 +155,7 @@ pub async fn wait_for_uevent(
}
};
info!(sl!(), "{}: found {:?} on channel", logprefix, &uev);
info!(sl(), "{}: found {:?} on channel", logprefix, &uev);
Ok(uev)
}