Merge pull request #977 from liubin/fix/951-clear-clippy-warnings

agent: clear clippy warnings
Fupan Li 2020-10-22 09:36:45 +08:00 committed by GitHub
commit 074b5332aa
25 changed files with 270 additions and 297 deletions

.gitignore vendored
View File

@ -3,5 +3,6 @@
**/*.rej **/*.rej
**/target **/target
**/.vscode **/.vscode
pkg/logging/Cargo.lock
src/agent/src/version.rs src/agent/src/version.rs
src/agent/kata-agent.service src/agent/kata-agent.service

View File

@ -15,7 +15,7 @@ os: linux
# https://docs.travis-ci.com/user/caching#clearing-caches # https://docs.travis-ci.com/user/caching#clearing-caches
language: rust language: rust
rust: rust:
- 1.44.1 - 1.47.0
cache: cache:
cargo: true cargo: true
directories: directories:
@ -36,9 +36,9 @@ install:
- rustup target add x86_64-unknown-linux-musl - rustup target add x86_64-unknown-linux-musl
- sudo ln -sf /usr/bin/g++ /bin/musl-g++ - sudo ln -sf /usr/bin/g++ /bin/musl-g++
- rustup component add rustfmt - rustup component add rustfmt
- make -C ${TRAVIS_BUILD_DIR}/src/agent - rustup component add clippy
- make -C ${TRAVIS_BUILD_DIR}/src/agent check - make -C ${TRAVIS_BUILD_DIR}/src/agent clippy
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check - "ci/static-checks.sh"
before_script: before_script:
- "ci/install_go.sh" - "ci/install_go.sh"
@ -47,7 +47,9 @@ before_script:
- sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test - sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test
script: script:
- "ci/static-checks.sh" - make -C ${TRAVIS_BUILD_DIR}/src/agent
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check
jobs: jobs:
include: include:

View File

@ -93,9 +93,7 @@ impl HashSerializer {
// Take care to only add the first instance of a key. This matters for loggers (but not // Take care to only add the first instance of a key. This matters for loggers (but not
// Records) since a child loggers have parents and the loggers are serialised child first // Records) since a child loggers have parents and the loggers are serialised child first
// meaning the *newest* fields are serialised first. // meaning the *newest* fields are serialised first.
if !self.fields.contains_key(&key) { self.fields.entry(key).or_insert(value);
self.fields.insert(key, value);
}
} }
fn remove_field(&mut self, key: &str) { fn remove_field(&mut self, key: &str) {
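The hunk above is the clippy::map_entry fix: checking contains_key and then inserting does two hash lookups, while the Entry API does one and keeps the "first value wins" behaviour described in the comment. A minimal standalone sketch of the pattern (hypothetical field names, not the logging crate's actual types):

```rust
use std::collections::HashMap;

fn add_field(fields: &mut HashMap<String, String>, key: String, value: String) {
    // Before:
    //   if !fields.contains_key(&key) {
    //       fields.insert(key, value);
    //   }
    // After: a single lookup, and the first value stored for a key wins.
    fields.entry(key).or_insert(value);
}

fn main() {
    let mut fields = HashMap::new();
    add_field(&mut fields, "subsystem".into(), "baremount".into());
    add_field(&mut fields, "subsystem".into(), "ignored".into()); // not overwritten
    println!("{:?}", fields);
}
```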

View File

@ -121,6 +121,12 @@ optimize: $(SOURCES) | show-summary show-header
show-header: show-header:
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)" @printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
clippy: $(GENERATED_CODE)
cargo clippy --all-targets --all-features --release \
-- \
-Aclippy::redundant_allocation \
-D warnings
$(GENERATED_FILES): %: %.in $(GENERATED_FILES): %: %.in
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@" @sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
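The new `clippy` target fails the build on any warning (`-D warnings`) but allows `clippy::redundant_allocation`, presumably because the cgroup code passes hierarchies around as `Box<&dyn cgroups::Hierarchy>`, which is exactly the shape that lint flags. A hypothetical minimal reproduction (toy trait, not the real cgroups API):

```rust
trait Hierarchy {}
struct V1;
impl Hierarchy for V1 {}

// clippy::redundant_allocation: a reference is already a thin handle, so
// boxing `&dyn Hierarchy` allocates for nothing. The commit silences the
// lint in the Makefile instead of changing these signatures.
fn load(_h: Box<&dyn Hierarchy>) {}

fn main() {
    let v1 = V1;
    load(Box::new(&v1 as &dyn Hierarchy));
}
```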

View File

@ -4,7 +4,6 @@
// //
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json;
use std::error; use std::error;
use std::fmt::{Display, Formatter, Result as FmtResult}; use std::fmt::{Display, Formatter, Result as FmtResult};
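Dropping `use serde_json;` here (and the similar `use libc;` / `use lazy_static;` lines later in this PR) addresses clippy::single_component_path_imports: under the 2018 edition an external crate is reachable by path without an explicit import. A sketch, assuming serde_json is already a dependency as it is for this crate:

```rust
use serde_json; // clippy::single_component_path_imports: this line is redundant

fn main() {
    // The crate path works with or without the `use` above.
    let v: serde_json::Value = serde_json::json!({ "agent": "kata" });
    println!("{}", v);
}
```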

View File

@ -3,6 +3,7 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#![allow(bare_trait_objects)] #![allow(bare_trait_objects)]
#![allow(clippy::redundant_field_names)]
pub mod agent; pub mod agent;
pub mod agent_ttrpc; pub mod agent_ttrpc;
@ -11,11 +12,3 @@ pub mod health;
pub mod health_ttrpc; pub mod health_ttrpc;
pub mod oci; pub mod oci;
pub mod types; pub mod types;
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}

View File

@ -6,8 +6,6 @@
// looks like we can use caps to manipulate capabilities // looks like we can use caps to manipulate capabilities
// conveniently, use caps to do it directly.. maybe // conveniently, use caps to do it directly.. maybe
use lazy_static;
use crate::log_child; use crate::log_child;
use crate::sync::write_count; use crate::sync::write_count;
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};

View File

@ -21,7 +21,6 @@ use cgroups::{
use crate::cgroups::Manager as CgroupManager; use crate::cgroups::Manager as CgroupManager;
use crate::container::DEFAULT_DEVICES; use crate::container::DEFAULT_DEVICES;
use anyhow::{anyhow, Context, Result}; use anyhow::{anyhow, Context, Result};
use lazy_static;
use libc::{self, pid_t}; use libc::{self, pid_t};
use nix::errno::Errno; use nix::errno::Errno;
use oci::{ use oci::{
@ -46,18 +45,19 @@ macro_rules! sl {
} }
pub fn load_or_create<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Cgroup<'a> { pub fn load_or_create<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Cgroup<'a> {
let valid_path = path.trim_start_matches("/").to_string(); let valid_path = path.trim_start_matches('/').to_string();
let cg = load(h.clone(), &valid_path); let cg = load(h.clone(), &valid_path);
if cg.is_none() { match cg {
info!(sl!(), "create new cgroup: {}", &valid_path); Some(cg) => cg,
cgroups::Cgroup::new(h, valid_path.as_str()) None => {
} else { info!(sl!(), "create new cgroup: {}", &valid_path);
cg.unwrap() cgroups::Cgroup::new(h, valid_path.as_str())
}
} }
} }
pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option<Cgroup<'a>> { pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option<Cgroup<'a>> {
let valid_path = path.trim_start_matches("/").to_string(); let valid_path = path.trim_start_matches('/').to_string();
let cg = cgroups::Cgroup::load(h, valid_path.as_str()); let cg = cgroups::Cgroup::load(h, valid_path.as_str());
let cpu_controller: &CpuController = cg.controller_of().unwrap(); let cpu_controller: &CpuController = cg.controller_of().unwrap();
if cpu_controller.exists() { if cpu_controller.exists() {
@ -126,7 +126,7 @@ impl CgroupManager for Manager {
} }
// set hugepages resources // set hugepages resources
if r.hugepage_limits.len() > 0 { if !r.hugepage_limits.is_empty() {
set_hugepages_resources(&cg, &r.hugepage_limits, res)?; set_hugepages_resources(&cg, &r.hugepage_limits, res)?;
} }
@ -210,8 +210,8 @@ impl CgroupManager for Manager {
let h = cgroups::hierarchies::auto(); let h = cgroups::hierarchies::auto();
let h = Box::new(&*h); let h = Box::new(&*h);
let cg = load(h, &self.cpath); let cg = load(h, &self.cpath);
if cg.is_some() { if let Some(cg) = cg {
cg.unwrap().delete(); cg.delete();
} }
Ok(()) Ok(())
} }
@ -259,7 +259,7 @@ fn set_network_resources(
fn set_devices_resources( fn set_devices_resources(
_cg: &cgroups::Cgroup, _cg: &cgroups::Cgroup,
device_resources: &Vec<LinuxDeviceCgroup>, device_resources: &[LinuxDeviceCgroup],
res: &mut cgroups::Resources, res: &mut cgroups::Resources,
) -> Result<()> { ) -> Result<()> {
info!(sl!(), "cgroup manager set devices"); info!(sl!(), "cgroup manager set devices");
@ -291,7 +291,7 @@ fn set_devices_resources(
fn set_hugepages_resources( fn set_hugepages_resources(
_cg: &cgroups::Cgroup, _cg: &cgroups::Cgroup,
hugepage_limits: &Vec<LinuxHugepageLimit>, hugepage_limits: &[LinuxHugepageLimit],
res: &mut cgroups::Resources, res: &mut cgroups::Resources,
) -> Result<()> { ) -> Result<()> {
info!(sl!(), "cgroup manager set hugepage"); info!(sl!(), "cgroup manager set hugepage");
@ -453,7 +453,7 @@ fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
} }
fn build_blk_io_device_throttle_resource( fn build_blk_io_device_throttle_resource(
input: &Vec<oci::LinuxThrottleDevice>, input: &[oci::LinuxThrottleDevice],
) -> Vec<BlkIoDeviceThrottleResource> { ) -> Vec<BlkIoDeviceThrottleResource> {
let mut blk_io_device_throttle_resources = vec![]; let mut blk_io_device_throttle_resources = vec![];
for d in input.iter() { for d in input.iter() {
@ -685,7 +685,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
// use_hierarchy // use_hierarchy
let value = memory.use_hierarchy; let value = memory.use_hierarchy;
let use_hierarchy = if value == 1 { true } else { false }; let use_hierarchy = value == 1;
// gte memory datas // gte memory datas
let usage = SingularPtrField::some(MemoryData { let usage = SingularPtrField::some(MemoryData {
@ -739,13 +739,12 @@ fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> {
let current = pid_controller.get_pid_current().unwrap_or(0); let current = pid_controller.get_pid_current().unwrap_or(0);
let max = pid_controller.get_pid_max(); let max = pid_controller.get_pid_max();
let limit = if max.is_err() { let limit = match max {
0 Err(_) => 0,
} else { Ok(max) => match max {
match max.unwrap() {
MaxValue::Value(v) => v, MaxValue::Value(v) => v,
MaxValue::Max => 0, MaxValue::Max => 0,
} },
} as u64; } as u64;
SingularPtrField::some(PidsStats { SingularPtrField::some(PidsStats {
@ -788,9 +787,9 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8
Total 0 Total 0
*/ */
fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioStatsEntry> { fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStatsEntry> {
let mut m = RepeatedField::new(); let mut m = RepeatedField::new();
if blkiodata.len() == 0 { if blkiodata.is_empty() {
return m; return m;
} }
@ -810,10 +809,10 @@ fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioSt
m m
} }
fn get_blkio_stat_ioservice(services: &Vec<IoService>) -> RepeatedField<BlkioStatsEntry> { fn get_blkio_stat_ioservice(services: &[IoService]) -> RepeatedField<BlkioStatsEntry> {
let mut m = RepeatedField::new(); let mut m = RepeatedField::new();
if services.len() == 0 { if services.is_empty() {
return m; return m;
} }
@ -834,7 +833,7 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki
major: major as u64, major: major as u64,
minor: minor as u64, minor: minor as u64,
op: op.to_string(), op: op.to_string(),
value: value, value,
unknown_fields: UnknownFields::default(), unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(), cached_size: CachedSize::default(),
} }
@ -875,7 +874,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
let mut m = BlkioStats::new(); let mut m = BlkioStats::new();
let io_serviced_recursive = blkio.io_serviced_recursive; let io_serviced_recursive = blkio.io_serviced_recursive;
if io_serviced_recursive.len() == 0 { if io_serviced_recursive.is_empty() {
// fall back to generic stats // fall back to generic stats
// blkio.throttle.io_service_bytes, // blkio.throttle.io_service_bytes,
// maybe io_service_bytes_recursive? // maybe io_service_bytes_recursive?
@ -930,8 +929,8 @@ fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap<String, HugetlbStats> {
h h
} }
pub const PATHS: &'static str = "/proc/self/cgroup"; pub const PATHS: &str = "/proc/self/cgroup";
pub const MOUNTS: &'static str = "/proc/self/mountinfo"; pub const MOUNTS: &str = "/proc/self/mountinfo";
pub fn get_paths() -> Result<HashMap<String, String>> { pub fn get_paths() -> Result<HashMap<String, String>> {
let mut m = HashMap::new(); let mut m = HashMap::new();
@ -1056,7 +1055,7 @@ impl Manager {
if i == 0 { if i == 0 {
break; break;
} }
i = i - 1; i -= 1;
let h = cgroups::hierarchies::auto(); let h = cgroups::hierarchies::auto();
let h = Box::new(&*h); let h = Box::new(&*h);
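Several hunks above change `&Vec<T>` parameters to `&[T]`, the clippy::ptr_arg suggestion: a slice accepts vectors, arrays, and sub-slices alike and does not expose the owning type. A small sketch with a hypothetical device struct standing in for oci::LinuxDeviceCgroup:

```rust
struct LinuxDeviceCgroup {
    allow: bool,
}

// Before: fn set_devices_resources(devs: &Vec<LinuxDeviceCgroup>) { ... }
fn set_devices_resources(devs: &[LinuxDeviceCgroup]) {
    for d in devs {
        println!("allow={}", d.allow);
    }
}

fn main() {
    let devs = vec![LinuxDeviceCgroup { allow: true }];
    set_devices_resources(&devs); // &Vec<T> coerces to &[T] at the call site
}
```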

View File

@ -41,7 +41,7 @@ fn get_value_from_cgroup(path: &PathBuf, key: &str) -> Result<i64> {
); );
for line in content.lines() { for line in content.lines() {
let arr: Vec<&str> = line.split(" ").collect(); let arr: Vec<&str> = line.split(' ').collect();
if arr.len() == 2 && arr[0] == key { if arr.len() == 2 && arr[0] == key {
let r = arr[1].parse::<i64>()?; let r = arr[1].parse::<i64>()?;
return Ok(r); return Ok(r);
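`split(' ')` instead of `split(" ")` is the clippy::single_char_pattern fix; a char pattern skips the string-matching path for a one-byte separator. A standalone sketch of the parsing shown above, with made-up cgroup stat keys:

```rust
fn get_value(content: &str, key: &str) -> Option<i64> {
    for line in content.lines() {
        // Before: line.split(" ")
        let arr: Vec<&str> = line.split(' ').collect();
        if arr.len() == 2 && arr[0] == key {
            return arr[1].parse::<i64>().ok();
        }
    }
    None
}

fn main() {
    println!("{:?}", get_value("nr_periods 100\nnr_throttled 3", "nr_throttled"));
}
```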

View File

@ -4,12 +4,9 @@
// //
use anyhow::{anyhow, Context, Result}; use anyhow::{anyhow, Context, Result};
use dirs;
use lazy_static;
use libc::pid_t; use libc::pid_t;
use oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec}; use oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec};
use oci::{LinuxDevice, LinuxIDMapping}; use oci::{LinuxDevice, LinuxIDMapping};
use serde_json;
use std::clone::Clone; use std::clone::Clone;
use std::ffi::{CStr, CString}; use std::ffi::{CStr, CString};
use std::fmt; use std::fmt;
@ -43,7 +40,6 @@ use nix::sys::signal::{self, Signal};
use nix::sys::stat::{self, Mode}; use nix::sys::stat::{self, Mode};
use nix::unistd::{self, ForkResult, Gid, Pid, Uid}; use nix::unistd::{self, ForkResult, Gid, Pid, Uid};
use libc;
use protobuf::SingularPtrField; use protobuf::SingularPtrField;
use oci::State as OCIState; use oci::State as OCIState;
@ -54,9 +50,9 @@ use std::os::unix::io::FromRawFd;
use slog::{info, o, Logger}; use slog::{info, o, Logger};
const STATE_FILENAME: &'static str = "state.json"; const STATE_FILENAME: &str = "state.json";
const EXEC_FIFO_FILENAME: &'static str = "exec.fifo"; const EXEC_FIFO_FILENAME: &str = "exec.fifo";
const VER_MARKER: &'static str = "1.2.5"; const VER_MARKER: &str = "1.2.5";
const PID_NS_PATH: &str = "/proc/self/ns/pid"; const PID_NS_PATH: &str = "/proc/self/ns/pid";
const INIT: &str = "INIT"; const INIT: &str = "INIT";
@ -551,7 +547,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
setid(uid, gid)?; setid(uid, gid)?;
if guser.additional_gids.len() > 0 { if !guser.additional_gids.is_empty() {
setgroups(guser.additional_gids.as_slice()).map_err(|e| { setgroups(guser.additional_gids.as_slice()).map_err(|e| {
let _ = write_sync( let _ = write_sync(
cwfd, cwfd,
@ -595,7 +591,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
// setup the envs // setup the envs
for e in env.iter() { for e in env.iter() {
let v: Vec<&str> = e.splitn(2, "=").collect(); let v: Vec<&str> = e.splitn(2, '=').collect();
if v.len() != 2 { if v.len() != 2 {
continue; continue;
} }
@ -731,7 +727,7 @@ impl BaseContainer for LinuxContainer {
info!(logger, "enter container.start!"); info!(logger, "enter container.start!");
let mut fifofd: RawFd = -1; let mut fifofd: RawFd = -1;
if p.init { if p.init {
if let Ok(_) = stat::stat(fifo_file.as_str()) { if stat::stat(fifo_file.as_str()).is_ok() {
return Err(anyhow!("exec fifo exists")); return Err(anyhow!("exec fifo exists"));
} }
unistd::mkfifo(fifo_file.as_str(), Mode::from_bits(0o622).unwrap())?; unistd::mkfifo(fifo_file.as_str(), Mode::from_bits(0o622).unwrap())?;
@ -931,7 +927,7 @@ impl BaseContainer for LinuxContainer {
.join() .join()
.map_err(|e| warn!(logger, "joining log handler {:?}", e)); .map_err(|e| warn!(logger, "joining log handler {:?}", e));
info!(logger, "create process completed"); info!(logger, "create process completed");
return Ok(()); Ok(())
} }
fn run(&mut self, p: Process) -> Result<()> { fn run(&mut self, p: Process) -> Result<()> {
@ -1164,11 +1160,9 @@ fn join_namespaces(
} }
// apply cgroups // apply cgroups
if p.init { if p.init && res.is_some() {
if res.is_some() { info!(logger, "apply cgroups!");
info!(logger, "apply cgroups!"); cm.set(res.unwrap(), false)?;
cm.set(res.unwrap(), false)?;
}
} }
if res.is_some() { if res.is_some() {
@ -1464,7 +1458,7 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
} }
} }
return Ok(()); Ok(())
} }
ForkResult::Child => { ForkResult::Child => {
@ -1567,13 +1561,11 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
error error
} }
} }
} else if let Ok(s) = rx.recv() {
s
} else { } else {
if let Ok(s) = rx.recv() { let _ = signal::kill(Pid::from_raw(pid), Some(Signal::SIGKILL));
s -libc::EPIPE
} else {
let _ = signal::kill(Pid::from_raw(pid), Some(Signal::SIGKILL));
-libc::EPIPE
}
} }
}; };
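Among the container.rs changes, `if let Ok(_) = stat::stat(...)` becomes `stat::stat(...).is_ok()`, the clippy::redundant_pattern_matching form: the binding is discarded, so only the variant check matters. A sketch using std::fs::metadata in place of the nix stat call:

```rust
use std::fs;

fn exec_fifo_exists(path: &str) -> bool {
    // Before: if let Ok(_) = fs::metadata(path) { ... }
    fs::metadata(path).is_ok()
}

fn main() {
    println!("{}", exec_fifo_exists("/tmp/exec.fifo"));
}
```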

View File

@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
use anyhow::{anyhow, bail, Context, Error, Result}; use anyhow::{anyhow, bail, Context, Result};
use libc::uid_t; use libc::uid_t;
use nix::errno::Errno; use nix::errno::Errno;
use nix::fcntl::{self, OFlag}; use nix::fcntl::{self, OFlag};
@ -22,13 +22,11 @@ use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use path_absolutize::*; use path_absolutize::*;
use scan_fmt;
use std::fs::File; use std::fs::File;
use std::io::{BufRead, BufReader}; use std::io::{BufRead, BufReader};
use crate::container::DEFAULT_DEVICES; use crate::container::DEFAULT_DEVICES;
use crate::sync::write_count; use crate::sync::write_count;
use lazy_static;
use std::string::ToString; use std::string::ToString;
use crate::log_child; use crate::log_child;
@ -50,7 +48,7 @@ pub struct Info {
vfs_opts: String, vfs_opts: String,
} }
const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}"; const MOUNTINFOFORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";
const PROC_PATH: &str = "/proc"; const PROC_PATH: &str = "/proc";
// since libc didn't defined this const for musl, thus redefined it here. // since libc didn't defined this const for musl, thus redefined it here.
@ -153,7 +151,7 @@ pub fn init_rootfs(
let linux = &spec let linux = &spec
.linux .linux
.as_ref() .as_ref()
.ok_or::<Error>(anyhow!("Could not get linux configuration from spec"))?; .ok_or_else(|| anyhow!("Could not get linux configuration from spec"))?;
let mut flags = MsFlags::MS_REC; let mut flags = MsFlags::MS_REC;
match PROPAGATION.get(&linux.rootfs_propagation.as_str()) { match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {
@ -164,14 +162,14 @@ pub fn init_rootfs(
let root = spec let root = spec
.root .root
.as_ref() .as_ref()
.ok_or(anyhow!("Could not get rootfs path from spec")) .ok_or_else(|| anyhow!("Could not get rootfs path from spec"))
.and_then(|r| { .and_then(|r| {
fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path") fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path")
})?; })?;
let rootfs = (*root) let rootfs = (*root)
.to_str() .to_str()
.ok_or(anyhow!("Could not convert rootfs path to string"))?; .ok_or_else(|| anyhow!("Could not convert rootfs path to string"))?;
mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?; mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;
@ -187,7 +185,7 @@ pub fn init_rootfs(
for m in &spec.mounts { for m in &spec.mounts {
let (mut flags, data) = parse_mount(&m); let (mut flags, data) = parse_mount(&m);
if !m.destination.starts_with("/") || m.destination.contains("..") { if !m.destination.starts_with('/') || m.destination.contains("..") {
return Err(anyhow!( return Err(anyhow!(
"the mount destination {} is invalid", "the mount destination {} is invalid",
m.destination m.destination
@ -273,9 +271,9 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
// only allow a mount on-top of proc if it's source is "proc" // only allow a mount on-top of proc if it's source is "proc"
unsafe { unsafe {
let mut stats = MaybeUninit::<libc::statfs>::uninit(); let mut stats = MaybeUninit::<libc::statfs>::uninit();
if let Ok(_) = m if m.source
.source
.with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr())) .with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
.is_ok()
{ {
if stats.assume_init().f_type == PROC_SUPER_MAGIC { if stats.assume_init().f_type == PROC_SUPER_MAGIC {
return Ok(()); return Ok(());
@ -298,7 +296,7 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
))); )));
} }
return Ok(()); Ok(())
} }
fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> { fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> {
@ -586,15 +584,14 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
let abs_root_buf = root_path.absolutize()?; let abs_root_buf = root_path.absolutize()?;
let abs_root = abs_root_buf let abs_root = abs_root_buf
.to_str() .to_str()
.ok_or::<Error>(anyhow!("failed to parse {} to absolute path", rootfs))?; .ok_or_else(|| anyhow!("failed to parse {} to absolute path", rootfs))?;
for info in mount_infos.iter() { for info in mount_infos.iter() {
let mount_point = Path::new(&info.mount_point); let mount_point = Path::new(&info.mount_point);
let abs_mount_buf = mount_point.absolutize()?; let abs_mount_buf = mount_point.absolutize()?;
let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(anyhow!( let abs_mount_point = abs_mount_buf
"failed to parse {} to absolute path", .to_str()
info.mount_point .ok_or_else(|| anyhow!("failed to parse {} to absolute path", info.mount_point))?;
))?;
let abs_mount_point_string = String::from(abs_mount_point); let abs_mount_point_string = String::from(abs_mount_point);
// Umount every syfs and proc file systems, except those under the container rootfs // Umount every syfs and proc file systems, except those under the container rootfs
@ -755,7 +752,7 @@ fn mount_from(
Ok(()) Ok(())
} }
static SYMLINKS: &'static [(&'static str, &'static str)] = &[ static SYMLINKS: &[(&str, &str)] = &[
("/proc/self/fd", "dev/fd"), ("/proc/self/fd", "dev/fd"),
("/proc/self/fd/0", "dev/stdin"), ("/proc/self/fd/0", "dev/stdin"),
("/proc/self/fd/1", "dev/stdout"), ("/proc/self/fd/1", "dev/stdout"),
@ -888,7 +885,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
} }
fn mask_path(path: &str) -> Result<()> { fn mask_path(path: &str) -> Result<()> {
if !path.starts_with("/") || path.contains("..") { if !path.starts_with('/') || path.contains("..") {
return Err(nix::Error::Sys(Errno::EINVAL).into()); return Err(nix::Error::Sys(Errno::EINVAL).into());
} }
@ -917,7 +914,7 @@ fn mask_path(path: &str) -> Result<()> {
} }
fn readonly_path(path: &str) -> Result<()> { fn readonly_path(path: &str) -> Result<()> {
if !path.starts_with("/") || path.contains("..") { if !path.starts_with('/') || path.contains("..") {
return Err(nix::Error::Sys(Errno::EINVAL).into()); return Err(nix::Error::Sys(Errno::EINVAL).into());
} }
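The repeated `ok_or(anyhow!(...))` to `ok_or_else(|| anyhow!(...))` edits follow clippy::or_fun_call: the eager form builds the error value even when the Option is Some. A minimal sketch assuming the anyhow crate, which this code already depends on:

```rust
use anyhow::{anyhow, Result};

fn rootfs_path(root: Option<&str>) -> Result<String> {
    // Before: root.map(...).ok_or(anyhow!("Could not get rootfs path from spec"))
    root.map(str::to_string)
        .ok_or_else(|| anyhow!("Could not get rootfs path from spec"))
}

fn main() {
    println!("{:?}", rootfs_path(Some("/run/kata-containers/rootfs")));
    println!("{:?}", rootfs_path(None));
}
```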

View File

@ -88,14 +88,14 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]]; let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
let msg: i32 = i32::from_be_bytes(buf_array); let msg: i32 = i32::from_be_bytes(buf_array);
match msg { match msg {
SYNC_SUCCESS => return Ok(Vec::new()), SYNC_SUCCESS => Ok(Vec::new()),
SYNC_DATA => { SYNC_DATA => {
let buf = read_count(fd, MSG_SIZE)?; let buf = read_count(fd, MSG_SIZE)?;
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]]; let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
let msg_length: i32 = i32::from_be_bytes(buf_array); let msg_length: i32 = i32::from_be_bytes(buf_array);
let data_buf = read_count(fd, msg_length as usize)?; let data_buf = read_count(fd, msg_length as usize)?;
return Ok(data_buf); Ok(data_buf)
} }
SYNC_FAILED => { SYNC_FAILED => {
let mut error_buf = vec![]; let mut error_buf = vec![];
@ -119,9 +119,9 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
} }
}; };
return Err(anyhow!(error_str)); Err(anyhow!(error_str))
} }
_ => return Err(anyhow!("error in receive sync message")), _ => Err(anyhow!("error in receive sync message")),
} }
} }
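Dropping `return` from the match arms in read_sync is the clippy::needless_return cleanup: the arm's final expression is already the value of the whole match. A self-contained sketch with made-up message codes:

```rust
fn decode(msg: i32) -> Result<Vec<u8>, String> {
    match msg {
        0 => Ok(Vec::new()),    // was: return Ok(Vec::new());
        1 => Ok(vec![1, 2, 3]), // was: return Ok(data_buf);
        _ => Err("error in receive sync message".to_string()),
    }
}

fn main() {
    println!("{:?}", decode(0));
    println!("{:?}", decode(9));
}
```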

View File

@ -5,13 +5,12 @@
use crate::container::Config; use crate::container::Config;
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};
use lazy_static;
use nix::errno::Errno; use nix::errno::Errno;
use oci::{LinuxIDMapping, LinuxNamespace, Spec}; use oci::{LinuxIDMapping, LinuxNamespace, Spec};
use std::collections::HashMap; use std::collections::HashMap;
use std::path::{Component, PathBuf}; use std::path::{Component, PathBuf};
fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool { fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
for ns in nses { for ns in nses {
if ns.r#type.as_str() == key { if ns.r#type.as_str() == key {
return true; return true;
@ -21,7 +20,7 @@ fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
false false
} }
fn get_namespace_path(nses: &Vec<LinuxNamespace>, key: &str) -> Result<String> { fn get_namespace_path(nses: &[LinuxNamespace], key: &str) -> Result<String> {
for ns in nses { for ns in nses {
if ns.r#type.as_str() == key { if ns.r#type.as_str() == key {
return Ok(ns.path.clone()); return Ok(ns.path.clone());
@ -41,10 +40,8 @@ fn rootfs(root: &str) -> Result<()> {
// symbolic link? ..? // symbolic link? ..?
let mut stack: Vec<String> = Vec::new(); let mut stack: Vec<String> = Vec::new();
for c in path.components() { for c in path.components() {
if stack.is_empty() { if stack.is_empty() && (c == Component::RootDir || c == Component::ParentDir) {
if c == Component::RootDir || c == Component::ParentDir { continue;
continue;
}
} }
if c == Component::ParentDir { if c == Component::ParentDir {
@ -74,7 +71,7 @@ fn network(_oci: &Spec) -> Result<()> {
} }
fn hostname(oci: &Spec) -> Result<()> { fn hostname(oci: &Spec) -> Result<()> {
if oci.hostname.is_empty() || oci.hostname == "".to_string() { if oci.hostname.is_empty() || oci.hostname == "" {
return Ok(()); return Ok(());
} }
@ -91,7 +88,7 @@ fn hostname(oci: &Spec) -> Result<()> {
fn security(oci: &Spec) -> Result<()> { fn security(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap(); let linux = oci.linux.as_ref().unwrap();
if linux.masked_paths.len() == 0 && linux.readonly_paths.len() == 0 { if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() {
return Ok(()); return Ok(());
} }
@ -104,7 +101,7 @@ fn security(oci: &Spec) -> Result<()> {
Ok(()) Ok(())
} }
fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> { fn idmapping(maps: &[LinuxIDMapping]) -> Result<()> {
for map in maps { for map in maps {
if map.size > 0 { if map.size > 0 {
return Ok(()); return Ok(());
@ -127,7 +124,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
idmapping(&linux.gid_mappings)?; idmapping(&linux.gid_mappings)?;
} else { } else {
// no user namespace but idmap // no user namespace but idmap
if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 { if !linux.uid_mappings.is_empty() || !linux.gid_mappings.is_empty() {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
} }
@ -197,7 +194,7 @@ fn sysctl(oci: &Spec) -> Result<()> {
} }
let net = get_namespace_path(&linux.namespaces, "network")?; let net = get_namespace_path(&linux.namespaces, "network")?;
if net.is_empty() || net == "".to_string() { if net.is_empty() || net == "" {
continue; continue;
} }
@ -225,7 +222,7 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
if linux.uid_mappings.len() == 0 || linux.gid_mappings.len() == 0 { if linux.uid_mappings.is_empty() || linux.gid_mappings.is_empty() {
// rootless containers requires at least one UID/GID mapping // rootless containers requires at least one UID/GID mapping
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
@ -233,7 +230,7 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
Ok(()) Ok(())
} }
fn has_idmapping(maps: &Vec<LinuxIDMapping>, id: u32) -> bool { fn has_idmapping(maps: &[LinuxIDMapping], id: u32) -> bool {
for map in maps { for map in maps {
if id >= map.container_id && id < map.container_id + map.size { if id >= map.container_id && id < map.container_id + map.size {
return true; return true;
@ -256,16 +253,12 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
let id = fields[1].trim().parse::<u32>()?; let id = fields[1].trim().parse::<u32>()?;
if opt.starts_with("uid=") { if opt.starts_with("uid=") && !has_idmapping(&linux.uid_mappings, id) {
if !has_idmapping(&linux.uid_mappings, id) { return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
} }
if opt.starts_with("gid=") { if opt.starts_with("gid=") && !has_idmapping(&linux.gid_mappings, id) {
if !has_idmapping(&linux.gid_mappings, id) { return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
} }
} }
} }
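The uid=/gid= checks above are clippy::collapsible_if fixes: a nested `if` with nothing else in the outer body folds into one condition joined by `&&`. A sketch with a simplified error type:

```rust
fn check_uid_opt(opt: &str, has_mapping: bool) -> Result<(), String> {
    // Before:
    //   if opt.starts_with("uid=") {
    //       if !has_mapping { return Err(...); }
    //   }
    if opt.starts_with("uid=") && !has_mapping {
        return Err("EINVAL".to_string());
    }
    Ok(())
}

fn main() {
    println!("{:?}", check_uid_opt("uid=1000", false));
    println!("{:?}", check_uid_opt("uid=1000", true));
}
```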

View File

@ -144,7 +144,7 @@ impl agentConfig {
} }
fn get_vsock_port(p: &str) -> Result<i32> { fn get_vsock_port(p: &str) -> Result<i32> {
let fields: Vec<&str> = p.split("=").collect(); let fields: Vec<&str> = p.split('=').collect();
if fields.len() != 2 { if fields.len() != 2 {
return Err(anyhow!("invalid port parameter")); return Err(anyhow!("invalid port parameter"));
} }
@ -180,7 +180,7 @@ fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
} }
fn get_log_level(param: &str) -> Result<slog::Level> { fn get_log_level(param: &str) -> Result<slog::Level> {
let fields: Vec<&str> = param.split("=").collect(); let fields: Vec<&str> = param.split('=').collect();
if fields.len() != 2 { if fields.len() != 2 {
return Err(anyhow!("invalid log level parameter")); return Err(anyhow!("invalid log level parameter"));
@ -194,7 +194,7 @@ fn get_log_level(param: &str) -> Result<slog::Level> {
} }
fn get_hotplug_timeout(param: &str) -> Result<time::Duration> { fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
let fields: Vec<&str> = param.split("=").collect(); let fields: Vec<&str> = param.split('=').collect();
if fields.len() != 2 { if fields.len() != 2 {
return Err(anyhow!("invalid hotplug timeout parameter")); return Err(anyhow!("invalid hotplug timeout parameter"));
@ -214,7 +214,7 @@ fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
} }
fn get_bool_value(param: &str) -> Result<bool> { fn get_bool_value(param: &str) -> Result<bool> {
let fields: Vec<&str> = param.split("=").collect(); let fields: Vec<&str> = param.split('=').collect();
if fields.len() != 2 { if fields.len() != 2 {
return Ok(false); return Ok(false);
@ -225,18 +225,14 @@ fn get_bool_value(param: &str) -> Result<bool> {
// first try to parse as bool value // first try to parse as bool value
v.parse::<bool>().or_else(|_err1| { v.parse::<bool>().or_else(|_err1| {
// then try to parse as integer value // then try to parse as integer value
v.parse::<u64>().or_else(|_err2| Ok(0)).and_then(|v| { v.parse::<u64>()
// only `0` returns false, otherwise returns true .or_else(|_err2| Ok(0))
Ok(match v { .map(|v| !matches!(v, 0))
0 => false,
_ => true,
})
})
}) })
} }
fn get_container_pipe_size(param: &str) -> Result<i32> { fn get_container_pipe_size(param: &str) -> Result<i32> {
let fields: Vec<&str> = param.split("=").collect(); let fields: Vec<&str> = param.split('=').collect();
if fields.len() != 2 { if fields.len() != 2 {
return Err(anyhow!("invalid container pipe size parameter")); return Err(anyhow!("invalid container pipe size parameter"));
@ -634,10 +630,10 @@ mod tests {
let filename = file_path.to_str().expect("failed to create filename"); let filename = file_path.to_str().expect("failed to create filename");
let mut file = let mut file =
File::create(filename).expect(&format!("{}: failed to create file", msg)); File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes()) file.write_all(d.contents.as_bytes())
.expect(&format!("{}: failed to write file contents", msg)); .unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
let mut config = agentConfig::new(); let mut config = agentConfig::new();
assert_eq!(config.debug_console, false, "{}", msg); assert_eq!(config.debug_console, false, "{}", msg);
@ -737,7 +733,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result); let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, format!("{}", msg)); assert_result!(d.result, result, msg);
} }
} }
@ -831,7 +827,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result); let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, format!("{}", msg)); assert_result!(d.result, result, msg);
} }
} }
@ -901,7 +897,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result); let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, format!("{}", msg)); assert_result!(d.result, result, msg);
} }
} }
@ -975,7 +971,7 @@ mod tests {
let msg = format!("{}: result: {:?}", msg, result); let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, format!("{}", msg)); assert_result!(d.result, result, msg);
} }
} }
} }
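In the config tests, `.expect(&format!(...))` becomes `.unwrap_or_else(|_| panic!(...))`, the clippy::expect_fun_call fix: the old form renders the panic message even on success. A sketch (hypothetical temp path, not the test's real tempdir):

```rust
use std::fs::File;

fn create_for_test(path: &str, msg: &str) -> File {
    // Before: File::create(path).expect(&format!("{}: failed to create file", msg))
    File::create(path).unwrap_or_else(|_| panic!("{}: failed to create file", msg))
}

fn main() {
    let f = create_for_test("/tmp/kata-agent-clippy-example", "test case 1");
    drop(f);
}
```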

View File

@ -38,8 +38,8 @@ struct DevIndex(HashMap<String, DevIndexEntry>);
// DeviceHandler is the type of callback to be defined to handle every type of device driver. // DeviceHandler is the type of callback to be defined to handle every type of device driver.
type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>, &DevIndex) -> Result<()>; type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>, &DevIndex) -> Result<()>;
// DeviceHandlerList lists the supported drivers. // DEVICEHANDLERLIST lists the supported drivers.
#[cfg_attr(rustfmt, rustfmt_skip)] #[rustfmt::skip]
lazy_static! { lazy_static! {
static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = { static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = {
let mut m: HashMap<&'static str, DeviceHandler> = HashMap::new(); let mut m: HashMap<&'static str, DeviceHandler> = HashMap::new();
@ -65,7 +65,7 @@ pub fn online_device(path: &str) -> Result<()> {
// Here, bridgeAddr is the address at which the bridge is attached on the root bus, // Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the device is attached on the bridge. // while deviceAddr is the address at which the device is attached on the bridge.
fn get_pci_device_address(pci_id: &str) -> Result<String> { fn get_pci_device_address(pci_id: &str) -> Result<String> {
let tokens: Vec<&str> = pci_id.split("/").collect(); let tokens: Vec<&str> = pci_id.split('/').collect();
if tokens.len() != 2 { if tokens.len() != 2 {
return Err(anyhow!( return Err(anyhow!(
@ -165,7 +165,7 @@ pub fn get_pci_device_name(sandbox: &Arc<Mutex<Sandbox>>, pci_id: &str) -> Resul
/// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN) /// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN)
fn scan_scsi_bus(scsi_addr: &str) -> Result<()> { fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
let tokens: Vec<&str> = scsi_addr.split(":").collect(); let tokens: Vec<&str> = scsi_addr.split(':').collect();
if tokens.len() != 2 { if tokens.len() != 2 {
return Err(anyhow!( return Err(anyhow!(
"Unexpected format for SCSI Address: {}, expect SCSIID:LUA", "Unexpected format for SCSI Address: {}, expect SCSIID:LUA",
@ -336,11 +336,11 @@ impl DevIndex {
fn new(spec: &Spec) -> DevIndex { fn new(spec: &Spec) -> DevIndex {
let mut map = HashMap::new(); let mut map = HashMap::new();
for linux in spec.linux.as_ref() { if let Some(linux) = spec.linux.as_ref() {
for (i, d) in linux.devices.iter().enumerate() { for (i, d) in linux.devices.iter().enumerate() {
let mut residx = Vec::new(); let mut residx = Vec::new();
for linuxres in linux.resources.as_ref() { if let Some(linuxres) = linux.resources.as_ref() {
for (j, r) in linuxres.devices.iter().enumerate() { for (j, r) in linuxres.devices.iter().enumerate() {
if r.r#type == d.r#type if r.r#type == d.r#type
&& r.major == Some(d.major) && r.major == Some(d.major)
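`for linux in spec.linux.as_ref()` iterated an Option, which runs the body at most once; clippy warns on for loops over Option/Result, and `if let Some(..)` states the intent directly. A sketch with simplified types in place of the OCI spec structs:

```rust
fn count_devices(devices: Option<&[String]>) -> usize {
    let mut n = 0;
    // Before: for devs in devices { n = devs.len(); }
    if let Some(devs) = devices {
        n = devs.len();
    }
    n
}

fn main() {
    let devs = vec!["vda".to_string(), "vdb".to_string()];
    println!("{}", count_devices(Some(devs.as_slice())));
    println!("{}", count_devices(None));
}
```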

View File

@ -246,8 +246,8 @@ fn start_sandbox(logger: &Logger, config: &agentConfig, init_mode: bool) -> Resu
let (tx, rx) = mpsc::channel::<i32>(); let (tx, rx) = mpsc::channel::<i32>();
sandbox.lock().unwrap().sender = Some(tx); sandbox.lock().unwrap().sender = Some(tx);
//vsock:///dev/vsock, port // vsock:///dev/vsock, port
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str()); let mut server = rpc::start(sandbox, config.server_addr.as_str());
let _ = server.start().unwrap(); let _ = server.start().unwrap();
@ -272,8 +272,6 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
let signals = Signals::new(&[SIGCHLD])?; let signals = Signals::new(&[SIGCHLD])?;
let s = sandbox.clone();
thread::spawn(move || { thread::spawn(move || {
'outer: for sig in signals.forever() { 'outer: for sig in signals.forever() {
info!(logger, "received signal"; "signal" => sig); info!(logger, "received signal"; "signal" => sig);
@ -303,13 +301,13 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
}; };
let pid = wait_status.pid(); let pid = wait_status.pid();
if pid.is_some() { if let Some(pid) = pid {
let raw_pid = pid.unwrap().as_raw(); let raw_pid = pid.as_raw();
let child_pid = format!("{}", raw_pid); let child_pid = format!("{}", raw_pid);
let logger = logger.new(o!("child-pid" => child_pid)); let logger = logger.new(o!("child-pid" => child_pid));
let mut sandbox = s.lock().unwrap(); let mut sandbox = sandbox.lock().unwrap();
let process = sandbox.find_process(raw_pid); let process = sandbox.find_process(raw_pid);
if process.is_none() { if process.is_none() {
info!(logger, "child exited unexpectedly"); info!(logger, "child exited unexpectedly");
@ -366,7 +364,8 @@ fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result
env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/"); env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/");
let contents = std::fs::read_to_string("/etc/hostname").unwrap_or(String::from("localhost")); let contents =
std::fs::read_to_string("/etc/hostname").unwrap_or_else(|_| String::from("localhost"));
let contents_array: Vec<&str> = contents.split(' ').collect(); let contents_array: Vec<&str> = contents.split(' ').collect();
let hostname = contents_array[0].trim(); let hostname = contents_array[0].trim();
@ -481,8 +480,8 @@ where
// write and return // write and return
match writer.write_all(&buf[..buf_len]) { match writer.write_all(&buf[..buf_len]) {
Ok(_) => return Ok(buf_len as u64), Ok(_) => Ok(buf_len as u64),
Err(err) => return Err(err), Err(err) => Err(err),
} }
} }
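The signal-handler change replaces an `is_some()` check followed by `unwrap()` with `if let Some(pid) = pid`, the pattern clippy::unnecessary_unwrap points at (the dropped `sandbox.clone()` in the same hunk is a separate redundant-clone cleanup). A sketch of the pattern only, with a plain i32 standing in for nix's Pid:

```rust
fn child_label(pid: Option<i32>) -> String {
    // Before:
    //   if pid.is_some() {
    //       let raw_pid = pid.unwrap();
    //       ...
    //   }
    if let Some(raw_pid) = pid {
        format!("child-pid {}", raw_pid)
    } else {
        "child exited unexpectedly".to_string()
    }
}

fn main() {
    println!("{}", child_label(Some(4321)));
    println!("{}", child_label(None));
}
```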

View File

@ -8,7 +8,6 @@ extern crate procfs;
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder}; use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};
use anyhow::Result; use anyhow::Result;
use protocols;
const NAMESPACE_KATA_AGENT: &str = "kata_agent"; const NAMESPACE_KATA_AGENT: &str = "kata_agent";
const NAMESPACE_KATA_GUEST: &str = "kata_guest"; const NAMESPACE_KATA_GUEST: &str = "kata_guest";
@ -85,17 +84,15 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
let encoder = TextEncoder::new(); let encoder = TextEncoder::new();
encoder.encode(&metric_families, &mut buffer).unwrap(); encoder.encode(&metric_families, &mut buffer).unwrap();
Ok(String::from_utf8(buffer.clone()).unwrap()) Ok(String::from_utf8(buffer).unwrap())
} }
fn update_agent_metrics() { fn update_agent_metrics() {
let me = procfs::process::Process::myself(); let me = procfs::process::Process::myself();
match me {
Err(err) => { if let Err(err) = me {
error!(sl!(), "failed to create process instance: {:?}", err); error!(sl!(), "failed to create process instance: {:?}", err);
return; return;
}
Ok(_) => {}
} }
let me = me.unwrap(); let me = me.unwrap();

View File

@ -39,7 +39,7 @@ pub const DRIVERLOCALTYPE: &str = "local";
pub const TYPEROOTFS: &str = "rootfs"; pub const TYPEROOTFS: &str = "rootfs";
#[cfg_attr(rustfmt, rustfmt_skip)] #[rustfmt::skip]
lazy_static! { lazy_static! {
pub static ref FLAGS: HashMap<&'static str, (bool, MsFlags)> = { pub static ref FLAGS: HashMap<&'static str, (bool, MsFlags)> = {
let mut m = HashMap::new(); let mut m = HashMap::new();
@ -88,7 +88,7 @@ pub struct INIT_MOUNT {
options: Vec<&'static str>, options: Vec<&'static str>,
} }
#[cfg_attr(rustfmt, rustfmt_skip)] #[rustfmt::skip]
lazy_static!{ lazy_static!{
static ref CGROUPS: HashMap<&'static str, &'static str> = { static ref CGROUPS: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new(); let mut m = HashMap::new();
@ -109,7 +109,7 @@ lazy_static!{
}; };
} }
#[cfg_attr(rustfmt, rustfmt_skip)] #[rustfmt::skip]
lazy_static! { lazy_static! {
pub static ref INIT_ROOTFS_MOUNTS: Vec<INIT_MOUNT> = vec![ pub static ref INIT_ROOTFS_MOUNTS: Vec<INIT_MOUNT> = vec![
INIT_MOUNT{fstype: "proc", src: "proc", dest: "/proc", options: vec!["nosuid", "nodev", "noexec"]}, INIT_MOUNT{fstype: "proc", src: "proc", dest: "/proc", options: vec!["nosuid", "nodev", "noexec"]},
@ -126,7 +126,7 @@ lazy_static! {
type StorageHandler = fn(&Logger, &Storage, Arc<Mutex<Sandbox>>) -> Result<String>; type StorageHandler = fn(&Logger, &Storage, Arc<Mutex<Sandbox>>) -> Result<String>;
// STORAGEHANDLERLIST lists the supported drivers. // STORAGEHANDLERLIST lists the supported drivers.
#[cfg_attr(rustfmt, rustfmt_skip)] #[rustfmt::skip]
lazy_static! { lazy_static! {
pub static ref STORAGEHANDLERLIST: HashMap<&'static str, StorageHandler> = { pub static ref STORAGEHANDLERLIST: HashMap<&'static str, StorageHandler> = {
let mut m = HashMap::new(); let mut m = HashMap::new();
@ -173,9 +173,9 @@ impl<'a> BareMount<'a> {
BareMount { BareMount {
source: s, source: s,
destination: d, destination: d,
fs_type: fs_type, fs_type,
flags: flags, flags,
options: options, options,
logger: logger.new(o!("subsystem" => "baremount")), logger: logger.new(o!("subsystem" => "baremount")),
} }
} }
@ -190,11 +190,11 @@ impl<'a> BareMount<'a> {
let cstr_dest: CString; let cstr_dest: CString;
let cstr_fs_type: CString; let cstr_fs_type: CString;
if self.source.len() == 0 { if self.source.is_empty() {
return Err(anyhow!("need mount source")); return Err(anyhow!("need mount source"));
} }
if self.destination.len() == 0 { if self.destination.is_empty() {
return Err(anyhow!("need mount destination")); return Err(anyhow!("need mount destination"));
} }
@ -204,14 +204,14 @@ impl<'a> BareMount<'a> {
cstr_dest = CString::new(self.destination)?; cstr_dest = CString::new(self.destination)?;
dest = cstr_dest.as_ptr(); dest = cstr_dest.as_ptr();
if self.fs_type.len() == 0 { if self.fs_type.is_empty() {
return Err(anyhow!("need mount FS type")); return Err(anyhow!("need mount FS type"));
} }
cstr_fs_type = CString::new(self.fs_type)?; cstr_fs_type = CString::new(self.fs_type)?;
fs_type = cstr_fs_type.as_ptr(); fs_type = cstr_fs_type.as_ptr();
if self.options.len() > 0 { if !self.options.is_empty() {
cstr_options = CString::new(self.options)?; cstr_options = CString::new(self.options)?;
options = cstr_options.as_ptr() as *const c_void; options = cstr_options.as_ptr() as *const c_void;
} }
@ -243,8 +243,7 @@ fn ephemeral_storage_handler(
storage: &Storage, storage: &Storage,
sandbox: Arc<Mutex<Sandbox>>, sandbox: Arc<Mutex<Sandbox>>,
) -> Result<String> { ) -> Result<String> {
let s = sandbox.clone(); let mut sb = sandbox.lock().unwrap();
let mut sb = s.lock().unwrap();
let new_storage = sb.set_sandbox_storage(&storage.mount_point); let new_storage = sb.set_sandbox_storage(&storage.mount_point);
if !new_storage { if !new_storage {
@ -262,8 +261,7 @@ fn local_storage_handler(
storage: &Storage, storage: &Storage,
sandbox: Arc<Mutex<Sandbox>>, sandbox: Arc<Mutex<Sandbox>>,
) -> Result<String> { ) -> Result<String> {
let s = sandbox.clone(); let mut sb = sandbox.lock().unwrap();
let mut sb = s.lock().unwrap();
let new_storage = sb.set_sandbox_storage(&storage.mount_point); let new_storage = sb.set_sandbox_storage(&storage.mount_point);
if !new_storage { if !new_storage {
@ -279,8 +277,7 @@ fn local_storage_handler(
let opts = parse_options(opts_vec); let opts = parse_options(opts_vec);
let mode = opts.get("mode"); let mode = opts.get("mode");
if mode.is_some() { if let Some(mode) = mode {
let mode = mode.unwrap();
let mut permission = fs::metadata(&storage.mount_point)?.permissions(); let mut permission = fs::metadata(&storage.mount_point)?.permissions();
let o_mode = u32::from_str_radix(mode, 8)?; let o_mode = u32::from_str_radix(mode, 8)?;
@ -410,17 +407,17 @@ fn parse_mount_flags_and_options(options_vec: Vec<&str>) -> (MsFlags, String) {
let mut options: String = "".to_string(); let mut options: String = "".to_string();
for opt in options_vec { for opt in options_vec {
if opt.len() != 0 { if !opt.is_empty() {
match FLAGS.get(opt) { match FLAGS.get(opt) {
Some(x) => { Some(x) => {
let (_, f) = *x; let (_, f) = *x;
flags = flags | f; flags |= f;
} }
None => { None => {
if options.len() > 0 { if !options.is_empty() {
options.push_str(format!(",{}", opt).as_str()); options.push_str(format!(",{}", opt).as_str());
} else { } else {
options.push_str(format!("{}", opt).as_str()); options.push_str(opt.to_string().as_str());
} }
} }
}; };
@ -458,7 +455,7 @@ pub fn add_storages(
// Todo need to rollback the mounted storage if err met. // Todo need to rollback the mounted storage if err met.
let mount_point = handler(&logger, &storage, sandbox.clone())?; let mount_point = handler(&logger, &storage, sandbox.clone())?;
if mount_point.len() > 0 { if !mount_point.is_empty() {
mount_list.push(mount_point); mount_list.push(mount_point);
} }
} }
@ -570,10 +567,10 @@ pub fn get_cgroup_mounts(
'outer: for (_, line) in reader.lines().enumerate() { 'outer: for (_, line) in reader.lines().enumerate() {
let line = line?; let line = line?;
let fields: Vec<&str> = line.split("\t").collect(); let fields: Vec<&str> = line.split('\t').collect();
// Ignore comment header // Ignore comment header
if fields[0].starts_with("#") { if fields[0].starts_with('#') {
continue; continue;
} }
@ -643,7 +640,7 @@ pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<
Ok(()) Ok(())
} }
pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> { pub fn remove_mounts(mounts: &[String]) -> Result<()> {
for m in mounts.iter() { for m in mounts.iter() {
mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?; mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
} }
@ -675,7 +672,7 @@ fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
fn parse_options(option_list: Vec<String>) -> HashMap<String, String> { fn parse_options(option_list: Vec<String>) -> HashMap<String, String> {
let mut options = HashMap::new(); let mut options = HashMap::new();
for opt in option_list.iter() { for opt in option_list.iter() {
let fields: Vec<&str> = opt.split("=").collect(); let fields: Vec<&str> = opt.split('=').collect();
if fields.len() != 2 { if fields.len() != 2 {
continue; continue;
} }
@ -856,7 +853,7 @@ mod tests {
let msg = format!("{}: umount result: {:?}", msg, result); let msg = format!("{}: umount result: {:?}", msg, result);
assert!(ret == 0, format!("{}", msg)); assert!(ret == 0, msg);
}; };
continue; continue;
@ -914,7 +911,8 @@ mod tests {
.expect("failed to create mount destination filename"); .expect("failed to create mount destination filename");
for d in [test_dir_filename, mnt_src_filename, mnt_dest_filename].iter() { for d in [test_dir_filename, mnt_src_filename, mnt_dest_filename].iter() {
std::fs::create_dir_all(d).expect(&format!("failed to create directory {}", d)); std::fs::create_dir_all(d)
.unwrap_or_else(|_| panic!("failed to create directory {}", d));
} }
// Create an actual mount // Create an actual mount
@ -1055,13 +1053,13 @@ mod tests {
let filename = file_path let filename = file_path
.to_str() .to_str()
.expect(&format!("{}: failed to create filename", msg)); .unwrap_or_else(|| panic!("{}: failed to create filename", msg));
let mut file = let mut file =
File::create(filename).expect(&format!("{}: failed to create file", msg)); File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes()) file.write_all(d.contents.as_bytes())
.expect(&format!("{}: failed to write file contents", msg)); .unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
let result = get_mount_fs_type_from_file(filename, d.mount_point); let result = get_mount_fs_type_from_file(filename, d.mount_point);
@ -1217,10 +1215,10 @@ mod tests {
.expect("failed to create cgroup file filename"); .expect("failed to create cgroup file filename");
let mut file = let mut file =
File::create(filename).expect(&format!("{}: failed to create file", msg)); File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));
file.write_all(d.contents.as_bytes()) file.write_all(d.contents.as_bytes())
.expect(&format!("{}: failed to write file contents", msg)); .unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));
let result = get_cgroup_mounts(&logger, filename, false); let result = get_cgroup_mounts(&logger, filename, false);
let msg = format!("{}: result: {:?}", msg, result); let msg = format!("{}: result: {:?}", msg, result);
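This file also switches `#[cfg_attr(rustfmt, rustfmt_skip)]` to the stable `#[rustfmt::skip]` form and applies the clippy::redundant_field_names shorthand in the BareMount constructor. A sketch of the shorthand with simplified field types (the real struct uses MsFlags and a logger):

```rust
struct BareMount<'a> {
    source: &'a str,
    fs_type: &'a str,
    flags: u64,
}

fn new_mount<'a>(source: &'a str, fs_type: &'a str, flags: u64) -> BareMount<'a> {
    // Before: BareMount { source: source, fs_type: fs_type, flags: flags }
    BareMount { source, fs_type, flags }
}

fn main() {
    let m = new_mount("proc", "proc", 0);
    println!("{} {} {}", m.source, m.fs_type, m.flags);
}
```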

View File

@ -52,12 +52,12 @@ impl Namespace {
} }
} }
pub fn as_ipc(mut self) -> Self { pub fn get_ipc(mut self) -> Self {
self.ns_type = NamespaceType::IPC; self.ns_type = NamespaceType::IPC;
self self
} }
pub fn as_uts(mut self, hostname: &str) -> Self { pub fn get_uts(mut self, hostname: &str) -> Self {
self.ns_type = NamespaceType::UTS; self.ns_type = NamespaceType::UTS;
if hostname != "" { if hostname != "" {
self.hostname = Some(String::from(hostname)); self.hostname = Some(String::from(hostname));
@ -65,7 +65,7 @@ impl Namespace {
self self
} }
pub fn as_pid(mut self) -> Self { pub fn get_pid(mut self) -> Self {
self.ns_type = NamespaceType::PID; self.ns_type = NamespaceType::PID;
self self
} }
@ -81,7 +81,7 @@ impl Namespace {
fs::create_dir_all(&self.persistent_ns_dir)?; fs::create_dir_all(&self.persistent_ns_dir)?;
let ns_path = PathBuf::from(&self.persistent_ns_dir); let ns_path = PathBuf::from(&self.persistent_ns_dir);
let ns_type = self.ns_type.clone(); let ns_type = self.ns_type;
let logger = self.logger.clone(); let logger = self.logger.clone();
let new_ns_path = ns_path.join(&ns_type.get()); let new_ns_path = ns_path.join(&ns_type.get());
@ -97,7 +97,7 @@ impl Namespace {
File::open(Path::new(&origin_ns_path))?; File::open(Path::new(&origin_ns_path))?;
// Create a new netns on the current thread. // Create a new netns on the current thread.
let cf = ns_type.get_flags().clone(); let cf = ns_type.get_flags();
unshare(cf)?; unshare(cf)?;
@ -110,12 +110,9 @@ impl Namespace {
let mut flags = MsFlags::empty(); let mut flags = MsFlags::empty();
match FLAGS.get("rbind") { if let Some(x) = FLAGS.get("rbind") {
Some(x) => { let (_, f) = *x;
let (_, f) = *x; flags |= f;
flags = flags | f;
}
None => (),
}; };
let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger); let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
@ -194,23 +191,23 @@ mod tests {
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap(); let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
let ns_ipc = Namespace::new(&logger) let ns_ipc = Namespace::new(&logger)
.as_ipc() .get_ipc()
.set_root_dir(tmpdir.path().to_str().unwrap()) .set_root_dir(tmpdir.path().to_str().unwrap())
.setup(); .setup();
assert!(ns_ipc.is_ok()); assert!(ns_ipc.is_ok());
assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok()); assert!(remove_mounts(&[ns_ipc.unwrap().path]).is_ok());
let logger = slog::Logger::root(slog::Discard, o!()); let logger = slog::Logger::root(slog::Discard, o!());
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap(); let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();
let ns_uts = Namespace::new(&logger) let ns_uts = Namespace::new(&logger)
.as_uts("test_hostname") .get_uts("test_hostname")
.set_root_dir(tmpdir.path().to_str().unwrap()) .set_root_dir(tmpdir.path().to_str().unwrap())
.setup(); .setup();
assert!(ns_uts.is_ok()); assert!(ns_uts.is_ok());
assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok()); assert!(remove_mounts(&[ns_uts.unwrap().path]).is_ok());
} }
#[test] #[test]
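Renaming `as_ipc`/`as_uts`/`as_pid` to `get_ipc`/`get_uts`/`get_pid` looks like a clippy::wrong_self_convention fix: `as_*` methods are expected to borrow `self`, but these builders consume it. A minimal sketch with a hypothetical struct, not the agent's real Namespace type:

```rust
struct Namespace {
    ns_type: &'static str,
}

impl Namespace {
    // Before: fn as_ipc(mut self) -> Self { ... }  (as_* taking self by value)
    fn get_ipc(mut self) -> Self {
        self.ns_type = "ipc";
        self
    }
}

fn main() {
    let ns = Namespace { ns_type: "" }.get_ipc();
    println!("{}", ns.ns_type);
}
```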

View File

@ -48,7 +48,7 @@ pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> { fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
let logger = logger.new(o!( "subsystem" => "network")); let logger = logger.new(o!( "subsystem" => "network"));
if dns_list.len() == 0 { if dns_list.is_empty() {
info!( info!(
logger, logger,
"Did not set sandbox DNS as DNS not received as part of request." "Did not set sandbox DNS as DNS not received as part of request."
@ -117,12 +117,12 @@ mod tests {
]; ];
// write to /run/kata-containers/sandbox/resolv.conf // write to /run/kata-containers/sandbox/resolv.conf
let mut src_file = let mut src_file = File::create(src_filename)
File::create(src_filename).expect(&format!("failed to create file {:?}", src_filename)); .unwrap_or_else(|_| panic!("failed to create file {:?}", src_filename));
let content = dns.join("\n"); let content = dns.join("\n");
src_file src_file
.write_all(content.as_bytes()) .write_all(content.as_bytes())
.expect(&format!("failed to write file contents")); .expect("failed to write file contents");
// call do_setup_guest_dns // call do_setup_guest_dns
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename); let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);

View File

@ -4,7 +4,6 @@
// //
use anyhow::Result; use anyhow::Result;
use libc;
use nix::errno::Errno; use nix::errno::Errno;
use nix::fcntl::{self, OFlag}; use nix::fcntl::{self, OFlag};
use nix::sys::stat::Mode; use nix::sys::stat::Mode;

View File

@ -21,7 +21,6 @@ use protocols::health::{
HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse, HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse,
}; };
use protocols::types::Interface; use protocols::types::Interface;
use rustjail;
use rustjail::cgroups::notifier; use rustjail::cgroups::notifier;
use rustjail::container::{BaseContainer, Container, LinuxContainer}; use rustjail::container::{BaseContainer, Container, LinuxContainer};
use rustjail::process::Process; use rustjail::process::Process;
@ -47,7 +46,6 @@ use crate::AGENT_CONFIG;
use netlink::{RtnlHandle, NETLINK_ROUTE}; use netlink::{RtnlHandle, NETLINK_ROUTE};
use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ}; use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ};
use serde_json;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fs; use std::fs;
use std::os::unix::io::RawFd; use std::os::unix::io::RawFd;
@ -152,14 +150,13 @@ impl agentService {
let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size; let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size;
let p = if oci.process.is_some() { let p = if oci.process.is_some() {
let tp = Process::new( Process::new(
&sl!(), &sl!(),
&oci.process.as_ref().unwrap(), &oci.process.as_ref().unwrap(),
cid.as_str(), cid.as_str(),
true, true,
pipe_size, pipe_size,
)?; )?
tp
} else { } else {
info!(sl!(), "no process configurations!"); info!(sl!(), "no process configurations!");
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));
@ -175,7 +172,7 @@ impl agentService {
} }
fn do_start_container(&self, req: protocols::agent::StartContainerRequest) -> Result<()> { fn do_start_container(&self, req: protocols::agent::StartContainerRequest) -> Result<()> {
let cid = req.container_id.clone(); let cid = req.container_id;
let sandbox = self.sandbox.clone(); let sandbox = self.sandbox.clone();
let mut s = sandbox.lock().unwrap(); let mut s = sandbox.lock().unwrap();
@ -183,7 +180,7 @@ impl agentService {
let ctr = s let ctr = s
.get_container(&cid) .get_container(&cid)
.ok_or(anyhow!("Invalid container id"))?; .ok_or_else(|| anyhow!("Invalid container id"))?;
ctr.exec()?; ctr.exec()?;
@ -206,9 +203,7 @@ impl agentService {
let mut remove_container_resources = |sandbox: &mut Sandbox| -> Result<()> { let mut remove_container_resources = |sandbox: &mut Sandbox| -> Result<()> {
// Find the sandbox storage used by this container // Find the sandbox storage used by this container
let mounts = sandbox.container_mounts.get(&cid); let mounts = sandbox.container_mounts.get(&cid);
if mounts.is_some() { if let Some(mounts) = mounts {
let mounts = mounts.unwrap();
remove_mounts(&mounts)?; remove_mounts(&mounts)?;
for m in mounts.iter() { for m in mounts.iter() {
@@ -232,7 +227,7 @@ impl agentService {
         let mut sandbox = s.lock().unwrap();
         let ctr = sandbox
             .get_container(&cid)
-            .ok_or(anyhow!("Invalid container id"))?;
+            .ok_or_else(|| anyhow!("Invalid container id"))?;
         ctr.destroy()?;
@@ -250,11 +245,11 @@ impl agentService {
             let mut sandbox = s.lock().unwrap();
             let _ctr = sandbox
                 .get_container(&cid2)
-                .ok_or(anyhow!("Invalid container id"))
-                .and_then(|ctr| {
+                .ok_or_else(|| anyhow!("Invalid container id"))
+                .map(|ctr| {
                     ctr.destroy().unwrap();
                     tx.send(1).unwrap();
-                    Ok(ctr)
+                    ctr
                 });
         });
@@ -277,7 +272,7 @@ impl agentService {
         let cid = req.container_id.clone();
         let exec_id = req.exec_id.clone();
-        info!(sl!(), "cid: {} eid: {}", cid.clone(), exec_id.clone());
+        info!(sl!(), "cid: {} eid: {}", cid, exec_id);
         let s = self.sandbox.clone();
         let mut sandbox = s.lock().unwrap();
@@ -294,7 +289,7 @@ impl agentService {
         let ctr = sandbox
             .get_container(&cid)
-            .ok_or(anyhow!("Invalid container id"))?;
+            .ok_or_else(|| anyhow!("Invalid container id"))?;
         ctr.run(p)?;
@@ -340,7 +335,7 @@ impl agentService {
         req: protocols::agent::WaitProcessRequest,
     ) -> Result<protocols::agent::WaitProcessResponse> {
         let cid = req.container_id.clone();
-        let eid = req.exec_id.clone();
+        let eid = req.exec_id;
         let s = self.sandbox.clone();
         let mut resp = WaitProcessResponse::new();
         let pid: pid_t;
@@ -376,7 +371,7 @@ impl agentService {
         let mut sandbox = s.lock().unwrap();
         let ctr = sandbox
             .get_container(&cid)
-            .ok_or(anyhow!("Invalid container id"))?;
+            .ok_or_else(|| anyhow!("Invalid container id"))?;
         let mut p = match ctr.processes.get_mut(&pid) {
             Some(p) => p,
@@ -584,16 +579,18 @@ impl protocols::agent_ttrpc::AgentService for agentService {
     ) -> ttrpc::Result<ListProcessesResponse> {
         let cid = req.container_id.clone();
         let format = req.format.clone();
-        let mut args = req.args.clone().into_vec();
+        let mut args = req.args.into_vec();
         let mut resp = ListProcessesResponse::new();
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
-        let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
-            ttrpc::Code::INVALID_ARGUMENT,
-            "invalid container id".to_string(),
-        ))?;
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
+                ttrpc::Code::INVALID_ARGUMENT,
+                "invalid container id".to_string(),
+            )
+        })?;
         let pids = ctr.processes().unwrap();
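The ok_or to ok_or_else changes in the hunks above address clippy's or_fun_call lint: ok_or builds its error value eagerly, even when the Option is Some, while ok_or_else defers that work to a closure that only runs on None. A minimal sketch with a toy registry and a String error (all names invented):

    // Invented registry; the point is the lazy error construction.
    use std::collections::HashMap;

    fn lookup<'a>(containers: &'a HashMap<String, u32>, cid: &str) -> Result<&'a u32, String> {
        containers
            .get(cid)
            // Was: .ok_or(format!("invalid container id {}", cid)), which
            // allocates the String even when the entry exists.
            .ok_or_else(|| format!("invalid container id {}", cid))
    }

    fn main() {
        let containers: HashMap<String, u32> = HashMap::new();
        println!("{:?}", lookup(&containers, "c1"));
    }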
@@ -612,7 +609,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         }
         // format "table"
-        if args.len() == 0 {
+        if args.is_empty() {
             // default argument
             args = vec!["-ef".to_string()];
         }
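args.is_empty() replaces args.len() == 0 per clippy's len_zero lint; it states the intent directly rather than as arithmetic. A tiny sketch with an invented argument list:

    // Invented helper mirroring the default-argument logic above.
    fn normalize_args(mut args: Vec<String>) -> Vec<String> {
        // Was: if args.len() == 0 { ... }
        if args.is_empty() {
            args.push("-ef".to_string());
        }
        args
    }

    fn main() {
        println!("{:?}", normalize_args(Vec::new()));
    }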
@@ -670,10 +667,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
-        let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
-            ttrpc::Code::INVALID_ARGUMENT,
-            "invalid container id".to_string(),
-        ))?;
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
+                ttrpc::Code::INVALID_ARGUMENT,
+                "invalid container id".to_string(),
+            )
+        })?;
         let resp = Empty::new();
@@ -696,14 +695,16 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::StatsContainerRequest,
     ) -> ttrpc::Result<StatsContainerResponse> {
-        let cid = req.container_id.clone();
+        let cid = req.container_id;
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
-        let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
-            ttrpc::Code::INVALID_ARGUMENT,
-            "invalid container id".to_string(),
-        ))?;
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
+                ttrpc::Code::INVALID_ARGUMENT,
+                "invalid container id".to_string(),
+            )
+        })?;
         ctr.stats()
             .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))
@@ -718,10 +719,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
-        let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
-            ttrpc::Code::INVALID_ARGUMENT,
-            "invalid container id".to_string(),
-        ))?;
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
+                ttrpc::Code::INVALID_ARGUMENT,
+                "invalid container id".to_string(),
+            )
+        })?;
         ctr.pause()
             .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
@@ -738,10 +741,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
-        let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
-            ttrpc::Code::INVALID_ARGUMENT,
-            "invalid container id".to_string(),
-        ))?;
+        let ctr = sandbox.get_container(&cid).ok_or_else(|| {
+            ttrpc_error(
+                ttrpc::Code::INVALID_ARGUMENT,
+                "invalid container id".to_string(),
+            )
+        })?;
         ctr.resume()
             .map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;
@@ -782,7 +787,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         req: protocols::agent::CloseStdinRequest,
     ) -> ttrpc::Result<Empty> {
         let cid = req.container_id.clone();
-        let eid = req.exec_id.clone();
+        let eid = req.exec_id;
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
@@ -852,11 +857,11 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         if req.interface.is_none() {
             return Err(ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
-                format!("empty update interface request"),
+                "empty update interface request".to_string(),
             ));
         }
-        let interface = req.interface.clone();
+        let interface = req.interface;
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
@@ -884,11 +889,11 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         if req.routes.is_none() {
             return Err(ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
-                format!("empty update routes request"),
+                "empty update routes request".to_string(),
             ));
         }
-        let rs = req.routes.clone().unwrap().Routes.into_vec();
+        let rs = req.routes.unwrap().Routes.into_vec();
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
@@ -1002,7 +1007,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             });
         }
-        if req.sandbox_id.len() > 0 {
+        if !req.sandbox_id.is_empty() {
             s.id = req.sandbox_id.clone();
         }
@@ -1028,7 +1033,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             Ok(_) => {
                 let sandbox = self.sandbox.clone();
                 let mut s = sandbox.lock().unwrap();
-                let _ = req
+                let _dns = req
                     .dns
                     .to_vec()
                     .iter()
@@ -1065,11 +1070,11 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         if req.neighbors.is_none() {
             return Err(ttrpc_error(
                 ttrpc::Code::INVALID_ARGUMENT,
-                format!("empty add arp neighbours request"),
+                "empty add arp neighbours request".to_string(),
             ));
         }
-        let neighs = req.neighbors.clone().unwrap().ARPNeighbors.into_vec();
+        let neighs = req.neighbors.unwrap().ARPNeighbors.into_vec();
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
@@ -1198,12 +1203,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         drop(sandbox);
         match event_rx.recv() {
-            Err(err) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, err.to_string())),
+            Err(err) => Err(ttrpc_error(ttrpc::Code::INTERNAL, err.to_string())),
             Ok(container_id) => {
                 info!(sl!(), "get_oom_event return {}", &container_id);
                 let mut resp = OOMEvent::new();
                 resp.container_id = container_id;
-                return Ok(resp);
+                Ok(resp)
             }
         }
     }
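The last hunk above drops the return keywords because the match is already the tail expression of the method, so each arm's value is the return value (clippy's needless_return). A minimal sketch using a plain mpsc channel in place of the agent's event receiver:

    // Invented stand-in for the OOM event wait; only the arm shape matters.
    use std::sync::mpsc::channel;

    fn wait_one() -> Result<String, String> {
        let (tx, rx) = channel();
        tx.send("container-1".to_string()).unwrap();
        match rx.recv() {
            // Was: Err(err) => return Err(...) and Ok(id) => { ...; return Ok(resp); }
            Err(err) => Err(err.to_string()),
            Ok(id) => Ok(id),
        }
    }

    fn main() {
        println!("{:?}", wait_one());
    }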
@@ -1243,7 +1248,7 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
     if block_size {
         match fs::read_to_string(SYSFS_MEMORY_BLOCK_SIZE_PATH) {
             Ok(v) => {
-                if v.len() == 0 {
+                if v.is_empty() {
                     info!(sl!(), "string in empty???");
                     return Err(anyhow!("Invalid block size"));
                 }
@@ -1322,7 +1327,7 @@ fn read_stream(fd: RawFd, l: usize) -> Result<Vec<u8>> {
         }
         Err(e) => match e {
             nix::Error::Sys(errno) => match errno {
-                Errno::EAGAIN => v.resize(0, 0),
+                Errno::EAGAIN => v.clear(),
                 _ => return Err(anyhow!(nix::Error::Sys(errno))),
             },
             _ => return Err(anyhow!("read error")),
@@ -1340,13 +1345,13 @@ fn find_process<'a>(
 ) -> Result<&'a mut Process> {
     let ctr = sandbox
         .get_container(cid)
-        .ok_or(anyhow!("Invalid container id"))?;
+        .ok_or_else(|| anyhow!("Invalid container id"))?;
     if init || eid == "" {
         return ctr
             .processes
             .get_mut(&ctr.init_process_pid)
-            .ok_or(anyhow!("cannot find init process!"));
+            .ok_or_else(|| anyhow!("cannot find init process!"));
     }
     ctr.get_process(eid).map_err(|_| anyhow!("Invalid exec id"))
@@ -1396,7 +1401,7 @@ fn update_container_namespaces(
     let linux = spec
         .linux
         .as_mut()
-        .ok_or(anyhow!("Spec didn't container linux field"))?;
+        .ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
     let namespaces = linux.namespaces.as_mut_slice();
     for namespace in namespaces.iter_mut() {
@@ -1464,7 +1469,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
             }
         };
         if line.starts_with("SigCgt:") {
-            let mask_vec: Vec<&str> = line.split(":").collect();
+            let mask_vec: Vec<&str> = line.split(':').collect();
             if mask_vec.len() != 2 {
                 warn!(sl!(), "parse the SigCgt field failed\n");
                 return false;
@@ -1484,7 +1489,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
     false
 }
-fn do_mem_hotplug_by_probe(addrs: &Vec<u64>) -> Result<()> {
+fn do_mem_hotplug_by_probe(addrs: &[u64]) -> Result<()> {
     for addr in addrs.iter() {
         fs::write(SYSFS_MEMORY_HOTPLUG_PROBE_PATH, format!("{:#X}", *addr))?;
     }
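Changing the parameter from &Vec<u64> to &[u64] in the last hunk follows clippy's ptr_arg lint: a slice borrow is strictly more general and does not force callers to hold a Vec. A small sketch with an invented function:

    // Invented function; the signature change is the point.
    fn sum_addrs(addrs: &[u64]) -> u64 {
        // Was: fn sum_addrs(addrs: &Vec<u64>) -> u64
        addrs.iter().sum()
    }

    fn main() {
        let v = vec![0x1000u64, 0x2000];
        println!("{:#X}", sum_addrs(&v)); // &Vec<u64> coerces to &[u64]
        println!("{:#X}", sum_addrs(&[0x3000]));
    }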
@@ -1497,8 +1502,12 @@ fn do_set_guest_date_time(sec: i64, usec: i64) -> Result<()> {
         tv_usec: usec,
     };
-    let ret =
-        unsafe { libc::settimeofday(&tv as *const libc::timeval, 0 as *const libc::timezone) };
+    let ret = unsafe {
+        libc::settimeofday(
+            &tv as *const libc::timeval,
+            std::ptr::null::<libc::timezone>(),
+        )
+    };
     Errno::result(ret).map(drop)?;
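The hunk above swaps the "0 as *const libc::timezone" cast for std::ptr::null(), per clippy's zero_ptr lint; the behavior is identical, only the intent is spelled out. A dependency-free sketch of the same idiom:

    // Invented example; shows std::ptr::null() in place of an integer cast.
    fn describe(p: *const u8) -> &'static str {
        if p.is_null() {
            "null"
        } else {
            "non-null"
        }
    }

    fn main() {
        // Was: 0 as *const u8
        let p: *const u8 = std::ptr::null();
        println!("{}", describe(p));
    }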
@@ -1514,8 +1523,8 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
     let parent = path.parent();
-    let dir = if parent.is_some() {
-        parent.unwrap().to_path_buf()
+    let dir = if let Some(parent) = parent {
+        parent.to_path_buf()
     } else {
         PathBuf::from("/")
     };
@@ -1575,8 +1584,8 @@ fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
     let spec_root = spec.root.as_ref().unwrap();
     let bundle_path = Path::new(CONTAINER_BASE).join(cid);
-    let config_path = bundle_path.clone().join("config.json");
-    let rootfs_path = bundle_path.clone().join("rootfs");
+    let config_path = bundle_path.join("config.json");
+    let rootfs_path = bundle_path.join("rootfs");
     fs::create_dir_all(&rootfs_path)?;
     BareMount::new(
@@ -1640,9 +1649,9 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
                 "load_kernel_module return code: {} stdout:{} stderr:{}",
                 code, std_out, std_err
             );
-            return Err(anyhow!(msg));
+            Err(anyhow!(msg))
         }
-        None => return Err(anyhow!("Process terminated by signal")),
+        None => Err(anyhow!("Process terminated by signal")),
     }
 }
@@ -1654,17 +1663,16 @@ mod tests {
     use std::sync::mpsc::{Receiver, Sender};
     use ttrpc::{MessageHeader, TtrpcContext};
-    fn mk_ttrpc_context() -> (TtrpcContext, Receiver<(MessageHeader, Vec<u8>)>) {
+    type Message = (MessageHeader, Vec<u8>);
+    fn mk_ttrpc_context() -> (TtrpcContext, Receiver<Message>) {
         let mh = MessageHeader::default();
-        let (tx, rx): (
-            Sender<(MessageHeader, Vec<u8>)>,
-            Receiver<(MessageHeader, Vec<u8>)>,
-        ) = channel();
+        let (tx, rx): (Sender<Message>, Receiver<Message>) = channel();
         let ctx = TtrpcContext {
             fd: -1,
-            mh: mh,
+            mh,
             res_tx: tx,
         };
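The test hunk above introduces a type alias to satisfy clippy's type_complexity lint on the channel's item type. A minimal sketch with a stand-in header type (a plain u32 here, purely illustrative):

    // Invented payload; the alias keeps the channel type readable at each use.
    use std::sync::mpsc::{channel, Receiver, Sender};

    type Message = (u32, Vec<u8>);

    fn mk_channel() -> (Sender<Message>, Receiver<Message>) {
        // Was a fully spelled-out (Sender<(Header, Vec<u8>)>, Receiver<(Header, Vec<u8>)>) annotation.
        channel()
    }

    fn main() {
        let (tx, rx) = mk_channel();
        tx.send((1, vec![0u8; 4])).unwrap();
        println!("{:?}", rx.recv().unwrap());
    }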

View File

@@ -75,7 +75,7 @@ impl Sandbox {
             sender: None,
             rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
             hooks: None,
-            event_rx: event_rx,
+            event_rx,
             event_tx: tx,
         })
     }
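The event_rx change above uses field init shorthand, which clippy's redundant_field_names lint asks for when the local variable and the field share a name. A tiny sketch with an invented struct:

    // Invented struct; only the initializer shorthand is the point.
    struct Sandbox {
        event_rx: u32,
        event_tx: u32,
    }

    fn new_sandbox(event_rx: u32, tx: u32) -> Sandbox {
        Sandbox {
            // Was: event_rx: event_rx,
            event_rx,
            event_tx: tx,
        }
    }

    fn main() {
        let s = new_sandbox(1, 2);
        println!("{} {}", s.event_rx, s.event_tx);
    }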
@@ -112,14 +112,14 @@ impl Sandbox {
     // acquiring a lock on sandbox.
     pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
         match self.storages.get_mut(path) {
-            None => return Err(anyhow!("Sandbox storage with path {} not found", path)),
+            None => Err(anyhow!("Sandbox storage with path {} not found", path)),
             Some(count) => {
                 *count -= 1;
                 if *count < 1 {
                     self.storages.remove(path);
                     return Ok(true);
                 }
-                return Ok(false);
+                Ok(false)
             }
         }
     }
@@ -161,13 +161,13 @@ impl Sandbox {
     pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
         // Set up shared IPC namespace
         self.shared_ipcns = Namespace::new(&self.logger)
-            .as_ipc()
+            .get_ipc()
             .setup()
             .context("Failed to setup persistent IPC namespace")?;
         // // Set up shared UTS namespace
         self.shared_utsns = Namespace::new(&self.logger)
-            .as_uts(self.hostname.as_str())
+            .get_uts(self.hostname.as_str())
             .setup()
             .context("Failed to setup persistent UTS namespace")?;
@@ -184,7 +184,7 @@ impl Sandbox {
         // This means a separate pause process has not been created. We treat the
         // first container created as the infra container in that case
         // and use its pid namespace in case pid namespace needs to be shared.
-        if self.sandbox_pidns.is_none() && self.containers.len() == 0 {
+        if self.sandbox_pidns.is_none() && self.containers.is_empty() {
             let init_pid = c.init_process_pid;
             if init_pid == -1 {
                 return Err(anyhow!(
@@ -192,7 +192,7 @@ impl Sandbox {
                 ));
             }
-            let mut pid_ns = Namespace::new(&self.logger).as_pid();
+            let mut pid_ns = Namespace::new(&self.logger).get_pid();
             pid_ns.path = format!("/proc/{}/ns/pid", init_pid);
             self.sandbox_pidns = Some(pid_ns);
@@ -216,7 +216,7 @@ impl Sandbox {
     }
     pub fn destroy(&mut self) -> Result<()> {
-        for (_, ctr) in &mut self.containers {
+        for ctr in self.containers.values_mut() {
             ctr.destroy()?;
         }
         Ok(())
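Iterating values_mut() instead of destructuring (_, ctr) from the map, as in the last hunk, satisfies clippy's for_kv_map lint and documents that the keys are unused. A small sketch on a toy counter map:

    // Invented map; the loop shape mirrors the destroy() change above.
    use std::collections::HashMap;

    fn reset_all(counters: &mut HashMap<String, u64>) {
        // Was: for (_, c) in &mut *counters { ... }
        for c in counters.values_mut() {
            *c = 0;
        }
    }

    fn main() {
        let mut counters = HashMap::new();
        counters.insert("c1".to_string(), 7u64);
        reset_all(&mut counters);
        println!("{:?}", counters);
    }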
@@ -332,7 +332,7 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
         }
         let c = c.unwrap();
-        if c.trim().contains("0") {
+        if c.trim().contains('0') {
             let r = fs::write(file.as_str(), "1");
             if r.is_err() {
                 continue;
@@ -612,8 +612,8 @@ mod tests {
         let linux = Linux::default();
         let mut spec = Spec::default();
-        spec.root = Some(root).into();
-        spec.linux = Some(linux).into();
+        spec.root = Some(root);
+        spec.linux = Some(linux);
         CreateOpts {
             cgroup_name: "".to_string(),

View File

@@ -2,6 +2,7 @@
 //
 // SPDX-License-Identifier: Apache-2.0
 //
+#![allow(clippy::module_inception)]
 #[cfg(test)]
 mod test_utils {
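The added crate-level allow above silences clippy's module_inception lint, which fires when a module shares its name with its parent module (typically a mod declared inside a file of the same name). A standalone sketch of the pattern the attribute permits, with invented module names:

    #![allow(clippy::module_inception)]

    // A child module with the same name as its parent trips the lint; the
    // crate-level allow keeps such a layout compiling warning-free.
    mod test_utils {
        pub mod test_utils {
            pub fn answer() -> u32 {
                42
            }
        }
    }

    fn main() {
        println!("{}", test_utils::test_utils::answer());
    }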

View File

@@ -289,7 +289,7 @@ languages:
     description: |
       'newest-version' is the latest version known to work when
       building Kata
-    newest-version: "1.44.1"
+    newest-version: "1.47.0"
 specs:
   description: "Details of important specifications"