Mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-06-28 08:17:37 +00:00)

Merge pull request #977 from liubin/fix/951-clear-clippy-warnings

agent: clear clippy warnings

Commit 074b5332aa
Changed files:

.gitignore (vendored): 1 line changed
@@ -3,5 +3,6 @@
**/*.rej
**/target
**/.vscode
pkg/logging/Cargo.lock
src/agent/src/version.rs
src/agent/kata-agent.service
.travis.yml: 12 lines changed
@@ -15,7 +15,7 @@ os: linux
# https://docs.travis-ci.com/user/caching#clearing-caches
language: rust
rust:
- 1.44.1
- 1.47.0
cache:
cargo: true
directories:

@@ -36,9 +36,9 @@ install:
- rustup target add x86_64-unknown-linux-musl
- sudo ln -sf /usr/bin/g++ /bin/musl-g++
- rustup component add rustfmt
- make -C ${TRAVIS_BUILD_DIR}/src/agent
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check
- rustup component add clippy
- make -C ${TRAVIS_BUILD_DIR}/src/agent clippy
- "ci/static-checks.sh"

before_script:
- "ci/install_go.sh"

@@ -47,7 +47,9 @@ before_script:
- sudo -E PATH=$PATH GOPATH=$GOPATH make -C ${TRAVIS_BUILD_DIR}/src/runtime test

script:
- "ci/static-checks.sh"
- make -C ${TRAVIS_BUILD_DIR}/src/agent
- make -C ${TRAVIS_BUILD_DIR}/src/agent check
- sudo -E PATH=$PATH make -C ${TRAVIS_BUILD_DIR}/src/agent check

jobs:
include:
@@ -93,9 +93,7 @@ impl HashSerializer {
// Take care to only add the first instance of a key. This matters for loggers (but not
// Records) since a child loggers have parents and the loggers are serialised child first
// meaning the *newest* fields are serialised first.
if !self.fields.contains_key(&key) {
self.fields.insert(key, value);
}
self.fields.entry(key).or_insert(value);
}

fn remove_field(&mut self, key: &str) {
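The hunk above replaces a contains_key() check followed by insert() with the HashMap entry API, the form clippy's map_entry lint suggests; or_insert() leaves an existing value untouched, so the "first instance of a key wins" behaviour described in the comment is preserved. A minimal standalone sketch of the two forms (not part of the diff; the key/value names are illustrative):

use std::collections::HashMap;

fn main() {
    let mut fields: HashMap<String, String> = HashMap::new();

    // Form flagged by clippy::map_entry: look the key up, then insert.
    if !fields.contains_key("level") {
        fields.insert("level".to_string(), "info".to_string());
    }

    // Preferred form: a single entry() lookup; or_insert_with() only
    // writes when the key is absent, so the first insertion wins.
    fields
        .entry("level".to_string())
        .or_insert_with(|| "debug".to_string());

    assert_eq!(fields["level"], "info");
}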
@@ -121,6 +121,12 @@ optimize: $(SOURCES) | show-summary show-header
show-header:
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"

clippy: $(GENERATED_CODE)
cargo clippy --all-targets --all-features --release \
-- \
-Aclippy::redundant_allocation \
-D warnings

$(GENERATED_FILES): %: %.in
@sed $(foreach r,$(GENERATED_REPLACEMENTS),-e 's|@$r@|$($r)|g') "$<" > "$@"
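The new clippy target passes -D warnings so every clippy lint fails the build, while -Aclippy::redundant_allocation keeps that single lint as a non-error. The same policy can also be expressed in source with crate-level attributes; a hedged sketch (these are standard rustc/clippy attributes, the crate itself is illustrative):

// Equivalent in spirit to the command-line flags used by the Makefile
// target: deny all warnings, but allow this one clippy lint.
#![deny(warnings)]
#![allow(clippy::redundant_allocation)]

fn main() {
    println!("clippy-clean build");
}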
@@ -4,7 +4,6 @@
//

use serde::{Deserialize, Serialize};
use serde_json;

use std::error;
use std::fmt::{Display, Formatter, Result as FmtResult};
@@ -3,6 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#![allow(bare_trait_objects)]
#![allow(clippy::redundant_field_names)]

pub mod agent;
pub mod agent_ttrpc;

@@ -11,11 +12,3 @@ pub mod health;
pub mod health_ttrpc;
pub mod oci;
pub mod types;

#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}
@@ -6,8 +6,6 @@
// looks like we can use caps to manipulate capabilities
// conveniently, use caps to do it directly.. maybe

use lazy_static;

use crate::log_child;
use crate::sync::write_count;
use anyhow::{anyhow, Result};
@@ -21,7 +21,6 @@ use cgroups::{
use crate::cgroups::Manager as CgroupManager;
use crate::container::DEFAULT_DEVICES;
use anyhow::{anyhow, Context, Result};
use lazy_static;
use libc::{self, pid_t};
use nix::errno::Errno;
use oci::{

@@ -46,18 +45,19 @@ macro_rules! sl {
}

pub fn load_or_create<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Cgroup<'a> {
let valid_path = path.trim_start_matches("/").to_string();
let valid_path = path.trim_start_matches('/').to_string();
let cg = load(h.clone(), &valid_path);
if cg.is_none() {
match cg {
Some(cg) => cg,
None => {
info!(sl!(), "create new cgroup: {}", &valid_path);
cgroups::Cgroup::new(h, valid_path.as_str())
} else {
cg.unwrap()
}
}
}

pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option<Cgroup<'a>> {
let valid_path = path.trim_start_matches("/").to_string();
let valid_path = path.trim_start_matches('/').to_string();
let cg = cgroups::Cgroup::load(h, valid_path.as_str());
let cpu_controller: &CpuController = cg.controller_of().unwrap();
if cpu_controller.exists() {
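load_or_create() above drops the is_none()-then-unwrap() shape in favour of a single match on the Option, which covers both cases without an unwrap. A standalone sketch of the same refactor on a toy lookup (not the agent's real types):

fn load(path: &str) -> Option<String> {
    if path.is_empty() {
        None
    } else {
        Some(path.to_string())
    }
}

fn load_or_create(path: &str) -> String {
    // Before: if load(path).is_none() { create... } else { load(path).unwrap() }
    // After: one match expression handles both branches.
    match load(path) {
        Some(existing) => existing,
        None => format!("created:{}", path),
    }
}

fn main() {
    assert_eq!(load_or_create("devices"), "devices");
    assert_eq!(load_or_create(""), "created:");
}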
@@ -126,7 +126,7 @@ impl CgroupManager for Manager {
}

// set hugepages resources
if r.hugepage_limits.len() > 0 {
if !r.hugepage_limits.is_empty() {
set_hugepages_resources(&cg, &r.hugepage_limits, res)?;
}

@@ -210,8 +210,8 @@ impl CgroupManager for Manager {
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
let cg = load(h, &self.cpath);
if cg.is_some() {
cg.unwrap().delete();
if let Some(cg) = cg {
cg.delete();
}
Ok(())
}

@@ -259,7 +259,7 @@ fn set_network_resources(

fn set_devices_resources(
_cg: &cgroups::Cgroup,
device_resources: &Vec<LinuxDeviceCgroup>,
device_resources: &[LinuxDeviceCgroup],
res: &mut cgroups::Resources,
) -> Result<()> {
info!(sl!(), "cgroup manager set devices");

@@ -291,7 +291,7 @@ fn set_devices_resources(

fn set_hugepages_resources(
_cg: &cgroups::Cgroup,
hugepage_limits: &Vec<LinuxHugepageLimit>,
hugepage_limits: &[LinuxHugepageLimit],
res: &mut cgroups::Resources,
) -> Result<()> {
info!(sl!(), "cgroup manager set hugepage");

@@ -453,7 +453,7 @@ fn set_pids_resources(cg: &cgroups::Cgroup, pids: &LinuxPids) -> Result<()> {
}

fn build_blk_io_device_throttle_resource(
input: &Vec<oci::LinuxThrottleDevice>,
input: &[oci::LinuxThrottleDevice],
) -> Vec<BlkIoDeviceThrottleResource> {
let mut blk_io_device_throttle_resources = vec![];
for d in input.iter() {
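Several signatures in this file change &Vec<T> parameters to &[T], following clippy's ptr_arg lint: a slice can be borrowed from a Vec, an array, or any contiguous buffer, so callers are not forced to hold a Vec. A small sketch with an illustrative type (not the oci structs used by the agent):

#[derive(Debug)]
struct ThrottleDevice {
    rate: u64,
}

// &[ThrottleDevice] accepts any borrowed contiguous sequence;
// a &Vec<ThrottleDevice> argument coerces to it automatically.
fn total_rate(devices: &[ThrottleDevice]) -> u64 {
    devices.iter().map(|d| d.rate).sum()
}

fn main() {
    let from_vec = vec![ThrottleDevice { rate: 100 }, ThrottleDevice { rate: 50 }];
    let from_array = [ThrottleDevice { rate: 25 }];

    assert_eq!(total_rate(&from_vec), 150);
    assert_eq!(total_rate(&from_array), 25);
}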
@@ -685,7 +685,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {

// use_hierarchy
let value = memory.use_hierarchy;
let use_hierarchy = if value == 1 { true } else { false };
let use_hierarchy = value == 1;

// gte memory datas
let usage = SingularPtrField::some(MemoryData {

@@ -739,13 +739,12 @@ fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> {
let current = pid_controller.get_pid_current().unwrap_or(0);
let max = pid_controller.get_pid_max();

let limit = if max.is_err() {
0
} else {
match max.unwrap() {
let limit = match max {
Err(_) => 0,
Ok(max) => match max {
MaxValue::Value(v) => v,
MaxValue::Max => 0,
}
},
} as u64;

SingularPtrField::some(PidsStats {

@@ -788,9 +787,9 @@ https://github.com/opencontainers/runc/blob/a5847db387ae28c0ca4ebe4beee1a76900c8
Total 0
*/

fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioStatsEntry> {
fn get_blkio_stat_blkiodata(blkiodata: &[BlkIoData]) -> RepeatedField<BlkioStatsEntry> {
let mut m = RepeatedField::new();
if blkiodata.len() == 0 {
if blkiodata.is_empty() {
return m;
}

@@ -810,10 +809,10 @@ fn get_blkio_stat_blkiodata(blkiodata: &Vec<BlkIoData>) -> RepeatedField<BlkioSt
m
}

fn get_blkio_stat_ioservice(services: &Vec<IoService>) -> RepeatedField<BlkioStatsEntry> {
fn get_blkio_stat_ioservice(services: &[IoService]) -> RepeatedField<BlkioStatsEntry> {
let mut m = RepeatedField::new();

if services.len() == 0 {
if services.is_empty() {
return m;
}

@@ -834,7 +833,7 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki
major: major as u64,
minor: minor as u64,
op: op.to_string(),
value: value,
value,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
}

@@ -875,7 +874,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
let mut m = BlkioStats::new();
let io_serviced_recursive = blkio.io_serviced_recursive;

if io_serviced_recursive.len() == 0 {
if io_serviced_recursive.is_empty() {
// fall back to generic stats
// blkio.throttle.io_service_bytes,
// maybe io_service_bytes_recursive?

@@ -930,8 +929,8 @@ fn get_hugetlb_stats(cg: &cgroups::Cgroup) -> HashMap<String, HugetlbStats> {
h
}

pub const PATHS: &'static str = "/proc/self/cgroup";
pub const MOUNTS: &'static str = "/proc/self/mountinfo";
pub const PATHS: &str = "/proc/self/cgroup";
pub const MOUNTS: &str = "/proc/self/mountinfo";

pub fn get_paths() -> Result<HashMap<String, String>> {
let mut m = HashMap::new();

@@ -1056,7 +1055,7 @@ impl Manager {
if i == 0 {
break;
}
i = i - 1;
i -= 1;
let h = cgroups::hierarchies::auto();
let h = Box::new(&*h);
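Two mechanical fixes recur in the hunks above: const items drop the redundant &'static lifetime (consts are implicitly 'static, clippy's redundant_static_lifetimes), and if value == 1 { true } else { false } collapses to the comparison itself (needless_bool). A compact standalone sketch of both:

// The 'static lifetime is implied for a const, so &str is enough.
const PATHS: &str = "/proc/self/cgroup";

// A comparison already is a bool; no if/else wrapper is needed.
fn use_hierarchy(value: u64) -> bool {
    value == 1
}

fn main() {
    println!("{} use_hierarchy={}", PATHS, use_hierarchy(1));
}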
@@ -41,7 +41,7 @@ fn get_value_from_cgroup(path: &PathBuf, key: &str) -> Result<i64> {
);

for line in content.lines() {
let arr: Vec<&str> = line.split(" ").collect();
let arr: Vec<&str> = line.split(' ').collect();
if arr.len() == 2 && arr[0] == key {
let r = arr[1].parse::<i64>()?;
return Ok(r);
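The split(" ") to split(' ') change is clippy's single_char_pattern lint: a char pattern expresses a one-character separator more directly and avoids the string-pattern machinery. Standalone sketch (not the notifier's real input):

fn main() {
    let line = "nr_periods 100";

    // Both forms compile; clippy prefers the char pattern for a
    // single-character separator.
    let with_str: Vec<&str> = line.split(" ").collect();
    let with_char: Vec<&str> = line.split(' ').collect();

    assert_eq!(with_str, with_char);
    assert_eq!(with_char, ["nr_periods", "100"]);
}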
@@ -4,12 +4,9 @@
//

use anyhow::{anyhow, Context, Result};
use dirs;
use lazy_static;
use libc::pid_t;
use oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec};
use oci::{LinuxDevice, LinuxIDMapping};
use serde_json;
use std::clone::Clone;
use std::ffi::{CStr, CString};
use std::fmt;

@@ -43,7 +40,6 @@ use nix::sys::signal::{self, Signal};
use nix::sys::stat::{self, Mode};
use nix::unistd::{self, ForkResult, Gid, Pid, Uid};

use libc;
use protobuf::SingularPtrField;

use oci::State as OCIState;

@@ -54,9 +50,9 @@ use std::os::unix::io::FromRawFd;

use slog::{info, o, Logger};

const STATE_FILENAME: &'static str = "state.json";
const EXEC_FIFO_FILENAME: &'static str = "exec.fifo";
const VER_MARKER: &'static str = "1.2.5";
const STATE_FILENAME: &str = "state.json";
const EXEC_FIFO_FILENAME: &str = "exec.fifo";
const VER_MARKER: &str = "1.2.5";
const PID_NS_PATH: &str = "/proc/self/ns/pid";

const INIT: &str = "INIT";
@@ -551,7 +547,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {

setid(uid, gid)?;

if guser.additional_gids.len() > 0 {
if !guser.additional_gids.is_empty() {
setgroups(guser.additional_gids.as_slice()).map_err(|e| {
let _ = write_sync(
cwfd,

@@ -595,7 +591,7 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {

// setup the envs
for e in env.iter() {
let v: Vec<&str> = e.splitn(2, "=").collect();
let v: Vec<&str> = e.splitn(2, '=').collect();
if v.len() != 2 {
continue;
}

@@ -731,7 +727,7 @@ impl BaseContainer for LinuxContainer {
info!(logger, "enter container.start!");
let mut fifofd: RawFd = -1;
if p.init {
if let Ok(_) = stat::stat(fifo_file.as_str()) {
if stat::stat(fifo_file.as_str()).is_ok() {
return Err(anyhow!("exec fifo exists"));
}
unistd::mkfifo(fifo_file.as_str(), Mode::from_bits(0o622).unwrap())?;

@@ -931,7 +927,7 @@ impl BaseContainer for LinuxContainer {
.join()
.map_err(|e| warn!(logger, "joining log handler {:?}", e));
info!(logger, "create process completed");
return Ok(());
Ok(())
}

fn run(&mut self, p: Process) -> Result<()> {

@@ -1164,12 +1160,10 @@ fn join_namespaces(
}

// apply cgroups
if p.init {
if res.is_some() {
if p.init && res.is_some() {
info!(logger, "apply cgroups!");
cm.set(res.unwrap(), false)?;
}
}

if res.is_some() {
cm.apply(p.pid)?;

@@ -1464,7 +1458,7 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
}
}

return Ok(());
Ok(())
}

ForkResult::Child => {

@@ -1567,14 +1561,12 @@ fn execute_hook(logger: &Logger, h: &Hook, st: &OCIState) -> Result<()> {
error
}
}
} else {
if let Ok(s) = rx.recv() {
} else if let Ok(s) = rx.recv() {
s
} else {
let _ = signal::kill(Pid::from_raw(pid), Some(Signal::SIGKILL));
-libc::EPIPE
}
}
};

handle.join().unwrap();
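The if let Ok(_) = stat::stat(...) to stat::stat(...).is_ok() change above is clippy's redundant_pattern_matching: when only success or failure matters, the query method states that intent directly. A standalone sketch on a plain std call (the path is illustrative):

use std::fs;

fn fifo_exists(path: &str) -> bool {
    // Before: if let Ok(_) = fs::metadata(path) { true } else { false }
    // After: is_ok() asks exactly the question being answered.
    fs::metadata(path).is_ok()
}

fn main() {
    println!("/tmp exists: {}", fifo_exists("/tmp"));
}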
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//

use anyhow::{anyhow, bail, Context, Error, Result};
use anyhow::{anyhow, bail, Context, Result};
use libc::uid_t;
use nix::errno::Errno;
use nix::fcntl::{self, OFlag};

@@ -22,13 +22,11 @@ use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};

use path_absolutize::*;
use scan_fmt;
use std::fs::File;
use std::io::{BufRead, BufReader};

use crate::container::DEFAULT_DEVICES;
use crate::sync::write_count;
use lazy_static;
use std::string::ToString;

use crate::log_child;

@@ -50,7 +48,7 @@ pub struct Info {
vfs_opts: String,
}

const MOUNTINFOFORMAT: &'static str = "{d} {d} {d}:{d} {} {} {} {}";
const MOUNTINFOFORMAT: &str = "{d} {d} {d}:{d} {} {} {} {}";
const PROC_PATH: &str = "/proc";

// since libc didn't defined this const for musl, thus redefined it here.

@@ -153,7 +151,7 @@ pub fn init_rootfs(
let linux = &spec
.linux
.as_ref()
.ok_or::<Error>(anyhow!("Could not get linux configuration from spec"))?;
.ok_or_else(|| anyhow!("Could not get linux configuration from spec"))?;

let mut flags = MsFlags::MS_REC;
match PROPAGATION.get(&linux.rootfs_propagation.as_str()) {

@@ -164,14 +162,14 @@ pub fn init_rootfs(
let root = spec
.root
.as_ref()
.ok_or(anyhow!("Could not get rootfs path from spec"))
.ok_or_else(|| anyhow!("Could not get rootfs path from spec"))
.and_then(|r| {
fs::canonicalize(r.path.as_str()).context("Could not canonicalize rootfs path")
})?;

let rootfs = (*root)
.to_str()
.ok_or(anyhow!("Could not convert rootfs path to string"))?;
.ok_or_else(|| anyhow!("Could not convert rootfs path to string"))?;

mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)?;

@@ -187,7 +185,7 @@ pub fn init_rootfs(

for m in &spec.mounts {
let (mut flags, data) = parse_mount(&m);
if !m.destination.starts_with("/") || m.destination.contains("..") {
if !m.destination.starts_with('/') || m.destination.contains("..") {
return Err(anyhow!(
"the mount destination {} is invalid",
m.destination

@@ -273,9 +271,9 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
// only allow a mount on-top of proc if it's source is "proc"
unsafe {
let mut stats = MaybeUninit::<libc::statfs>::uninit();
if let Ok(_) = m
.source
if m.source
.with_nix_path(|path| libc::statfs(path.as_ptr(), stats.as_mut_ptr()))
.is_ok()
{
if stats.assume_init().f_type == PROC_SUPER_MAGIC {
return Ok(());

@@ -298,7 +296,7 @@ fn check_proc_mount(m: &Mount) -> Result<()> {
)));
}

return Ok(());
Ok(())
}

fn mount_cgroups_v2(cfd_log: RawFd, m: &Mount, rootfs: &str, flags: MsFlags) -> Result<()> {
@@ -586,15 +584,14 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
let abs_root_buf = root_path.absolutize()?;
let abs_root = abs_root_buf
.to_str()
.ok_or::<Error>(anyhow!("failed to parse {} to absolute path", rootfs))?;
.ok_or_else(|| anyhow!("failed to parse {} to absolute path", rootfs))?;

for info in mount_infos.iter() {
let mount_point = Path::new(&info.mount_point);
let abs_mount_buf = mount_point.absolutize()?;
let abs_mount_point = abs_mount_buf.to_str().ok_or::<Error>(anyhow!(
"failed to parse {} to absolute path",
info.mount_point
))?;
let abs_mount_point = abs_mount_buf
.to_str()
.ok_or_else(|| anyhow!("failed to parse {} to absolute path", info.mount_point))?;
let abs_mount_point_string = String::from(abs_mount_point);

// Umount every syfs and proc file systems, except those under the container rootfs

@@ -755,7 +752,7 @@ fn mount_from(
Ok(())
}

static SYMLINKS: &'static [(&'static str, &'static str)] = &[
static SYMLINKS: &[(&str, &str)] = &[
("/proc/self/fd", "dev/fd"),
("/proc/self/fd/0", "dev/stdin"),
("/proc/self/fd/1", "dev/stdout"),

@@ -888,7 +885,7 @@ pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
}

fn mask_path(path: &str) -> Result<()> {
if !path.starts_with("/") || path.contains("..") {
if !path.starts_with('/') || path.contains("..") {
return Err(nix::Error::Sys(Errno::EINVAL).into());
}

@@ -917,7 +914,7 @@ fn mask_path(path: &str) -> Result<()> {
}

fn readonly_path(path: &str) -> Result<()> {
if !path.starts_with("/") || path.contains("..") {
if !path.starts_with('/') || path.contains("..") {
return Err(nix::Error::Sys(Errno::EINVAL).into());
}
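The repeated ok_or(anyhow!(...)) to ok_or_else(|| anyhow!(...)) rewrites address clippy's or_fun_call family: with ok_or the error value (and its formatting) is built even when the Option is Some, whereas the closure form only runs on the None path. A std-only standalone sketch of the same idea (String errors stand in for anyhow here):

fn rootfs_path(root: Option<&str>, cid: &str) -> Result<String, String> {
    root
        // ok_or(format!(...)) would allocate the message on every call;
        // ok_or_else defers that work to the error path.
        .ok_or_else(|| format!("could not get rootfs path for container {}", cid))
        .map(|r| r.to_string())
}

fn main() {
    assert!(rootfs_path(Some("/run/kata"), "c1").is_ok());
    assert_eq!(
        rootfs_path(None, "c1").unwrap_err(),
        "could not get rootfs path for container c1"
    );
}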
@@ -88,14 +88,14 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
let msg: i32 = i32::from_be_bytes(buf_array);
match msg {
SYNC_SUCCESS => return Ok(Vec::new()),
SYNC_SUCCESS => Ok(Vec::new()),
SYNC_DATA => {
let buf = read_count(fd, MSG_SIZE)?;
let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
let msg_length: i32 = i32::from_be_bytes(buf_array);
let data_buf = read_count(fd, msg_length as usize)?;

return Ok(data_buf);
Ok(data_buf)
}
SYNC_FAILED => {
let mut error_buf = vec![];

@@ -119,9 +119,9 @@ pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
}
};

return Err(anyhow!(error_str));
Err(anyhow!(error_str))
}
_ => return Err(anyhow!("error in receive sync message")),
_ => Err(anyhow!("error in receive sync message")),
}
}
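The read_sync() hunk drops the explicit return from the final expression of each match arm, which is clippy's needless_return lint: the last expression of a block is already its value. Standalone sketch:

fn classify(msg: i32) -> Result<&'static str, String> {
    match msg {
        0 => Ok("success"),
        1 => Ok("data"),
        // No `return` needed: each arm is the function's value.
        _ => Err(format!("error in receive sync message: {}", msg)),
    }
}

fn main() {
    assert_eq!(classify(0), Ok("success"));
    assert!(classify(9).is_err());
}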
@@ -5,13 +5,12 @@

use crate::container::Config;
use anyhow::{anyhow, Result};
use lazy_static;
use nix::errno::Errno;
use oci::{LinuxIDMapping, LinuxNamespace, Spec};
use std::collections::HashMap;
use std::path::{Component, PathBuf};

fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
fn contain_namespace(nses: &[LinuxNamespace], key: &str) -> bool {
for ns in nses {
if ns.r#type.as_str() == key {
return true;

@@ -21,7 +20,7 @@ fn contain_namespace(nses: &Vec<LinuxNamespace>, key: &str) -> bool {
false
}

fn get_namespace_path(nses: &Vec<LinuxNamespace>, key: &str) -> Result<String> {
fn get_namespace_path(nses: &[LinuxNamespace], key: &str) -> Result<String> {
for ns in nses {
if ns.r#type.as_str() == key {
return Ok(ns.path.clone());

@@ -41,11 +40,9 @@ fn rootfs(root: &str) -> Result<()> {
// symbolic link? ..?
let mut stack: Vec<String> = Vec::new();
for c in path.components() {
if stack.is_empty() {
if c == Component::RootDir || c == Component::ParentDir {
if stack.is_empty() && (c == Component::RootDir || c == Component::ParentDir) {
continue;
}
}

if c == Component::ParentDir {
stack.pop();

@@ -74,7 +71,7 @@ fn network(_oci: &Spec) -> Result<()> {
}

fn hostname(oci: &Spec) -> Result<()> {
if oci.hostname.is_empty() || oci.hostname == "".to_string() {
if oci.hostname.is_empty() || oci.hostname == "" {
return Ok(());
}

@@ -91,7 +88,7 @@ fn hostname(oci: &Spec) -> Result<()> {

fn security(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap();
if linux.masked_paths.len() == 0 && linux.readonly_paths.len() == 0 {
if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() {
return Ok(());
}

@@ -104,7 +101,7 @@ fn security(oci: &Spec) -> Result<()> {
Ok(())
}

fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> {
fn idmapping(maps: &[LinuxIDMapping]) -> Result<()> {
for map in maps {
if map.size > 0 {
return Ok(());

@@ -127,7 +124,7 @@ fn usernamespace(oci: &Spec) -> Result<()> {
idmapping(&linux.gid_mappings)?;
} else {
// no user namespace but idmap
if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 {
if !linux.uid_mappings.is_empty() || !linux.gid_mappings.is_empty() {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}
@@ -197,7 +194,7 @@ fn sysctl(oci: &Spec) -> Result<()> {
}

let net = get_namespace_path(&linux.namespaces, "network")?;
if net.is_empty() || net == "".to_string() {
if net.is_empty() || net == "" {
continue;
}

@@ -225,7 +222,7 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}

if linux.uid_mappings.len() == 0 || linux.gid_mappings.len() == 0 {
if linux.uid_mappings.is_empty() || linux.gid_mappings.is_empty() {
// rootless containers requires at least one UID/GID mapping
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}

@@ -233,7 +230,7 @@ fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
Ok(())
}

fn has_idmapping(maps: &Vec<LinuxIDMapping>, id: u32) -> bool {
fn has_idmapping(maps: &[LinuxIDMapping], id: u32) -> bool {
for map in maps {
if id >= map.container_id && id < map.container_id + map.size {
return true;

@@ -256,20 +253,16 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {

let id = fields[1].trim().parse::<u32>()?;

if opt.starts_with("uid=") {
if !has_idmapping(&linux.uid_mappings, id) {
if opt.starts_with("uid=") && !has_idmapping(&linux.uid_mappings, id) {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}

if opt.starts_with("gid=") {
if !has_idmapping(&linux.gid_mappings, id) {
if opt.starts_with("gid=") && !has_idmapping(&linux.gid_mappings, id) {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
}
}
}
}
Ok(())
}
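Several hunks above merge a nested if into a single condition (for example `if opt.starts_with("uid=") && !has_idmapping(...)`), as clippy's collapsible_if suggests. A standalone sketch:

fn should_reject(opt: &str, mapped: bool) -> bool {
    // Before:
    // if opt.starts_with("uid=") {
    //     if !mapped {
    //         return true;
    //     }
    // }
    // false
    //
    // After: one condition with the same behaviour.
    opt.starts_with("uid=") && !mapped
}

fn main() {
    assert!(should_reject("uid=1000", false));
    assert!(!should_reject("uid=1000", true));
    assert!(!should_reject("gid=1000", false));
}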
@@ -144,7 +144,7 @@ impl agentConfig {
}

fn get_vsock_port(p: &str) -> Result<i32> {
let fields: Vec<&str> = p.split("=").collect();
let fields: Vec<&str> = p.split('=').collect();
if fields.len() != 2 {
return Err(anyhow!("invalid port parameter"));
}

@@ -180,7 +180,7 @@ fn logrus_to_slog_level(logrus_level: &str) -> Result<slog::Level> {
}

fn get_log_level(param: &str) -> Result<slog::Level> {
let fields: Vec<&str> = param.split("=").collect();
let fields: Vec<&str> = param.split('=').collect();

if fields.len() != 2 {
return Err(anyhow!("invalid log level parameter"));

@@ -194,7 +194,7 @@ fn get_log_level(param: &str) -> Result<slog::Level> {
}

fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
let fields: Vec<&str> = param.split("=").collect();
let fields: Vec<&str> = param.split('=').collect();

if fields.len() != 2 {
return Err(anyhow!("invalid hotplug timeout parameter"));

@@ -214,7 +214,7 @@ fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
}

fn get_bool_value(param: &str) -> Result<bool> {
let fields: Vec<&str> = param.split("=").collect();
let fields: Vec<&str> = param.split('=').collect();

if fields.len() != 2 {
return Ok(false);

@@ -225,18 +225,14 @@ fn get_bool_value(param: &str) -> Result<bool> {
// first try to parse as bool value
v.parse::<bool>().or_else(|_err1| {
// then try to parse as integer value
v.parse::<u64>().or_else(|_err2| Ok(0)).and_then(|v| {
// only `0` returns false, otherwise returns true
Ok(match v {
0 => false,
_ => true,
})
})
v.parse::<u64>()
.or_else(|_err2| Ok(0))
.map(|v| !matches!(v, 0))
})
}

fn get_container_pipe_size(param: &str) -> Result<i32> {
let fields: Vec<&str> = param.split("=").collect();
let fields: Vec<&str> = param.split('=').collect();

if fields.len() != 2 {
return Err(anyhow!("invalid container pipe size parameter"));
@@ -634,10 +630,10 @@ mod tests {
let filename = file_path.to_str().expect("failed to create filename");

let mut file =
File::create(filename).expect(&format!("{}: failed to create file", msg));
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));

file.write_all(d.contents.as_bytes())
.expect(&format!("{}: failed to write file contents", msg));
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));

let mut config = agentConfig::new();
assert_eq!(config.debug_console, false, "{}", msg);

@@ -737,7 +733,7 @@ mod tests {

let msg = format!("{}: result: {:?}", msg, result);

assert_result!(d.result, result, format!("{}", msg));
assert_result!(d.result, result, msg);
}
}

@@ -831,7 +827,7 @@ mod tests {

let msg = format!("{}: result: {:?}", msg, result);

assert_result!(d.result, result, format!("{}", msg));
assert_result!(d.result, result, msg);
}
}

@@ -901,7 +897,7 @@ mod tests {

let msg = format!("{}: result: {:?}", msg, result);

assert_result!(d.result, result, format!("{}", msg));
assert_result!(d.result, result, msg);
}
}

@@ -975,7 +971,7 @@ mod tests {

let msg = format!("{}: result: {:?}", msg, result);

assert_result!(d.result, result, format!("{}", msg));
assert_result!(d.result, result, msg);
}
}
}
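The get_bool_value() rewrite collapses an Ok(match v { 0 => false, _ => true }) into .map(|v| !matches!(v, 0)). A standalone sketch of the same parse-bool-then-integer fallback on plain strings (illustrative, not the agent's kernel-cmdline parser):

fn get_bool_value(v: &str) -> bool {
    // Try a real bool first, then fall back to an integer where only 0
    // means false; matches!(v, 0) replaces the two-arm match.
    v.parse::<bool>()
        .unwrap_or_else(|_| v.parse::<u64>().map(|v| !matches!(v, 0)).unwrap_or(false))
}

fn main() {
    assert!(get_bool_value("true"));
    assert!(get_bool_value("3"));
    assert!(!get_bool_value("0"));
    assert!(!get_bool_value("not-a-number"));
}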
@@ -38,8 +38,8 @@ struct DevIndex(HashMap<String, DevIndexEntry>);
// DeviceHandler is the type of callback to be defined to handle every type of device driver.
type DeviceHandler = fn(&Device, &mut Spec, &Arc<Mutex<Sandbox>>, &DevIndex) -> Result<()>;

// DeviceHandlerList lists the supported drivers.
#[cfg_attr(rustfmt, rustfmt_skip)]
// DEVICEHANDLERLIST lists the supported drivers.
#[rustfmt::skip]
lazy_static! {
static ref DEVICEHANDLERLIST: HashMap<&'static str, DeviceHandler> = {
let mut m: HashMap<&'static str, DeviceHandler> = HashMap::new();

@@ -65,7 +65,7 @@ pub fn online_device(path: &str) -> Result<()> {
// Here, bridgeAddr is the address at which the bridge is attached on the root bus,
// while deviceAddr is the address at which the device is attached on the bridge.
fn get_pci_device_address(pci_id: &str) -> Result<String> {
let tokens: Vec<&str> = pci_id.split("/").collect();
let tokens: Vec<&str> = pci_id.split('/').collect();

if tokens.len() != 2 {
return Err(anyhow!(

@@ -165,7 +165,7 @@ pub fn get_pci_device_name(sandbox: &Arc<Mutex<Sandbox>>, pci_id: &str) -> Resul

/// Scan SCSI bus for the given SCSI address(SCSI-Id and LUN)
fn scan_scsi_bus(scsi_addr: &str) -> Result<()> {
let tokens: Vec<&str> = scsi_addr.split(":").collect();
let tokens: Vec<&str> = scsi_addr.split(':').collect();
if tokens.len() != 2 {
return Err(anyhow!(
"Unexpected format for SCSI Address: {}, expect SCSIID:LUA",

@@ -336,11 +336,11 @@ impl DevIndex {
fn new(spec: &Spec) -> DevIndex {
let mut map = HashMap::new();

for linux in spec.linux.as_ref() {
if let Some(linux) = spec.linux.as_ref() {
for (i, d) in linux.devices.iter().enumerate() {
let mut residx = Vec::new();

for linuxres in linux.resources.as_ref() {
if let Some(linuxres) = linux.resources.as_ref() {
for (j, r) in linuxres.devices.iter().enumerate() {
if r.r#type == d.r#type
&& r.major == Some(d.major)
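DevIndex::new() above replaces `for linux in spec.linux.as_ref()` with `if let Some(linux) = ...`; a for loop over an Option runs at most once, and clippy asks for the clearer if let. A sketch with toy types (not the real oci::Spec):

struct Linux {
    devices: Vec<String>,
}

struct Spec {
    linux: Option<Linux>,
}

fn count_devices(spec: &Spec) -> usize {
    // `for linux in spec.linux.as_ref()` would iterate zero or one
    // times; `if let` states that intent directly.
    if let Some(linux) = spec.linux.as_ref() {
        linux.devices.len()
    } else {
        0
    }
}

fn main() {
    let spec = Spec {
        linux: Some(Linux { devices: vec!["vda".to_string()] }),
    };
    assert_eq!(count_devices(&spec), 1);
    assert_eq!(count_devices(&Spec { linux: None }), 0);
}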
@@ -246,8 +246,8 @@ fn start_sandbox(logger: &Logger, config: &agentConfig, init_mode: bool) -> Resu
let (tx, rx) = mpsc::channel::<i32>();
sandbox.lock().unwrap().sender = Some(tx);

//vsock:///dev/vsock, port
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str());
// vsock:///dev/vsock, port
let mut server = rpc::start(sandbox, config.server_addr.as_str());

let _ = server.start().unwrap();

@@ -272,8 +272,6 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result

let signals = Signals::new(&[SIGCHLD])?;

let s = sandbox.clone();

thread::spawn(move || {
'outer: for sig in signals.forever() {
info!(logger, "received signal"; "signal" => sig);

@@ -303,13 +301,13 @@ fn setup_signal_handler(logger: &Logger, sandbox: Arc<Mutex<Sandbox>>) -> Result
};

let pid = wait_status.pid();
if pid.is_some() {
let raw_pid = pid.unwrap().as_raw();
if let Some(pid) = pid {
let raw_pid = pid.as_raw();
let child_pid = format!("{}", raw_pid);

let logger = logger.new(o!("child-pid" => child_pid));

let mut sandbox = s.lock().unwrap();
let mut sandbox = sandbox.lock().unwrap();
let process = sandbox.find_process(raw_pid);
if process.is_none() {
info!(logger, "child exited unexpectedly");

@@ -366,7 +364,8 @@ fn init_agent_as_init(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result

env::set_var("PATH", "/bin:/sbin/:/usr/bin/:/usr/sbin/");

let contents = std::fs::read_to_string("/etc/hostname").unwrap_or(String::from("localhost"));
let contents =
std::fs::read_to_string("/etc/hostname").unwrap_or_else(|_| String::from("localhost"));
let contents_array: Vec<&str> = contents.split(' ').collect();
let hostname = contents_array[0].trim();

@@ -481,8 +480,8 @@ where

// write and return
match writer.write_all(&buf[..buf_len]) {
Ok(_) => return Ok(buf_len as u64),
Err(err) => return Err(err),
Ok(_) => Ok(buf_len as u64),
Err(err) => Err(err),
}
}
@@ -8,7 +8,6 @@ extern crate procfs;
use prometheus::{Encoder, Gauge, GaugeVec, IntCounter, TextEncoder};

use anyhow::Result;
use protocols;

const NAMESPACE_KATA_AGENT: &str = "kata_agent";
const NAMESPACE_KATA_GUEST: &str = "kata_guest";

@@ -85,18 +84,16 @@ pub fn get_metrics(_: &protocols::agent::GetMetricsRequest) -> Result<String> {
let encoder = TextEncoder::new();
encoder.encode(&metric_families, &mut buffer).unwrap();

Ok(String::from_utf8(buffer.clone()).unwrap())
Ok(String::from_utf8(buffer).unwrap())
}

fn update_agent_metrics() {
let me = procfs::process::Process::myself();
match me {
Err(err) => {

if let Err(err) = me {
error!(sl!(), "failed to create process instance: {:?}", err);
return;
}
Ok(_) => {}
}

let me = me.unwrap();
@@ -39,7 +39,7 @@ pub const DRIVERLOCALTYPE: &str = "local";

pub const TYPEROOTFS: &str = "rootfs";

#[cfg_attr(rustfmt, rustfmt_skip)]
#[rustfmt::skip]
lazy_static! {
pub static ref FLAGS: HashMap<&'static str, (bool, MsFlags)> = {
let mut m = HashMap::new();

@@ -88,7 +88,7 @@ pub struct INIT_MOUNT {
options: Vec<&'static str>,
}

#[cfg_attr(rustfmt, rustfmt_skip)]
#[rustfmt::skip]
lazy_static!{
static ref CGROUPS: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();

@@ -109,7 +109,7 @@ lazy_static!{
};
}

#[cfg_attr(rustfmt, rustfmt_skip)]
#[rustfmt::skip]
lazy_static! {
pub static ref INIT_ROOTFS_MOUNTS: Vec<INIT_MOUNT> = vec![
INIT_MOUNT{fstype: "proc", src: "proc", dest: "/proc", options: vec!["nosuid", "nodev", "noexec"]},

@@ -126,7 +126,7 @@ lazy_static! {
type StorageHandler = fn(&Logger, &Storage, Arc<Mutex<Sandbox>>) -> Result<String>;

// STORAGEHANDLERLIST lists the supported drivers.
#[cfg_attr(rustfmt, rustfmt_skip)]
#[rustfmt::skip]
lazy_static! {
pub static ref STORAGEHANDLERLIST: HashMap<&'static str, StorageHandler> = {
let mut m = HashMap::new();

@@ -173,9 +173,9 @@ impl<'a> BareMount<'a> {
BareMount {
source: s,
destination: d,
fs_type: fs_type,
flags: flags,
options: options,
fs_type,
flags,
options,
logger: logger.new(o!("subsystem" => "baremount")),
}
}

@@ -190,11 +190,11 @@ impl<'a> BareMount<'a> {
let cstr_dest: CString;
let cstr_fs_type: CString;

if self.source.len() == 0 {
if self.source.is_empty() {
return Err(anyhow!("need mount source"));
}

if self.destination.len() == 0 {
if self.destination.is_empty() {
return Err(anyhow!("need mount destination"));
}

@@ -204,14 +204,14 @@ impl<'a> BareMount<'a> {
cstr_dest = CString::new(self.destination)?;
dest = cstr_dest.as_ptr();

if self.fs_type.len() == 0 {
if self.fs_type.is_empty() {
return Err(anyhow!("need mount FS type"));
}

cstr_fs_type = CString::new(self.fs_type)?;
fs_type = cstr_fs_type.as_ptr();

if self.options.len() > 0 {
if !self.options.is_empty() {
cstr_options = CString::new(self.options)?;
options = cstr_options.as_ptr() as *const c_void;
}
@@ -243,8 +243,7 @@ fn ephemeral_storage_handler(
storage: &Storage,
sandbox: Arc<Mutex<Sandbox>>,
) -> Result<String> {
let s = sandbox.clone();
let mut sb = s.lock().unwrap();
let mut sb = sandbox.lock().unwrap();
let new_storage = sb.set_sandbox_storage(&storage.mount_point);

if !new_storage {

@@ -262,8 +261,7 @@ fn local_storage_handler(
storage: &Storage,
sandbox: Arc<Mutex<Sandbox>>,
) -> Result<String> {
let s = sandbox.clone();
let mut sb = s.lock().unwrap();
let mut sb = sandbox.lock().unwrap();
let new_storage = sb.set_sandbox_storage(&storage.mount_point);

if !new_storage {

@@ -279,8 +277,7 @@ fn local_storage_handler(

let opts = parse_options(opts_vec);
let mode = opts.get("mode");
if mode.is_some() {
let mode = mode.unwrap();
if let Some(mode) = mode {
let mut permission = fs::metadata(&storage.mount_point)?.permissions();

let o_mode = u32::from_str_radix(mode, 8)?;

@@ -410,17 +407,17 @@ fn parse_mount_flags_and_options(options_vec: Vec<&str>) -> (MsFlags, String) {
let mut options: String = "".to_string();

for opt in options_vec {
if opt.len() != 0 {
if !opt.is_empty() {
match FLAGS.get(opt) {
Some(x) => {
let (_, f) = *x;
flags = flags | f;
flags |= f;
}
None => {
if options.len() > 0 {
if !options.is_empty() {
options.push_str(format!(",{}", opt).as_str());
} else {
options.push_str(format!("{}", opt).as_str());
options.push_str(opt.to_string().as_str());
}
}
};

@@ -458,7 +455,7 @@ pub fn add_storages(
// Todo need to rollback the mounted storage if err met.
let mount_point = handler(&logger, &storage, sandbox.clone())?;

if mount_point.len() > 0 {
if !mount_point.is_empty() {
mount_list.push(mount_point);
}
}

@@ -570,10 +567,10 @@ pub fn get_cgroup_mounts(
'outer: for (_, line) in reader.lines().enumerate() {
let line = line?;

let fields: Vec<&str> = line.split("\t").collect();
let fields: Vec<&str> = line.split('\t').collect();

// Ignore comment header
if fields[0].starts_with("#") {
if fields[0].starts_with('#') {
continue;
}

@@ -643,7 +640,7 @@ pub fn cgroups_mount(logger: &Logger, unified_cgroup_hierarchy: bool) -> Result<
Ok(())
}

pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> {
pub fn remove_mounts(mounts: &[String]) -> Result<()> {
for m in mounts.iter() {
mount::umount(m.as_str()).context(format!("failed to umount {:?}", m))?;
}

@@ -675,7 +672,7 @@ fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
fn parse_options(option_list: Vec<String>) -> HashMap<String, String> {
let mut options = HashMap::new();
for opt in option_list.iter() {
let fields: Vec<&str> = opt.split("=").collect();
let fields: Vec<&str> = opt.split('=').collect();
if fields.len() != 2 {
continue;
}
@@ -856,7 +853,7 @@ mod tests {

let msg = format!("{}: umount result: {:?}", msg, result);

assert!(ret == 0, format!("{}", msg));
assert!(ret == 0, msg);
};

continue;

@@ -914,7 +911,8 @@ mod tests {
.expect("failed to create mount destination filename");

for d in [test_dir_filename, mnt_src_filename, mnt_dest_filename].iter() {
std::fs::create_dir_all(d).expect(&format!("failed to create directory {}", d));
std::fs::create_dir_all(d)
.unwrap_or_else(|_| panic!("failed to create directory {}", d));
}

// Create an actual mount

@@ -1055,13 +1053,13 @@ mod tests {

let filename = file_path
.to_str()
.expect(&format!("{}: failed to create filename", msg));
.unwrap_or_else(|| panic!("{}: failed to create filename", msg));

let mut file =
File::create(filename).expect(&format!("{}: failed to create file", msg));
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));

file.write_all(d.contents.as_bytes())
.expect(&format!("{}: failed to write file contents", msg));
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));

let result = get_mount_fs_type_from_file(filename, d.mount_point);

@@ -1217,10 +1215,10 @@ mod tests {
.expect("failed to create cgroup file filename");

let mut file =
File::create(filename).expect(&format!("{}: failed to create file", msg));
File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg));

file.write_all(d.contents.as_bytes())
.expect(&format!("{}: failed to write file contents", msg));
.unwrap_or_else(|_| panic!("{}: failed to write file contents", msg));

let result = get_cgroup_mounts(&logger, filename, false);
let msg = format!("{}: result: {:?}", msg, result);
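In the test hunks, expect(&format!(...)) becomes unwrap_or_else(|_| panic!(...)): clippy's expect_fun_call points out that the expect message is formatted on every call, even when it succeeds, while the closure only formats on failure. A standalone sketch (the path and message are illustrative):

use std::fs::File;

fn create_or_panic(filename: &str, msg: &str) -> File {
    // expect(&format!(...)) would build the message eagerly;
    // unwrap_or_else only builds it if File::create actually fails.
    File::create(filename).unwrap_or_else(|_| panic!("{}: failed to create file", msg))
}

fn main() {
    let _f = create_or_panic("/tmp/clippy-example.txt", "test");
}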
@@ -52,12 +52,12 @@ impl Namespace {
}
}

pub fn as_ipc(mut self) -> Self {
pub fn get_ipc(mut self) -> Self {
self.ns_type = NamespaceType::IPC;
self
}

pub fn as_uts(mut self, hostname: &str) -> Self {
pub fn get_uts(mut self, hostname: &str) -> Self {
self.ns_type = NamespaceType::UTS;
if hostname != "" {
self.hostname = Some(String::from(hostname));

@@ -65,7 +65,7 @@ impl Namespace {
self
}

pub fn as_pid(mut self) -> Self {
pub fn get_pid(mut self) -> Self {
self.ns_type = NamespaceType::PID;
self
}

@@ -81,7 +81,7 @@ impl Namespace {
fs::create_dir_all(&self.persistent_ns_dir)?;

let ns_path = PathBuf::from(&self.persistent_ns_dir);
let ns_type = self.ns_type.clone();
let ns_type = self.ns_type;
let logger = self.logger.clone();

let new_ns_path = ns_path.join(&ns_type.get());

@@ -97,7 +97,7 @@ impl Namespace {
File::open(Path::new(&origin_ns_path))?;

// Create a new netns on the current thread.
let cf = ns_type.get_flags().clone();
let cf = ns_type.get_flags();

unshare(cf)?;

@@ -110,12 +110,9 @@ impl Namespace {

let mut flags = MsFlags::empty();

match FLAGS.get("rbind") {
Some(x) => {
if let Some(x) = FLAGS.get("rbind") {
let (_, f) = *x;
flags = flags | f;
}
None => (),
flags |= f;
};

let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
@@ -194,23 +191,23 @@ mod tests {
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();

let ns_ipc = Namespace::new(&logger)
.as_ipc()
.get_ipc()
.set_root_dir(tmpdir.path().to_str().unwrap())
.setup();

assert!(ns_ipc.is_ok());
assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok());
assert!(remove_mounts(&[ns_ipc.unwrap().path]).is_ok());

let logger = slog::Logger::root(slog::Discard, o!());
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap();

let ns_uts = Namespace::new(&logger)
.as_uts("test_hostname")
.get_uts("test_hostname")
.set_root_dir(tmpdir.path().to_str().unwrap())
.setup();

assert!(ns_uts.is_ok());
assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok());
assert!(remove_mounts(&[ns_uts.unwrap().path]).is_ok());
}

#[test]
@@ -48,7 +48,7 @@ pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
let logger = logger.new(o!( "subsystem" => "network"));

if dns_list.len() == 0 {
if dns_list.is_empty() {
info!(
logger,
"Did not set sandbox DNS as DNS not received as part of request."

@@ -117,12 +117,12 @@ mod tests {
];

// write to /run/kata-containers/sandbox/resolv.conf
let mut src_file =
File::create(src_filename).expect(&format!("failed to create file {:?}", src_filename));
let mut src_file = File::create(src_filename)
.unwrap_or_else(|_| panic!("failed to create file {:?}", src_filename));
let content = dns.join("\n");
src_file
.write_all(content.as_bytes())
.expect(&format!("failed to write file contents"));
.expect("failed to write file contents");

// call do_setup_guest_dns
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);
@@ -4,7 +4,6 @@
//

use anyhow::Result;
use libc;
use nix::errno::Errno;
use nix::fcntl::{self, OFlag};
use nix::sys::stat::Mode;
@@ -21,7 +21,6 @@ use protocols::health::{
HealthCheckResponse, HealthCheckResponse_ServingStatus, VersionCheckResponse,
};
use protocols::types::Interface;
use rustjail;
use rustjail::cgroups::notifier;
use rustjail::container::{BaseContainer, Container, LinuxContainer};
use rustjail::process::Process;

@@ -47,7 +46,6 @@ use crate::AGENT_CONFIG;
use netlink::{RtnlHandle, NETLINK_ROUTE};

use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ};
use serde_json;
use std::convert::TryFrom;
use std::fs;
use std::os::unix::io::RawFd;

@@ -152,14 +150,13 @@ impl agentService {

let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size;
let p = if oci.process.is_some() {
let tp = Process::new(
Process::new(
&sl!(),
&oci.process.as_ref().unwrap(),
cid.as_str(),
true,
pipe_size,
)?;
tp
)?
} else {
info!(sl!(), "no process configurations!");
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::EINVAL)));

@@ -175,7 +172,7 @@ impl agentService {
}

fn do_start_container(&self, req: protocols::agent::StartContainerRequest) -> Result<()> {
let cid = req.container_id.clone();
let cid = req.container_id;

let sandbox = self.sandbox.clone();
let mut s = sandbox.lock().unwrap();

@@ -183,7 +180,7 @@ impl agentService {

let ctr = s
.get_container(&cid)
.ok_or(anyhow!("Invalid container id"))?;
.ok_or_else(|| anyhow!("Invalid container id"))?;

ctr.exec()?;

@@ -206,9 +203,7 @@ impl agentService {
let mut remove_container_resources = |sandbox: &mut Sandbox| -> Result<()> {
// Find the sandbox storage used by this container
let mounts = sandbox.container_mounts.get(&cid);
if mounts.is_some() {
let mounts = mounts.unwrap();

if let Some(mounts) = mounts {
remove_mounts(&mounts)?;

for m in mounts.iter() {

@@ -232,7 +227,7 @@ impl agentService {
let mut sandbox = s.lock().unwrap();
let ctr = sandbox
.get_container(&cid)
.ok_or(anyhow!("Invalid container id"))?;
.ok_or_else(|| anyhow!("Invalid container id"))?;

ctr.destroy()?;

@@ -250,11 +245,11 @@ impl agentService {
let mut sandbox = s.lock().unwrap();
let _ctr = sandbox
.get_container(&cid2)
.ok_or(anyhow!("Invalid container id"))
.and_then(|ctr| {
.ok_or_else(|| anyhow!("Invalid container id"))
.map(|ctr| {
ctr.destroy().unwrap();
tx.send(1).unwrap();
Ok(ctr)
ctr
});
});

@@ -277,7 +272,7 @@ impl agentService {
let cid = req.container_id.clone();
let exec_id = req.exec_id.clone();

info!(sl!(), "cid: {} eid: {}", cid.clone(), exec_id.clone());
info!(sl!(), "cid: {} eid: {}", cid, exec_id);

let s = self.sandbox.clone();
let mut sandbox = s.lock().unwrap();

@@ -294,7 +289,7 @@ impl agentService {

let ctr = sandbox
.get_container(&cid)
.ok_or(anyhow!("Invalid container id"))?;
.ok_or_else(|| anyhow!("Invalid container id"))?;

ctr.run(p)?;

@@ -340,7 +335,7 @@ impl agentService {
req: protocols::agent::WaitProcessRequest,
) -> Result<protocols::agent::WaitProcessResponse> {
let cid = req.container_id.clone();
let eid = req.exec_id.clone();
let eid = req.exec_id;
let s = self.sandbox.clone();
let mut resp = WaitProcessResponse::new();
let pid: pid_t;

@@ -376,7 +371,7 @@ impl agentService {
let mut sandbox = s.lock().unwrap();
let ctr = sandbox
.get_container(&cid)
.ok_or(anyhow!("Invalid container id"))?;
.ok_or_else(|| anyhow!("Invalid container id"))?;

let mut p = match ctr.processes.get_mut(&pid) {
Some(p) => p,
@@ -584,16 +579,18 @@ impl protocols::agent_ttrpc::AgentService for agentService {
) -> ttrpc::Result<ListProcessesResponse> {
let cid = req.container_id.clone();
let format = req.format.clone();
let mut args = req.args.clone().into_vec();
let mut args = req.args.into_vec();
let mut resp = ListProcessesResponse::new();

let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
))?;
)
})?;

let pids = ctr.processes().unwrap();

@@ -612,7 +609,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
}

// format "table"
if args.len() == 0 {
if args.is_empty() {
// default argument
args = vec!["-ef".to_string()];
}

@@ -670,10 +667,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
))?;
)
})?;

let resp = Empty::new();

@@ -696,14 +695,16 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::StatsContainerRequest,
) -> ttrpc::Result<StatsContainerResponse> {
let cid = req.container_id.clone();
let cid = req.container_id;
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
))?;
)
})?;

ctr.stats()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))

@@ -718,10 +719,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
))?;
)
})?;

ctr.pause()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

@@ -738,10 +741,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

let ctr = sandbox.get_container(&cid).ok_or(ttrpc_error(
let ctr = sandbox.get_container(&cid).ok_or_else(|| {
ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
"invalid container id".to_string(),
))?;
)
})?;

ctr.resume()
.map_err(|e| ttrpc_error(ttrpc::Code::INTERNAL, e.to_string()))?;

@@ -782,7 +787,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
req: protocols::agent::CloseStdinRequest,
) -> ttrpc::Result<Empty> {
let cid = req.container_id.clone();
let eid = req.exec_id.clone();
let eid = req.exec_id;
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

@@ -852,11 +857,11 @@ impl protocols::agent_ttrpc::AgentService for agentService {
if req.interface.is_none() {
return Err(ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
format!("empty update interface request"),
"empty update interface request".to_string(),
));
}

let interface = req.interface.clone();
let interface = req.interface;
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

@@ -884,11 +889,11 @@ impl protocols::agent_ttrpc::AgentService for agentService {
if req.routes.is_none() {
return Err(ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
format!("empty update routes request"),
"empty update routes request".to_string(),
));
}

let rs = req.routes.clone().unwrap().Routes.into_vec();
let rs = req.routes.unwrap().Routes.into_vec();

let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();

@@ -1002,7 +1007,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
});
}

if req.sandbox_id.len() > 0 {
if !req.sandbox_id.is_empty() {
s.id = req.sandbox_id.clone();
}

@@ -1028,7 +1033,7 @@ impl protocols::agent_ttrpc::AgentService for agentService {
Ok(_) => {
let sandbox = self.sandbox.clone();
let mut s = sandbox.lock().unwrap();
let _ = req
let _dns = req
.dns
.to_vec()
.iter()

@@ -1065,11 +1070,11 @@ impl protocols::agent_ttrpc::AgentService for agentService {
if req.neighbors.is_none() {
return Err(ttrpc_error(
ttrpc::Code::INVALID_ARGUMENT,
format!("empty add arp neighbours request"),
"empty add arp neighbours request".to_string(),
));
}

let neighs = req.neighbors.clone().unwrap().ARPNeighbors.into_vec();
let neighs = req.neighbors.unwrap().ARPNeighbors.into_vec();

let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
@ -1198,12 +1203,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
|
||||
drop(sandbox);
|
||||
|
||||
match event_rx.recv() {
|
||||
Err(err) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, err.to_string())),
|
||||
Err(err) => Err(ttrpc_error(ttrpc::Code::INTERNAL, err.to_string())),
|
||||
Ok(container_id) => {
|
||||
info!(sl!(), "get_oom_event return {}", &container_id);
|
||||
let mut resp = OOMEvent::new();
|
||||
resp.container_id = container_id;
|
||||
return Ok(resp);
|
||||
Ok(resp)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1243,7 +1248,7 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
|
||||
if block_size {
|
||||
match fs::read_to_string(SYSFS_MEMORY_BLOCK_SIZE_PATH) {
|
||||
Ok(v) => {
|
||||
if v.len() == 0 {
|
||||
if v.is_empty() {
|
||||
info!(sl!(), "string in empty???");
|
||||
return Err(anyhow!("Invalid block size"));
|
||||
}
|
||||
@ -1322,7 +1327,7 @@ fn read_stream(fd: RawFd, l: usize) -> Result<Vec<u8>> {
|
||||
}
|
||||
Err(e) => match e {
|
||||
nix::Error::Sys(errno) => match errno {
|
||||
Errno::EAGAIN => v.resize(0, 0),
|
||||
Errno::EAGAIN => v.clear(),
|
||||
_ => return Err(anyhow!(nix::Error::Sys(errno))),
|
||||
},
|
||||
_ => return Err(anyhow!("read error")),
|
||||
@ -1340,13 +1345,13 @@ fn find_process<'a>(
) -> Result<&'a mut Process> {
let ctr = sandbox
.get_container(cid)
.ok_or(anyhow!("Invalid container id"))?;
.ok_or_else(|| anyhow!("Invalid container id"))?;

if init || eid == "" {
return ctr
.processes
.get_mut(&ctr.init_process_pid)
.ok_or(anyhow!("cannot find init process!"));
.ok_or_else(|| anyhow!("cannot find init process!"));
}

ctr.get_process(eid).map_err(|_| anyhow!("Invalid exec id"))
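
ok_or eagerly builds its error value, so clippy's or_fun_call lint suggests ok_or_else, which only constructs the error on the None path. A small sketch using a plain HashMap and String errors instead of the agent's anyhow! errors:

    use std::collections::HashMap;

    // Hypothetical lookup, just to contrast ok_or with ok_or_else.
    fn find(map: &HashMap<String, u32>, key: &str) -> Result<u32, String> {
        map.get(key)
            .copied()
            // Before: .ok_or(format!("{} not found", key)) builds the error
            // String even when the key exists.
            // After: the closure runs only on the None path.
            .ok_or_else(|| format!("{} not found", key))
    }

    fn main() {
        let mut map = HashMap::new();
        map.insert("cid".to_string(), 42);
        println!("{:?}", find(&map, "cid"));
        println!("{:?}", find(&map, "missing"));
    }
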
@ -1396,7 +1401,7 @@ fn update_container_namespaces(
let linux = spec
.linux
.as_mut()
.ok_or(anyhow!("Spec didn't container linux field"))?;
.ok_or_else(|| anyhow!("Spec didn't container linux field"))?;

let namespaces = linux.namespaces.as_mut_slice();
for namespace in namespaces.iter_mut() {
@ -1464,7 +1469,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
}
};
if line.starts_with("SigCgt:") {
let mask_vec: Vec<&str> = line.split(":").collect();
let mask_vec: Vec<&str> = line.split(':').collect();
if mask_vec.len() != 2 {
warn!(sl!(), "parse the SigCgt field failed\n");
return false;
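
Splitting on a single character now uses a char pattern rather than a one-character string, per clippy's single_char_pattern. The input below is a made-up /proc/<pid>/status style line:

    fn main() {
        let line = "SigCgt:0000000000010002";

        // Before (clippy::single_char_pattern): line.split(":")
        // After: a char pattern is enough for a single separator character.
        let mask_vec: Vec<&str> = line.split(':').collect();
        assert_eq!(mask_vec.len(), 2);
        println!("mask = {}", mask_vec[1]);
    }
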
@ -1484,7 +1489,7 @@ fn is_signal_handled(pid: pid_t, signum: u32) -> bool {
false
}

fn do_mem_hotplug_by_probe(addrs: &Vec<u64>) -> Result<()> {
fn do_mem_hotplug_by_probe(addrs: &[u64]) -> Result<()> {
for addr in addrs.iter() {
fs::write(SYSFS_MEMORY_HOTPLUG_PROBE_PATH, format!("{:#X}", *addr))?;
}
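
The signature change from &Vec<u64> to &[u64] follows clippy's ptr_arg lint; a slice parameter accepts vectors, arrays, and sub-slices alike. A sketch with the sysfs write replaced by a closure:

    // Accepting &[u64] instead of &Vec<u64> (clippy::ptr_arg) lets callers pass
    // vectors, arrays, or sub-slices; the closure stands in for the sysfs write.
    fn hotplug_by_probe(addrs: &[u64], mut write: impl FnMut(String)) {
        for addr in addrs.iter() {
            write(format!("{:#X}", *addr));
        }
    }

    fn main() {
        let addrs = vec![0x1000_0000u64, 0x2000_0000];
        hotplug_by_probe(&addrs, |s| println!("probe <- {}", s)); // &Vec coerces to &[u64]
        hotplug_by_probe(&[0x3000_0000], |s| println!("probe <- {}", s));
    }
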
@ -1497,8 +1502,12 @@ fn do_set_guest_date_time(sec: i64, usec: i64) -> Result<()> {
tv_usec: usec,
};

let ret =
unsafe { libc::settimeofday(&tv as *const libc::timeval, 0 as *const libc::timezone) };
let ret = unsafe {
libc::settimeofday(
&tv as *const libc::timeval,
std::ptr::null::<libc::timezone>(),
)
};

Errno::result(ret).map(drop)?;

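The null timezone pointer is now built with std::ptr::null instead of casting 0, per clippy's zero_ptr lint. The sketch below shows only the cast replacement, using plain integer pointers rather than the libc types from the diff:

    fn main() {
        // Before (clippy::zero_ptr): `0 as *const i32` / `0 as *mut i32`.
        // After: std::ptr::null() and null_mut() make the intent explicit.
        let p: *const i32 = std::ptr::null();
        let q: *mut i32 = std::ptr::null_mut();
        assert!(p.is_null() && q.is_null());
    }
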
@ -1514,8 +1523,8 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {

let parent = path.parent();

let dir = if parent.is_some() {
parent.unwrap().to_path_buf()
let dir = if let Some(parent) = parent {
parent.to_path_buf()
} else {
PathBuf::from("/")
};
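
The is_some()/unwrap() pair becomes a single if let binding, the pattern clippy's unnecessary_unwrap lint points at. A sketch with a hypothetical path:

    use std::path::{Path, PathBuf};

    fn main() {
        // Hypothetical destination path, only to exercise Path::parent().
        let path = Path::new("/tmp/example/file.txt");
        let parent = path.parent();

        // Before: parent.is_some() followed by parent.unwrap().to_path_buf().
        // After: if let binds the inner value once, with no unwrap.
        let dir = if let Some(parent) = parent {
            parent.to_path_buf()
        } else {
            PathBuf::from("/")
        };
        println!("target dir: {}", dir.display());
    }
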
@ -1575,8 +1584,8 @@ fn setup_bundle(cid: &str, spec: &mut Spec) -> Result<PathBuf> {
let spec_root = spec.root.as_ref().unwrap();

let bundle_path = Path::new(CONTAINER_BASE).join(cid);
let config_path = bundle_path.clone().join("config.json");
let rootfs_path = bundle_path.clone().join("rootfs");
let config_path = bundle_path.join("config.json");
let rootfs_path = bundle_path.join("rootfs");

fs::create_dir_all(&rootfs_path)?;
BareMount::new(
@ -1640,9 +1649,9 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
"load_kernel_module return code: {} stdout:{} stderr:{}",
code, std_out, std_err
);
return Err(anyhow!(msg));
Err(anyhow!(msg))
}
None => return Err(anyhow!("Process terminated by signal")),
None => Err(anyhow!("Process terminated by signal")),
}
}

@ -1654,17 +1663,16 @@ mod tests {
use std::sync::mpsc::{Receiver, Sender};
use ttrpc::{MessageHeader, TtrpcContext};

fn mk_ttrpc_context() -> (TtrpcContext, Receiver<(MessageHeader, Vec<u8>)>) {
type Message = (MessageHeader, Vec<u8>);

fn mk_ttrpc_context() -> (TtrpcContext, Receiver<Message>) {
let mh = MessageHeader::default();

let (tx, rx): (
Sender<(MessageHeader, Vec<u8>)>,
Receiver<(MessageHeader, Vec<u8>)>,
) = channel();
let (tx, rx): (Sender<Message>, Receiver<Message>) = channel();

let ctx = TtrpcContext {
fd: -1,
mh: mh,
mh,
res_tx: tx,
};

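The test helper gains a Message type alias for the channel tuple and drops redundant field names such as mh: mh in favour of field-init shorthand (clippy's redundant_field_names). The struct below is illustrative, not the real TtrpcContext:

    // Illustrative struct; the real code fills in a ttrpc::TtrpcContext.
    struct Ctx {
        fd: i32,
        mh: u32,
    }

    fn main() {
        let mh = 7u32;

        // Before (clippy::redundant_field_names): Ctx { fd: -1, mh: mh }
        // After: field-init shorthand when the variable and the field share a name.
        let ctx = Ctx { fd: -1, mh };
        println!("fd={} mh={}", ctx.fd, ctx.mh);
    }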

@ -75,7 +75,7 @@ impl Sandbox {
sender: None,
rtnl: Some(RtnlHandle::new(NETLINK_ROUTE, 0).unwrap()),
hooks: None,
event_rx: event_rx,
event_rx,
event_tx: tx,
})
}
@ -112,14 +112,14 @@ impl Sandbox {
// acquiring a lock on sandbox.
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
match self.storages.get_mut(path) {
None => return Err(anyhow!("Sandbox storage with path {} not found", path)),
None => Err(anyhow!("Sandbox storage with path {} not found", path)),
Some(count) => {
*count -= 1;
if *count < 1 {
self.storages.remove(path);
return Ok(true);
}
return Ok(false);
Ok(false)
}
}
}
@ -161,13 +161,13 @@ impl Sandbox {
pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
// Set up shared IPC namespace
self.shared_ipcns = Namespace::new(&self.logger)
.as_ipc()
.get_ipc()
.setup()
.context("Failed to setup persistent IPC namespace")?;

// // Set up shared UTS namespace
self.shared_utsns = Namespace::new(&self.logger)
.as_uts(self.hostname.as_str())
.get_uts(self.hostname.as_str())
.setup()
.context("Failed to setup persistent UTS namespace")?;

@ -184,7 +184,7 @@ impl Sandbox {
// This means a separate pause process has not been created. We treat the
// first container created as the infra container in that case
// and use its pid namespace in case pid namespace needs to be shared.
if self.sandbox_pidns.is_none() && self.containers.len() == 0 {
if self.sandbox_pidns.is_none() && self.containers.is_empty() {
let init_pid = c.init_process_pid;
if init_pid == -1 {
return Err(anyhow!(
@ -192,7 +192,7 @@ impl Sandbox {
));
}

let mut pid_ns = Namespace::new(&self.logger).as_pid();
let mut pid_ns = Namespace::new(&self.logger).get_pid();
pid_ns.path = format!("/proc/{}/ns/pid", init_pid);

self.sandbox_pidns = Some(pid_ns);
@ -216,7 +216,7 @@ impl Sandbox {
}

pub fn destroy(&mut self) -> Result<()> {
for (_, ctr) in &mut self.containers {
for ctr in self.containers.values_mut() {
ctr.destroy()?;
}
Ok(())
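
Iterating a map with for (_, ctr) in &mut ... discards the key, so the destroy loop now uses values_mut() to make that explicit. A standalone sketch with an invented map:

    use std::collections::HashMap;

    fn main() {
        let mut containers: HashMap<String, u32> = HashMap::new();
        containers.insert("c1".to_string(), 0);
        containers.insert("c2".to_string(), 0);

        // Before: `for (_, ctr) in &mut containers` iterates pairs and drops the key.
        // After: values_mut() says directly that only the values are mutated.
        for ctr in containers.values_mut() {
            *ctr += 1;
        }
        println!("{:?}", containers);
    }
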
@ -332,7 +332,7 @@ fn online_resources(logger: &Logger, path: &str, pattern: &str, num: i32) -> Res
}
let c = c.unwrap();

if c.trim().contains("0") {
if c.trim().contains('0') {
let r = fs::write(file.as_str(), "1");
if r.is_err() {
continue;
@ -612,8 +612,8 @@ mod tests {

let linux = Linux::default();
let mut spec = Spec::default();
spec.root = Some(root).into();
spec.linux = Some(linux).into();
spec.root = Some(root);
spec.linux = Some(linux);

CreateOpts {
cgroup_name: "".to_string(),

@ -2,6 +2,7 @@
//
// SPDX-License-Identifier: Apache-2.0
//
#![allow(clippy::module_inception)]

#[cfg(test)]
mod test_utils {

@ -289,7 +289,7 @@ languages:
description: |
'newest-version' is the latest version known to work when
building Kata
newest-version: "1.44.1"
newest-version: "1.47.0"

specs:
description: "Details of important specifications"