Merge pull request #942 from Tim-Zhang/optimize-error-handling

agent: Optimize error handling
Fupan Li 2020-10-16 09:15:22 +08:00 committed by GitHub
commit cacb27fe4b
11 changed files with 302 additions and 461 deletions
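
Every hunk below applies the same refactoring: explicit `match` and `if let Err(...)` blocks are replaced by `Result`/`Option` combinators (`map_err`, `or_else`, `ok_or_else`, `context`) followed by the `?` operator, and call sites of the removed `check!` macro become best-effort `let _ = ... .map_err(|e| warn!(...))` expressions. A minimal, self-contained sketch of those idioms (the `do_work` and `find_thing` helpers are made up for illustration, and `eprintln!` stands in for the agent's slog logger):

```rust
use anyhow::{anyhow, Context, Result};

// Hypothetical stand-ins for the fallible calls touched by this PR.
fn do_work() -> std::io::Result<()> {
    Ok(())
}

fn find_thing() -> Option<u32> {
    Some(1)
}

fn demo() -> Result<u32> {
    // 1. Propagate the error with added context instead of `match` + `return Err(...)`.
    do_work().context("do_work failed")?;

    // 2. Convert a missing Option value into an error instead of matching on `None`.
    let id = find_thing().ok_or_else(|| anyhow!("thing not found"))?;

    // 3. Best-effort call: log and discard the error, the pattern that replaces `check!`.
    let _ = do_work().map_err(|e| eprintln!("do_work failed (ignored): {:?}", e));

    Ok(id)
}
```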

View File

@@ -126,13 +126,12 @@ pub fn drop_privileges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
     )
     .map_err(|e| anyhow!(e.to_string()))?;
 
-    if let Err(_) = caps::set(
+    let _ = caps::set(
         None,
         CapSet::Ambient,
         to_capshashset(cfd_log, caps.ambient.as_ref()),
-    ) {
-        log_child!(cfd_log, "failed to set ambient capability");
-    }
+    )
+    .map_err(|_| log_child!(cfd_log, "failed to set ambient capability"));
 
     Ok(())
 }

View File

@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-use anyhow::{anyhow, bail, Context, Result};
+use anyhow::{anyhow, Context, Result};
 use dirs;
 use lazy_static;
 use libc::pid_t;
@@ -67,17 +67,6 @@ const CLOG_FD: &str = "CLOG_FD";
 const FIFO_FD: &str = "FIFO_FD";
 const HOME_ENV_KEY: &str = "HOME";
 
-#[macro_export]
-macro_rules! check {
-    ($what:expr, $where:expr) => ({
-        if let Err(e) = $what {
-            let subsystem = $where;
-            let logger = slog_scope::logger().new(o!("subsystem" => subsystem));
-            warn!(logger, "{:?}", e);
-        }
-    })
-}
-
 #[derive(PartialEq, Clone, Copy)]
 pub enum Status {
     CREATED,
@@ -331,17 +320,11 @@ impl Container for LinuxContainer {
 pub fn init_child() {
     let cwfd = std::env::var(CWFD_FD).unwrap().parse::<i32>().unwrap();
     let cfd_log = std::env::var(CLOG_FD).unwrap().parse::<i32>().unwrap();
 
-    match do_init_child(cwfd) {
-        Ok(_) => (),
-        Err(e) => {
-            log_child!(cfd_log, "child exit: {:?}", e);
-            check!(
-                write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str()),
-                "write_sync in init_child()"
-            );
-            return;
-        }
-    }
+    let _ = do_init_child(cwfd).map_err(|e| {
+        log_child!(cfd_log, "child exit: {:?}", e);
+        let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
+    });
 }
 
 fn do_init_child(cwfd: RawFd) -> Result<()> {
@@ -406,19 +389,17 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
                 to_new.set(*s, true);
             }
         } else {
-            let fd = match fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()) {
-                Ok(v) => v,
-                Err(e) => {
-                    log_child!(
-                        cfd_log,
-                        "cannot open type: {} path: {}",
-                        ns.r#type.clone(),
-                        ns.path.clone()
-                    );
-                    log_child!(cfd_log, "error is : {}", e.as_errno().unwrap().desc());
-                    return Err(e.into());
-                }
-            };
+            let fd =
+                fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
+                    log_child!(
+                        cfd_log,
+                        "cannot open type: {} path: {}",
+                        ns.r#type.clone(),
+                        ns.path.clone()
+                    );
+                    log_child!(cfd_log, "error is : {:?}", e.as_errno());
+                    e
+                })?;
 
             if *s != CloneFlags::CLONE_NEWPID {
                 to_join.push((*s, fd));
@@ -457,9 +438,8 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
     // Ref: https://github.com/opencontainers/runc/commit/50a19c6ff828c58e5dab13830bd3dacde268afe5
     //
     if !nses.is_empty() {
-        if let Err(e) = prctl::set_dumpable(false) {
-            return Err(anyhow!(e).context("set process non-dumpable failed"));
-        };
+        prctl::set_dumpable(false)
+            .map_err(|e| anyhow!(e).context("set process non-dumpable failed"))?;
     }
 
     if userns {
@@ -488,23 +468,20 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
         }
 
         log_child!(cfd_log, "join namespace {:?}", s);
-        if let Err(e) = sched::setns(fd, s) {
-            if s == CloneFlags::CLONE_NEWUSER {
-                if e.as_errno().unwrap() != Errno::EINVAL {
-                    check!(
-                        write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str()),
-                        "write_sync for CLONE_NEWUSER"
-                    );
-                    return Err(e.into());
-                }
-            } else {
-                check!(
-                    write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str()),
-                    "write_sync for sched::setns"
-                );
-                return Err(e.into());
-            }
-        }
+        sched::setns(fd, s).or_else(|e| {
+            if s == CloneFlags::CLONE_NEWUSER {
+                if e.as_errno().unwrap() != Errno::EINVAL {
+                    let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
+                    return Err(e);
+                }
+
+                Ok(())
+            } else {
+                let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
+                Err(e)
+            }
+        })?;
 
         unistd::close(fd)?;
 
         if s == CloneFlags::CLONE_NEWUSER {
@@ -576,23 +553,19 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
     if guser.additional_gids.len() > 0 {
         setgroups(guser.additional_gids.as_slice()).map_err(|e| {
-            check!(
-                write_sync(
-                    cwfd,
-                    SYNC_FAILED,
-                    format!("setgroups failed: {:?}", e).as_str()
-                ),
-                "write_sync for setgroups"
-            );
+            let _ = write_sync(
+                cwfd,
+                SYNC_FAILED,
+                format!("setgroups failed: {:?}", e).as_str(),
+            );
             e
         })?;
     }
 
     // NoNewPeiviledges, Drop capabilities
     if oci_process.no_new_privileges {
-        if let Err(_) = prctl::set_no_new_privileges(true) {
-            return Err(anyhow!("cannot set no new privileges"));
-        }
+        prctl::set_no_new_privileges(true).map_err(|_| anyhow!("cannot set no new privileges"))?;
     }
 
     if oci_process.capabilities.is_some() {
@@ -639,20 +612,15 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
     let exec_file = Path::new(&args[0]);
     log_child!(cfd_log, "process command: {:?}", &args);
     if !exec_file.exists() {
-        match find_file(exec_file) {
-            Some(_) => (),
-            None => {
-                return Err(anyhow!("the file {} is not exist", &args[0]));
-            }
-        }
+        find_file(exec_file).ok_or_else(|| anyhow!("the file {} is not exist", &args[0]))?;
     }
 
     // notify parent that the child's ready to start
     write_sync(cwfd, SYNC_SUCCESS, "")?;
     log_child!(cfd_log, "ready to run exec");
-    check!(unistd::close(cfd_log), "closing cfd log");
-    check!(unistd::close(crfd), "closing crfd");
-    check!(unistd::close(cwfd), "closing cwfd");
+    let _ = unistd::close(cfd_log);
+    let _ = unistd::close(crfd);
+    let _ = unistd::close(cwfd);
 
     if oci_process.terminal {
         unistd::setsid()?;
@@ -789,10 +757,9 @@ impl BaseContainer for LinuxContainer {
         let st = self.oci_state()?;
 
         let (pfd_log, cfd_log) = unistd::pipe().context("failed to create pipe")?;
-        check!(
-            fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
-            "fcntl pfd log FD_CLOEXEC"
-        );
+        let _ = fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
+            .map_err(|e| warn!(logger, "fcntl pfd log FD_CLOEXEC {:?}", e));
 
         let child_logger = logger.new(o!("action" => "child process log"));
         let log_handler = thread::spawn(move || {
@@ -821,18 +788,16 @@ impl BaseContainer for LinuxContainer {
         info!(logger, "exec fifo opened!");
         let (prfd, cwfd) = unistd::pipe().context("failed to create pipe")?;
         let (crfd, pwfd) = unistd::pipe().context("failed to create pipe")?;
-        check!(
-            fcntl::fcntl(prfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
-            "fcntl prfd FD_CLOEXEC"
-        );
-        check!(
-            fcntl::fcntl(pwfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
-            "fcntl pwfd FD_COLEXEC"
-        );
+
+        let _ = fcntl::fcntl(prfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
+            .map_err(|e| warn!(logger, "fcntl prfd FD_CLOEXEC {:?}", e));
+
+        let _ = fcntl::fcntl(pwfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
+            .map_err(|e| warn!(logger, "fcntl pwfd FD_COLEXEC {:?}", e));
 
         defer!({
-            check!(unistd::close(prfd), "close prfd");
-            check!(unistd::close(pwfd), "close pwfd");
+            let _ = unistd::close(prfd).map_err(|e| warn!(logger, "close prfd {:?}", e));
+            let _ = unistd::close(pwfd).map_err(|e| warn!(logger, "close pwfd {:?}", e));
         });
 
         let child_stdin: std::process::Stdio;
@@ -842,14 +807,10 @@ impl BaseContainer for LinuxContainer {
         if tty {
             let pseudo = pty::openpty(None, None)?;
             p.term_master = Some(pseudo.master);
-            check!(
-                fcntl::fcntl(pseudo.master, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
-                "fnctl pseudo.master"
-            );
-            check!(
-                fcntl::fcntl(pseudo.slave, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
-                "fcntl pseudo.slave"
-            );
+            let _ = fcntl::fcntl(pseudo.master, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
+                .map_err(|e| warn!(logger, "fnctl pseudo.master {:?}", e));
+            let _ = fcntl::fcntl(pseudo.slave, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
+                .map_err(|e| warn!(logger, "fcntl pseudo.slave {:?}", e));
 
             child_stdin = unsafe { std::process::Stdio::from_raw_fd(pseudo.slave) };
             child_stdout = unsafe { std::process::Stdio::from_raw_fd(pseudo.slave) };
@@ -863,24 +824,21 @@ impl BaseContainer for LinuxContainer {
             child_stderr = unsafe { std::process::Stdio::from_raw_fd(stderr) };
         }
 
-        let old_pid_ns = match fcntl::open(PID_NS_PATH, OFlag::O_CLOEXEC, Mode::empty()) {
-            Ok(v) => v,
-            Err(e) => {
-                error!(
-                    logger,
-                    "cannot open pid ns path: {} with error: {:?}", PID_NS_PATH, e
-                );
-                return Err(e.into());
-            }
-        };
+        let old_pid_ns =
+            fcntl::open(PID_NS_PATH, OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
+                error!(
+                    logger,
+                    "cannot open pid ns path: {} with error: {:?}", PID_NS_PATH, e
+                );
+                e
+            })?;
 
         //restore the parent's process's pid namespace.
         defer!({
-            check!(
-                sched::setns(old_pid_ns, CloneFlags::CLONE_NEWPID),
-                "settns CLONE_NEWPID"
-            );
-            check!(unistd::close(old_pid_ns), "close old pid namespace");
+            let _ = sched::setns(old_pid_ns, CloneFlags::CLONE_NEWPID)
+                .map_err(|e| warn!(logger, "settns CLONE_NEWPID {:?}", e));
+            let _ = unistd::close(old_pid_ns)
+                .map_err(|e| warn!(logger, "close old pid namespace {:?}", e));
         });
 
         let pidns = get_pid_namespace(&self.logger, linux)?;
@@ -922,12 +880,12 @@ impl BaseContainer for LinuxContainer {
         }
 
         if p.init {
-            check!(unistd::close(fifofd), "close fifofd");
+            let _ = unistd::close(fifofd).map_err(|e| warn!(logger, "close fifofd {:?}", e));
         }
 
         info!(logger, "child pid: {}", p.pid);
 
-        match join_namespaces(
+        join_namespaces(
            &logger,
            &spec,
            &p,
@@ -935,18 +893,15 @@
            &st,
            pwfd,
            prfd,
-        ) {
-            Ok(_) => (),
-            Err(e) => {
-                error!(logger, "create container process error {:?}", e);
-                // kill the child process.
-                check!(
-                    signal::kill(Pid::from_raw(p.pid), Some(Signal::SIGKILL)),
-                    "signal::kill joining namespaces"
-                );
-                return Err(e);
-            }
-        };
+        )
+        .map_err(|e| {
+            error!(logger, "create container process error {:?}", e);
+            // kill the child process.
+            let _ = signal::kill(Pid::from_raw(p.pid), Some(Signal::SIGKILL))
+                .map_err(|e| warn!(logger, "signal::kill joining namespaces {:?}", e));
+
+            e
+        })?;
 
         info!(logger, "entered namespaces!");
@@ -956,10 +911,9 @@
         let (exit_pipe_r, exit_pipe_w) = unistd::pipe2(OFlag::O_CLOEXEC)
             .context("failed to create pipe")
             .map_err(|e| {
-                check!(
-                    signal::kill(Pid::from_raw(child.id() as i32), Some(Signal::SIGKILL)),
-                    "signal::kill creating pipe"
-                );
+                let _ = signal::kill(Pid::from_raw(child.id() as i32), Some(Signal::SIGKILL))
+                    .map_err(|e| warn!(logger, "signal::kill creating pipe {:?}", e));
+
                 e
             })?;
@@ -973,7 +927,9 @@
         self.processes.insert(p.pid, p);
 
         info!(logger, "wait on child log handler");
-        check!(log_handler.join(), "joining log handler");
+        let _ = log_handler
+            .join()
+            .map_err(|e| warn!(logger, "joining log handler {:?}", e));
         info!(logger, "create process completed");
         return Ok(());
     }
@@ -1074,24 +1030,22 @@ fn do_exec(args: &[String]) -> ! {
         .collect();
     let a: Vec<&CStr> = sa.iter().map(|s| s.as_c_str()).collect();
 
-    if let Err(e) = unistd::execvp(p.as_c_str(), a.as_slice()) {
-        match e {
-            nix::Error::Sys(errno) => {
-                std::process::exit(errno as i32);
-            }
-            _ => std::process::exit(-2),
-        }
-    }
+    let _ = unistd::execvp(p.as_c_str(), a.as_slice()).map_err(|e| match e {
+        nix::Error::Sys(errno) => {
+            std::process::exit(errno as i32);
+        }
+        _ => std::process::exit(-2),
+    });
+
     unreachable!()
 }
 
 fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> Result<()> {
     info!(logger, "updating namespaces");
-    let linux = match spec.linux.as_mut() {
-        None => return Err(anyhow!("Spec didn't contain linux field")),
-        Some(l) => l,
-    };
+    let linux = spec
+        .linux
+        .as_mut()
+        .ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;
 
     let namespaces = linux.namespaces.as_mut_slice();
     for namespace in namespaces.iter_mut() {
@@ -1118,19 +1072,18 @@ fn get_pid_namespace(logger: &Logger, linux: &Linux) -> Result<Option<RawFd>> {
             return Ok(None);
         }
 
-        let fd = match fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()) {
-            Ok(v) => v,
-            Err(e) => {
-                error!(
-                    logger,
-                    "cannot open type: {} path: {}",
-                    ns.r#type.clone(),
-                    ns.path.clone()
-                );
-                error!(logger, "error is : {}", e.as_errno().unwrap().desc());
-                return Err(e.into());
-            }
-        };
+        let fd =
+            fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
+                error!(
+                    logger,
+                    "cannot open type: {} path: {}",
+                    ns.r#type.clone(),
+                    ns.path.clone()
+                );
+                error!(logger, "error is : {:?}", e.as_errno());
+
+                e
+            })?;
 
         return Ok(Some(fd));
     }
@@ -1278,22 +1231,19 @@ fn write_mappings(logger: &Logger, path: &str, maps: &[LinuxIDMapping]) -> Result<()> {
     if !data.is_empty() {
         let fd = fcntl::open(path, OFlag::O_WRONLY, Mode::empty())?;
         defer!(unistd::close(fd).unwrap());
-        match unistd::write(fd, data.as_bytes()) {
-            Ok(_) => {}
-            Err(e) => {
-                info!(logger, "cannot write mapping");
-                return Err(e.into());
-            }
-        }
+        unistd::write(fd, data.as_bytes()).map_err(|e| {
+            info!(logger, "cannot write mapping");
+            e
+        })?;
     }
     Ok(())
 }
 
 fn setid(uid: Uid, gid: Gid) -> Result<()> {
     // set uid/gid
-    if let Err(e) = prctl::set_keep_capabilities(true) {
-        bail!(anyhow!(e).context("set keep capabilities returned"));
-    };
+    prctl::set_keep_capabilities(true)
+        .map_err(|e| anyhow!(e).context("set keep capabilities returned"))?;
     {
         unistd::setresgid(gid, gid, gid)?;
     }
@@ -1305,9 +1255,9 @@ fn setid(uid: Uid, gid: Gid) -> Result<()> {
         capabilities::reset_effective()?;
     }
 
-    if let Err(e) = prctl::set_keep_capabilities(false) {
-        bail!(anyhow!(e).context("set keep capabilities returned"));
-    };
+    prctl::set_keep_capabilities(false)
+        .map_err(|e| anyhow!(e).context("set keep capabilities returned"))?;
 
     Ok(())
 }
@@ -1325,13 +1275,13 @@ impl LinuxContainer {
         // validate oci spec
         validator::validate(&config)?;
 
-        if let Err(e) = fs::create_dir_all(root.as_str()) {
-            if e.kind() == std::io::ErrorKind::AlreadyExists {
-                return Err(e).context(format!("container {} already exists", id.as_str()));
-            }
-
-            return Err(e).context(format!("fail to create container directory {}", root));
-        }
+        fs::create_dir_all(root.as_str()).map_err(|e| {
+            if e.kind() == std::io::ErrorKind::AlreadyExists {
+                return anyhow!(e).context(format!("container {} already exists", id.as_str()));
+            }
+
+            anyhow!(e).context(format!("fail to create container directory {}", root))
+        })?;
 
         unistd::chown(
             root.as_str(),

View File

@@ -407,8 +407,7 @@ fn mount_cgroups(
             if key != base {
                 let src = format!("{}/{}", m.destination.as_str(), key);
-                match unix::fs::symlink(destination.as_str(), &src[1..]) {
-                    Err(e) => {
+                unix::fs::symlink(destination.as_str(), &src[1..]).map_err(|e| {
                     log_child!(
                         cfd_log,
                         "symlink: {} {} err: {}",
@@ -417,10 +416,8 @@ fn mount_cgroups(
                         e.to_string()
                     );
-                        return Err(e.into());
-                    }
-                    Ok(_) => {}
-                }
+                    e
+                })?;
             }
         }
@@ -615,9 +612,7 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
             MsFlags::MS_SLAVE | MsFlags::MS_REC,
             None::<&str>,
         )?;
-        match umount2(abs_mount_point, MntFlags::MNT_DETACH) {
-            Ok(_) => (),
-            Err(e) => {
-                if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
-                    return Err(anyhow!(e));
-                }
+        umount2(abs_mount_point, MntFlags::MNT_DETACH).or_else(|e| {
+            if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
+                return Err(anyhow!(e));
+            }
@@ -631,8 +626,9 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
                 MsFlags::empty(),
                 None::<&str>,
             )?;
-            }
-        }
+
+            Ok(())
+        })?;
     }
 
     mount(
@@ -689,18 +685,14 @@ fn mount_from(
         Path::new(&dest)
     };
 
-    // let _ = fs::create_dir_all(&dir);
-    match fs::create_dir_all(&dir) {
-        Ok(_) => {}
-        Err(e) => {
-            log_child!(
-                cfd_log,
-                "creat dir {}: {}",
-                dir.to_str().unwrap(),
-                e.to_string()
-            );
-        }
-    }
+    let _ = fs::create_dir_all(&dir).map_err(|e| {
+        log_child!(
+            cfd_log,
+            "creat dir {}: {}",
+            dir.to_str().unwrap(),
+            e.to_string()
+        )
+    });
 
     // make sure file exists so we can bind over it
     if src.is_file() {
@@ -717,31 +709,26 @@ fn mount_from(
         }
     };
 
-    match stat::stat(dest.as_str()) {
-        Ok(_) => {}
-        Err(e) => {
-            log_child!(
-                cfd_log,
-                "dest stat error. {}: {}",
-                dest.as_str(),
-                e.as_errno().unwrap().desc()
-            );
-        }
-    }
+    let _ = stat::stat(dest.as_str()).map_err(|e| {
+        log_child!(
+            cfd_log,
+            "dest stat error. {}: {:?}",
+            dest.as_str(),
+            e.as_errno()
+        )
+    });
 
-    match mount(
+    mount(
         Some(src.as_str()),
         dest.as_str(),
         Some(m.r#type.as_str()),
         flags,
         Some(d.as_str()),
-    ) {
-        Ok(_) => {}
-        Err(e) => {
-            log_child!(cfd_log, "mount error: {}", e.as_errno().unwrap().desc());
-            return Err(e.into());
-        }
-    }
+    )
+    .map_err(|e| {
+        log_child!(cfd_log, "mount error: {:?}", e.as_errno());
+        e
+    })?;
 
     if flags.contains(MsFlags::MS_BIND)
         && flags.intersects(
@@ -753,24 +740,17 @@ fn mount_from(
                 | MsFlags::MS_SLAVE),
         )
     {
-        match mount(
+        mount(
            Some(dest.as_str()),
            dest.as_str(),
            None::<&str>,
            flags | MsFlags::MS_REMOUNT,
            None::<&str>,
-        ) {
-            Err(e) => {
-                log_child!(
-                    cfd_log,
-                    "remout {}: {}",
-                    dest.as_str(),
-                    e.as_errno().unwrap().desc()
-                );
-                return Err(e.into());
-            }
-            Ok(_) => {}
-        }
+        )
+        .map_err(|e| {
+            log_child!(cfd_log, "remout {}: {:?}", dest.as_str(), e.as_errno());
+            e
+        })?;
     }
     Ok(())
 }

View File

@@ -143,21 +143,15 @@ pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
         },
         SYNC_DATA => {
             let length: i32 = data_str.len() as i32;
-            match write_count(fd, &length.to_be_bytes(), MSG_SIZE) {
-                Ok(_count) => (),
-                Err(e) => {
-                    unistd::close(fd)?;
-                    return Err(anyhow!(e).context("error in send message to process"));
-                }
-            }
+            write_count(fd, &length.to_be_bytes(), MSG_SIZE).or_else(|e| {
+                unistd::close(fd)?;
+                Err(anyhow!(e).context("error in send message to process"))
+            })?;
 
-            match write_count(fd, data_str.as_bytes(), data_str.len()) {
-                Ok(_count) => (),
-                Err(e) => {
-                    unistd::close(fd)?;
-                    return Err(anyhow!(e).context("error in send message to process"));
-                }
-            }
+            write_count(fd, data_str.as_bytes(), data_str.len()).or_else(|e| {
+                unistd::close(fd)?;
+                Err(anyhow!(e).context("error in send message to process"))
+            })?;
         }
         _ => (),

View File

@@ -137,17 +137,14 @@ fn get_device_name(sandbox: &Arc<Mutex<Sandbox>>, dev_addr: &str) -> Result<String> {
     info!(sl!(), "Waiting on channel for device notification\n");
     let hotplug_timeout = AGENT_CONFIG.read().unwrap().hotplug_timeout;
 
-    let dev_name = match rx.recv_timeout(hotplug_timeout) {
-        Ok(name) => name,
-        Err(_) => {
-            GLOBAL_DEVICE_WATCHER.lock().unwrap().remove_entry(dev_addr);
-            return Err(anyhow!(
-                "Timeout reached after {:?} waiting for device {}",
-                hotplug_timeout,
-                dev_addr
-            ));
-        }
-    };
+    let dev_name = rx.recv_timeout(hotplug_timeout).map_err(|_| {
+        GLOBAL_DEVICE_WATCHER.lock().unwrap().remove_entry(dev_addr);
+        anyhow!(
+            "Timeout reached after {:?} waiting for device {}",
+            hotplug_timeout,
+            dev_addr
+        )
+    })?;
 
     Ok(format!("{}/{}", SYSTEM_DEV_PATH, &dev_name))
 }
@@ -214,10 +211,10 @@ fn update_spec_device_list(device: &Device, spec: &mut Spec, devidx: &DevIndex)
         ));
     }
 
-    let linux = match spec.linux.as_mut() {
-        None => return Err(anyhow!("Spec didn't container linux field")),
-        Some(l) => l,
-    };
+    let linux = spec
+        .linux
+        .as_mut()
+        .ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
 
     if !Path::new(&device.vm_path).exists() {
         return Err(anyhow!("vm_path:{} doesn't exist", device.vm_path));
@@ -411,10 +408,10 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
     let major = stat::major(rdev) as i64;
     let minor = stat::minor(rdev) as i64;
 
-    let linux = match spec.linux.as_mut() {
-        None => return Err(anyhow!("Spec didn't container linux field")),
-        Some(l) => l,
-    };
+    let linux = spec
+        .linux
+        .as_mut()
+        .ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
 
     if linux.resources.is_none() {
         linux.resources = Some(LinuxResources::default());

View File

@@ -512,14 +512,12 @@ fn run_debug_console_shell(logger: &Logger, shell: &str, socket_fd: RawFd) -> Result<()> {
             let args: Vec<&CStr> = vec![];
 
             // run shell
-            if let Err(e) = unistd::execvp(cmd.as_c_str(), args.as_slice()) {
-                match e {
-                    nix::Error::Sys(errno) => {
-                        std::process::exit(errno as i32);
-                    }
-                    _ => std::process::exit(-2),
-                }
-            }
+            let _ = unistd::execvp(cmd.as_c_str(), args.as_slice()).map_err(|e| match e {
+                nix::Error::Sys(errno) => {
+                    std::process::exit(errno as i32);
+                }
+                _ => std::process::exit(-2),
+            });
         }
 
         Ok(ForkResult::Parent { child: child_pid }) => {

View File

@@ -251,10 +251,7 @@ fn ephemeral_storage_handler(
         return Ok("".to_string());
     }
 
-    if let Err(err) = fs::create_dir_all(Path::new(&storage.mount_point)) {
-        return Err(err.into());
-    }
-
+    fs::create_dir_all(Path::new(&storage.mount_point))?;
     common_storage_handler(logger, storage)?;
 
     Ok("".to_string())
@@ -449,21 +446,17 @@ pub fn add_storages(
             "subsystem" => "storage",
             "storage-type" => handler_name.to_owned()));
 
-        let handler = match STORAGEHANDLERLIST.get(&handler_name.as_str()) {
-            None => {
-                return Err(anyhow!(
-                    "Failed to find the storage handler {}",
-                    storage.driver.to_owned()
-                ));
-            }
-            Some(f) => f,
-        };
+        let handler = STORAGEHANDLERLIST
+            .get(&handler_name.as_str())
+            .ok_or_else(|| {
+                anyhow!(
+                    "Failed to find the storage handler {}",
+                    storage.driver.to_owned()
+                )
+            })?;
 
-        let mount_point = match handler(&logger, &storage, sandbox.clone()) {
-            // Todo need to rollback the mounted storage if err met.
-            Err(e) => return Err(e),
-            Ok(m) => m,
-        };
+        // Todo need to rollback the mounted storage if err met.
+        let mount_point = handler(&logger, &storage, sandbox.clone())?;
 
         if mount_point.len() > 0 {
             mount_list.push(mount_point);
@@ -482,15 +475,18 @@ fn mount_to_rootfs(logger: &Logger, m: &INIT_MOUNT) -> Result<()> {
 
     fs::create_dir_all(Path::new(m.dest)).context("could not create directory")?;
 
-    if let Err(err) = bare_mount.mount() {
-        if m.src != "dev" {
-            return Err(err.into());
-        }
-
-        error!(
-            logger,
-            "Could not mount filesystem from {} to {}", m.src, m.dest
-        );
-    }
+    bare_mount.mount().or_else(|e| {
+        if m.src != "dev" {
+            return Err(e);
+        }
+
+        error!(
+            logger,
+            "Could not mount filesystem from {} to {}", m.src, m.dest
+        );
+
+        Ok(())
+    })?;
 
     Ok(())
 }
@@ -659,10 +655,9 @@ pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> {
 fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
     let d = Path::new(destination);
     if !d.exists() {
-        let dir = match d.parent() {
-            Some(d) => d,
-            None => return Err(anyhow!("mount destination {} doesn't exist", destination)),
-        };
+        let dir = d
+            .parent()
+            .ok_or_else(|| anyhow!("mount destination {} doesn't exist", destination))?;
+
         if !dir.exists() {
            fs::create_dir_all(dir).context(format!("create dir all failed on {:?}", dir))?;
        }

View File

@@ -3,15 +3,15 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+use anyhow::{anyhow, Result};
 use nix::mount::MsFlags;
 use nix::sched::{unshare, CloneFlags};
 use nix::unistd::{getpid, gettid};
 use std::fmt;
 use std::fs;
 use std::fs::File;
-use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
-use std::thread;
+use std::thread::{self};
 
 use crate::mount::{BareMount, FLAGS};
 use slog::Logger;
@@ -77,10 +77,8 @@ impl Namespace {
     // setup creates persistent namespace without switching to it.
     // Note, pid namespaces cannot be persisted.
-    pub fn setup(mut self) -> Result<Self, String> {
-        if let Err(err) = fs::create_dir_all(&self.persistent_ns_dir) {
-            return Err(err.to_string());
-        }
+    pub fn setup(mut self) -> Result<Self> {
+        fs::create_dir_all(&self.persistent_ns_dir)?;
 
         let ns_path = PathBuf::from(&self.persistent_ns_dir);
         let ns_type = self.ns_type.clone();
@@ -88,33 +86,23 @@ impl Namespace {
         let new_ns_path = ns_path.join(&ns_type.get());
 
-        if let Err(err) = File::create(new_ns_path.as_path()) {
-            return Err(err.to_string());
-        }
+        File::create(new_ns_path.as_path())?;
 
         self.path = new_ns_path.clone().into_os_string().into_string().unwrap();
         let hostname = self.hostname.clone();
 
-        let new_thread = thread::spawn(move || {
+        let new_thread = thread::spawn(move || -> Result<()> {
             let origin_ns_path = get_current_thread_ns_path(&ns_type.get());
 
-            let _origin_ns_fd = match File::open(Path::new(&origin_ns_path)) {
-                Err(err) => return Err(err.to_string()),
-                Ok(file) => file.as_raw_fd(),
-            };
+            File::open(Path::new(&origin_ns_path))?;
 
             // Create a new netns on the current thread.
             let cf = ns_type.get_flags().clone();
-            if let Err(err) = unshare(cf) {
-                return Err(err.to_string());
-            }
+
+            unshare(cf)?;
 
             if ns_type == NamespaceType::UTS && hostname.is_some() {
-                match nix::unistd::sethostname(hostname.unwrap()) {
-                    Err(err) => return Err(err.to_string()),
-                    Ok(_) => (),
-                }
+                nix::unistd::sethostname(hostname.unwrap())?;
             }
 
             // Bind mount the new namespace from the current thread onto the mount point to persist it.
             let source: &str = origin_ns_path.as_str();
@@ -131,23 +119,21 @@ impl Namespace {
             };
 
             let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
-            if let Err(err) = bare_mount.mount() {
-                return Err(format!(
-                    "Failed to mount {} to {} with err:{:?}",
-                    source, destination, err
-                ));
-            }
+
+            bare_mount.mount().map_err(|e| {
+                anyhow!(
+                    "Failed to mount {} to {} with err:{:?}",
+                    source,
+                    destination,
+                    e
+                )
+            })?;
 
             Ok(())
         });
 
-        match new_thread.join() {
-            Ok(t) => match t {
-                Err(err) => return Err(err),
-                Ok(()) => (),
-            },
-            Err(err) => return Err(format!("Failed to join thread {:?}!", err)),
-        }
+        new_thread
+            .join()
+            .map_err(|e| anyhow!("Failed to join thread {:?}!", e))??;
 
         Ok(self)
     }

View File

@@ -258,15 +258,12 @@ impl agentService {
             });
         });
 
-        if let Err(_) = rx.recv_timeout(Duration::from_secs(req.timeout as u64)) {
-            return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::ETIME)));
-        }
+        rx.recv_timeout(Duration::from_secs(req.timeout as u64))
+            .map_err(|_| anyhow!(nix::Error::from_errno(nix::errno::Errno::ETIME)))?;
 
-        if let Err(_) = handle.join() {
-            return Err(anyhow!(nix::Error::from_errno(
-                nix::errno::Errno::UnknownErrno
-            )));
-        }
+        handle
+            .join()
+            .map_err(|_| anyhow!(nix::Error::from_errno(nix::errno::Errno::UnknownErrno)))?;
 
         let s = self.sandbox.clone();
         let mut sandbox = s.lock().unwrap();
@@ -591,13 +588,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::WaitProcessRequest,
     ) -> ttrpc::Result<WaitProcessResponse> {
-        match self.do_wait_process(req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
-            Ok(resp) => Ok(resp),
-        }
+        self.do_wait_process(req).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })
     }
 
     fn list_processes(
@@ -737,13 +730,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             "invalid container id".to_string(),
         )))?;
 
-        match ctr.stats() {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
-            Ok(resp) => Ok(resp),
-        }
+        ctr.stats().map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })
     }
 
     fn pause_container(
@@ -797,13 +786,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::WriteStreamRequest,
     ) -> ttrpc::Result<WriteStreamResponse> {
-        match self.do_write_stream(req) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
-            Ok(resp) => Ok(resp),
-        }
+        self.do_write_stream(req).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })
     }
 
     fn read_stdout(
@@ -811,13 +796,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::ReadStreamRequest,
     ) -> ttrpc::Result<ReadStreamResponse> {
-        match self.do_read_stream(req, true) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
-            Ok(resp) => Ok(resp),
-        }
+        self.do_read_stream(req, true).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })
     }
 
     fn read_stderr(
@@ -825,13 +806,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::ReadStreamRequest,
     ) -> ttrpc::Result<ReadStreamResponse> {
-        match self.do_read_stream(req, false) {
-            Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            ))),
-            Ok(resp) => Ok(resp),
-        }
+        self.do_read_stream(req, false).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
        })
     }
 
     fn close_stdin(
@@ -844,15 +821,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
 
-        let p = match find_process(&mut sandbox, cid.as_str(), eid.as_str(), false) {
-            Ok(v) => v,
-            Err(e) => {
-                return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                    ttrpc::Code::INVALID_ARGUMENT,
-                    format!("invalid argument: {:?}", e),
-                )));
-            }
-        };
+        let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(
+                ttrpc::Code::INVALID_ARGUMENT,
+                format!("invalid argument: {:?}", e),
+            ))
+        })?;
 
         if p.term_master.is_some() {
             let _ = unistd::close(p.term_master.unwrap());
@@ -876,15 +850,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let eid = req.exec_id.clone();
         let s = Arc::clone(&self.sandbox);
         let mut sandbox = s.lock().unwrap();
 
-        let p = match find_process(&mut sandbox, cid.as_str(), eid.as_str(), false) {
-            Ok(v) => v,
-            Err(e) => {
-                return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                    ttrpc::Code::UNAVAILABLE,
-                    format!("invalid argument: {:?}", e),
-                )));
-            }
-        };
+        let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(
+                ttrpc::Code::UNAVAILABLE,
+                format!("invalid argument: {:?}", e),
+            ))
+        })?;
 
         if p.term_master.is_none() {
             return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
@@ -903,12 +874,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
             };
 
             let err = libc::ioctl(fd, TIOCSWINSZ, &win);
-            if let Err(e) = Errno::result(err).map(drop) {
-                return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                    ttrpc::Code::INTERNAL,
-                    format!("ioctl error: {:?}", e),
-                )));
-            }
+            Errno::result(err).map(drop).map_err(|e| {
+                ttrpc::Error::RpcStatus(ttrpc::get_status(
+                    ttrpc::Code::INTERNAL,
+                    format!("ioctl error: {:?}", e),
+                ))
+            })?;
         }
 
         Ok(Empty::new())
@@ -1076,12 +1047,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         s.running = true;
 
         if !req.guest_hook_path.is_empty() {
-            if let Err(e) = s.add_hooks(&req.guest_hook_path) {
-                error!(
-                    sl!(),
-                    "add guest hook {} failed: {:?}", req.guest_hook_path, e
-                );
-            }
+            let _ = s.add_hooks(&req.guest_hook_path).map_err(|e| {
+                error!(
+                    sl!(),
+                    "add guest hook {} failed: {:?}", req.guest_hook_path, e
+                );
+            });
         }
 
         if req.sandbox_id.len() > 0 {
@@ -1189,12 +1160,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         let s = Arc::clone(&self.sandbox);
         let sandbox = s.lock().unwrap();
 
-        if let Err(e) = sandbox.online_cpu_memory(&req) {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            )));
-        }
+        sandbox.online_cpu_memory(&req).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })?;
 
         Ok(Empty::new())
     }
@@ -1204,12 +1172,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::ReseedRandomDevRequest,
     ) -> ttrpc::Result<Empty> {
-        if let Err(e) = random::reseed_rng(req.data.as_slice()) {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            )));
-        }
+        random::reseed_rng(req.data.as_slice()).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })?;
 
         Ok(Empty::new())
     }
@@ -1248,12 +1213,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::MemHotplugByProbeRequest,
     ) -> ttrpc::Result<Empty> {
-        if let Err(e) = do_mem_hotplug_by_probe(&req.memHotplugProbeAddr) {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            )));
-        }
+        do_mem_hotplug_by_probe(&req.memHotplugProbeAddr).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })?;
 
         Ok(Empty::new())
     }
@@ -1263,12 +1225,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::SetGuestDateTimeRequest,
     ) -> ttrpc::Result<Empty> {
-        if let Err(e) = do_set_guest_date_time(req.Sec, req.Usec) {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            )));
-        }
+        do_set_guest_date_time(req.Sec, req.Usec).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
+        })?;
 
         Ok(Empty::new())
     }
@@ -1278,12 +1237,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
         _ctx: &ttrpc::TtrpcContext,
         req: protocols::agent::CopyFileRequest,
     ) -> ttrpc::Result<Empty> {
-        if let Err(e) = do_copy_file(&req) {
-            return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
-                ttrpc::Code::INTERNAL,
-                e.to_string(),
-            )));
-        }
+        do_copy_file(&req).map_err(|e| {
+            ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
        })?;
 
         Ok(Empty::new())
     }
@@ -1374,13 +1330,10 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
                     return Err(anyhow!("Invalid block size"));
                 }
 
-                size = match u64::from_str_radix(v.trim(), 16) {
-                    Ok(h) => h,
-                    Err(_) => {
-                        warn!(sl!(), "failed to parse the str {} to hex", size);
-                        return Err(anyhow!("Invalid block size"));
-                    }
-                };
+                size = u64::from_str_radix(v.trim(), 16).map_err(|_| {
+                    warn!(sl!(), "failed to parse the str {} to hex", size);
+                    anyhow!("Invalid block size")
+                })?;
             }
             Err(e) => {
                 info!(sl!(), "memory block size error: {:?}", e.kind());
@@ -1649,12 +1602,14 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
         PathBuf::from("/")
     };
 
-    if let Err(e) = fs::create_dir_all(dir.to_str().unwrap()) {
-        if e.kind() != std::io::ErrorKind::AlreadyExists {
-            return Err(e.into());
-        }
-    }
+    fs::create_dir_all(dir.to_str().unwrap()).or_else(|e| {
+        if e.kind() != std::io::ErrorKind::AlreadyExists {
+            return Err(e);
+        }
+
+        Ok(())
+    })?;
 
     std::fs::set_permissions(
         dir.to_str().unwrap(),
         std::fs::Permissions::from_mode(req.dir_mode),

View File

@@ -143,16 +143,10 @@ impl Sandbox {
     // It's assumed that caller is calling this method after
     // acquiring a lock on sandbox.
     pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
-        match self.unset_sandbox_storage(path) {
-            Ok(res) => {
-                if res {
-                    return self.remove_sandbox_storage(path);
-                }
-            }
-            Err(err) => {
-                return Err(err);
-            }
-        }
+        if self.unset_sandbox_storage(path)? {
+            return self.remove_sandbox_storage(path);
+        }
 
         Ok(())
     }
@@ -166,23 +160,17 @@
     pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
         // Set up shared IPC namespace
-        self.shared_ipcns = match Namespace::new(&self.logger).as_ipc().setup() {
-            Ok(ns) => ns,
-            Err(err) => {
-                return Err(anyhow!(err).context("Failed to setup persistent IPC namespace"));
-            }
-        };
+        self.shared_ipcns = Namespace::new(&self.logger)
+            .as_ipc()
+            .setup()
+            .context("Failed to setup persistent IPC namespace")?;
 
         // // Set up shared UTS namespace
-        self.shared_utsns = match Namespace::new(&self.logger)
+        self.shared_utsns = Namespace::new(&self.logger)
             .as_uts(self.hostname.as_str())
             .setup()
-        {
-            Ok(ns) => ns,
-            Err(err) => {
-                return Err(anyhow!(err).context("Failed to setup persistent UTS namespace"));
-            }
-        };
+            .context("Failed to setup persistent UTS namespace")?;
 
         Ok(true)
     }
@@ -316,10 +304,9 @@ impl Sandbox {
             thread::spawn(move || {
                 for event in rx {
                     info!(logger, "got an OOM event {:?}", event);
-                    match tx.send(container_id.clone()) {
-                        Err(err) => error!(logger, "failed to send message: {:?}", err),
-                        Ok(_) => {}
-                    }
+                    let _ = tx
+                        .send(container_id.clone())
+                        .map_err(|e| error!(logger, "failed to send message: {:?}", e));
                 }
            });
        }

View File

@@ -99,14 +99,14 @@ impl Uevent {
         let online_path = format!("{}/{}/online", SYSFS_DIR, &self.devpath);
         // It's a memory hot-add event.
         if online_path.starts_with(SYSFS_MEMORY_ONLINE_PATH) {
-            if let Err(e) = online_device(online_path.as_ref()) {
-                error!(
-                    *logger,
-                    "failed to online device";
-                    "device" => &self.devpath,
-                    "error" => format!("{}", e),
-                );
-            }
+            let _ = online_device(online_path.as_ref()).map_err(|e| {
+                error!(
+                    *logger,
+                    "failed to online device";
+                    "device" => &self.devpath,
+                    "error" => format!("{}", e),
+                )
+            });
             return;
         }
     }