Merge pull request #942 from Tim-Zhang/optimize-error-handling

agent: Optimize error handling
Fupan Li 2020-10-16 09:15:22 +08:00 committed by GitHub
commit cacb27fe4b
11 changed files with 302 additions and 461 deletions
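The diff below consistently replaces explicit `match` and `if let Err(e)` blocks with combinator-style error handling: `map_err` plus `?` for propagation, `ok_or_else` for `Option` to `Result` conversion, `or_else` where the error path itself does work, and `let _ = ... .map_err(...)` where a failure is only logged. A minimal sketch of the two most common shapes, assuming only the `anyhow` crate these files already use; the function names and messages are illustrative, not taken from the patch:

```rust
use anyhow::{anyhow, Result};

// Shape 1: `if let Err(e) = ... { return Err(...); }` collapses into one
// expression: map_err builds the error, the trailing `?` does the early return.
fn create_container_dir(root: &str) -> Result<()> {
    std::fs::create_dir_all(root)
        .map_err(|e| anyhow!(e).context(format!("fail to create container directory {}", root)))?;
    Ok(())
}

// Shape 2: `match opt { Some(v) => v, None => return Err(...) }` becomes
// ok_or_else + `?`; the error is only constructed when the Option is None.
fn require_linux_field(linux: Option<&str>) -> Result<String> {
    let l = linux.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;
    Ok(l.to_string())
}
```

Either shape keeps the early-return behaviour of the original code while removing one level of nesting per fallible call.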

View File

@ -126,13 +126,12 @@ pub fn drop_privileges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
)
.map_err(|e| anyhow!(e.to_string()))?;
if let Err(_) = caps::set(
let _ = caps::set(
None,
CapSet::Ambient,
to_capshashset(cfd_log, caps.ambient.as_ref()),
) {
log_child!(cfd_log, "failed to set ambient capability");
}
)
.map_err(|_| log_child!(cfd_log, "failed to set ambient capability"));
Ok(())
}
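In `drop_privileges` above, the failure to set ambient capabilities is deliberately non-fatal: the new code maps the error into a `log_child!` call and discards the result with `let _ =`, so the function still returns `Ok(())`. A std-only sketch of that log-and-continue shape, where `set_ambient_caps` and `log_line` are hypothetical stand-ins for `caps::set` and `log_child!`:

```rust
// Hypothetical stand-in for caps::set(None, CapSet::Ambient, ...).
fn set_ambient_caps() -> Result<(), String> {
    Err("operation not permitted".to_string())
}

// Hypothetical stand-in for log_child!(cfd_log, ...).
fn log_line(msg: &str) {
    eprintln!("{}", msg);
}

fn drop_privileges_sketch() -> Result<(), String> {
    // map_err only logs; `let _ =` discards the mapped Result, so the error
    // does not propagate and the function still succeeds.
    let _ = set_ambient_caps().map_err(|_| log_line("failed to set ambient capability"));
    Ok(())
}
```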

View File

@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0
//
use anyhow::{anyhow, bail, Context, Result};
use anyhow::{anyhow, Context, Result};
use dirs;
use lazy_static;
use libc::pid_t;
@ -67,17 +67,6 @@ const CLOG_FD: &str = "CLOG_FD";
const FIFO_FD: &str = "FIFO_FD";
const HOME_ENV_KEY: &str = "HOME";
#[macro_export]
macro_rules! check {
($what:expr, $where:expr) => ({
if let Err(e) = $what {
let subsystem = $where;
let logger = slog_scope::logger().new(o!("subsystem" => subsystem));
warn!(logger, "{:?}", e);
}
})
}
#[derive(PartialEq, Clone, Copy)]
pub enum Status {
CREATED,
@ -331,17 +320,11 @@ impl Container for LinuxContainer {
pub fn init_child() {
let cwfd = std::env::var(CWFD_FD).unwrap().parse::<i32>().unwrap();
let cfd_log = std::env::var(CLOG_FD).unwrap().parse::<i32>().unwrap();
match do_init_child(cwfd) {
Ok(_) => (),
Err(e) => {
let _ = do_init_child(cwfd).map_err(|e| {
log_child!(cfd_log, "child exit: {:?}", e);
check!(
write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str()),
"write_sync in init_child()"
);
return;
}
}
let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
});
}
fn do_init_child(cwfd: RawFd) -> Result<()> {
@ -406,19 +389,17 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
to_new.set(*s, true);
}
} else {
let fd = match fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()) {
Ok(v) => v,
Err(e) => {
let fd =
fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
log_child!(
cfd_log,
"cannot open type: {} path: {}",
ns.r#type.clone(),
ns.path.clone()
);
log_child!(cfd_log, "error is : {}", e.as_errno().unwrap().desc());
return Err(e.into());
}
};
log_child!(cfd_log, "error is : {:?}", e.as_errno());
e
})?;
if *s != CloneFlags::CLONE_NEWPID {
to_join.push((*s, fd));
@ -457,9 +438,8 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
// Ref: https://github.com/opencontainers/runc/commit/50a19c6ff828c58e5dab13830bd3dacde268afe5
//
if !nses.is_empty() {
if let Err(e) = prctl::set_dumpable(false) {
return Err(anyhow!(e).context("set process non-dumpable failed"));
};
prctl::set_dumpable(false)
.map_err(|e| anyhow!(e).context("set process non-dumpable failed"))?;
}
if userns {
@ -488,23 +468,20 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
}
log_child!(cfd_log, "join namespace {:?}", s);
if let Err(e) = sched::setns(fd, s) {
sched::setns(fd, s).or_else(|e| {
if s == CloneFlags::CLONE_NEWUSER {
if e.as_errno().unwrap() != Errno::EINVAL {
check!(
write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str()),
"write_sync for CLONE_NEWUSER"
);
return Err(e.into());
let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
return Err(e);
}
Ok(())
} else {
check!(
write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str()),
"write_sync for sched::setns"
);
return Err(e.into());
}
let _ = write_sync(cwfd, SYNC_FAILED, format!("{:?}", e).as_str());
Err(e)
}
})?;
unistd::close(fd)?;
if s == CloneFlags::CLONE_NEWUSER {
@ -576,23 +553,19 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
if guser.additional_gids.len() > 0 {
setgroups(guser.additional_gids.as_slice()).map_err(|e| {
check!(
write_sync(
let _ = write_sync(
cwfd,
SYNC_FAILED,
format!("setgroups failed: {:?}", e).as_str()
),
"write_sync for setgroups"
format!("setgroups failed: {:?}", e).as_str(),
);
e
})?;
}
// NoNewPeiviledges, Drop capabilities
if oci_process.no_new_privileges {
if let Err(_) = prctl::set_no_new_privileges(true) {
return Err(anyhow!("cannot set no new privileges"));
}
prctl::set_no_new_privileges(true).map_err(|_| anyhow!("cannot set no new privileges"))?;
}
if oci_process.capabilities.is_some() {
@ -639,20 +612,15 @@ fn do_init_child(cwfd: RawFd) -> Result<()> {
let exec_file = Path::new(&args[0]);
log_child!(cfd_log, "process command: {:?}", &args);
if !exec_file.exists() {
match find_file(exec_file) {
Some(_) => (),
None => {
return Err(anyhow!("the file {} is not exist", &args[0]));
}
}
find_file(exec_file).ok_or_else(|| anyhow!("the file {} is not exist", &args[0]))?;
}
// notify parent that the child's ready to start
write_sync(cwfd, SYNC_SUCCESS, "")?;
log_child!(cfd_log, "ready to run exec");
check!(unistd::close(cfd_log), "closing cfd log");
check!(unistd::close(crfd), "closing crfd");
check!(unistd::close(cwfd), "closing cwfd");
let _ = unistd::close(cfd_log);
let _ = unistd::close(crfd);
let _ = unistd::close(cwfd);
if oci_process.terminal {
unistd::setsid()?;
@ -789,10 +757,9 @@ impl BaseContainer for LinuxContainer {
let st = self.oci_state()?;
let (pfd_log, cfd_log) = unistd::pipe().context("failed to create pipe")?;
check!(
fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
"fcntl pfd log FD_CLOEXEC"
);
let _ = fcntl::fcntl(pfd_log, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| warn!(logger, "fcntl pfd log FD_CLOEXEC {:?}", e));
let child_logger = logger.new(o!("action" => "child process log"));
let log_handler = thread::spawn(move || {
@ -821,18 +788,16 @@ impl BaseContainer for LinuxContainer {
info!(logger, "exec fifo opened!");
let (prfd, cwfd) = unistd::pipe().context("failed to create pipe")?;
let (crfd, pwfd) = unistd::pipe().context("failed to create pipe")?;
check!(
fcntl::fcntl(prfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
"fcntl prfd FD_CLOEXEC"
);
check!(
fcntl::fcntl(pwfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
"fcntl pwfd FD_COLEXEC"
);
let _ = fcntl::fcntl(prfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| warn!(logger, "fcntl prfd FD_CLOEXEC {:?}", e));
let _ = fcntl::fcntl(pwfd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| warn!(logger, "fcntl pwfd FD_COLEXEC {:?}", e));
defer!({
check!(unistd::close(prfd), "close prfd");
check!(unistd::close(pwfd), "close pwfd");
let _ = unistd::close(prfd).map_err(|e| warn!(logger, "close prfd {:?}", e));
let _ = unistd::close(pwfd).map_err(|e| warn!(logger, "close pwfd {:?}", e));
});
let child_stdin: std::process::Stdio;
@ -842,14 +807,10 @@ impl BaseContainer for LinuxContainer {
if tty {
let pseudo = pty::openpty(None, None)?;
p.term_master = Some(pseudo.master);
check!(
fcntl::fcntl(pseudo.master, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
"fnctl pseudo.master"
);
check!(
fcntl::fcntl(pseudo.slave, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)),
"fcntl pseudo.slave"
);
let _ = fcntl::fcntl(pseudo.master, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| warn!(logger, "fnctl pseudo.master {:?}", e));
let _ = fcntl::fcntl(pseudo.slave, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))
.map_err(|e| warn!(logger, "fcntl pseudo.slave {:?}", e));
child_stdin = unsafe { std::process::Stdio::from_raw_fd(pseudo.slave) };
child_stdout = unsafe { std::process::Stdio::from_raw_fd(pseudo.slave) };
@ -863,24 +824,21 @@ impl BaseContainer for LinuxContainer {
child_stderr = unsafe { std::process::Stdio::from_raw_fd(stderr) };
}
let old_pid_ns = match fcntl::open(PID_NS_PATH, OFlag::O_CLOEXEC, Mode::empty()) {
Ok(v) => v,
Err(e) => {
let old_pid_ns =
fcntl::open(PID_NS_PATH, OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
error!(
logger,
"cannot open pid ns path: {} with error: {:?}", PID_NS_PATH, e
);
return Err(e.into());
}
};
e
})?;
//restore the parent's process's pid namespace.
defer!({
check!(
sched::setns(old_pid_ns, CloneFlags::CLONE_NEWPID),
"settns CLONE_NEWPID"
);
check!(unistd::close(old_pid_ns), "close old pid namespace");
let _ = sched::setns(old_pid_ns, CloneFlags::CLONE_NEWPID)
.map_err(|e| warn!(logger, "settns CLONE_NEWPID {:?}", e));
let _ = unistd::close(old_pid_ns)
.map_err(|e| warn!(logger, "close old pid namespace {:?}", e));
});
let pidns = get_pid_namespace(&self.logger, linux)?;
@ -922,12 +880,12 @@ impl BaseContainer for LinuxContainer {
}
if p.init {
check!(unistd::close(fifofd), "close fifofd");
let _ = unistd::close(fifofd).map_err(|e| warn!(logger, "close fifofd {:?}", e));
}
info!(logger, "child pid: {}", p.pid);
match join_namespaces(
join_namespaces(
&logger,
&spec,
&p,
@ -935,18 +893,15 @@ impl BaseContainer for LinuxContainer {
&st,
pwfd,
prfd,
) {
Ok(_) => (),
Err(e) => {
)
.map_err(|e| {
error!(logger, "create container process error {:?}", e);
// kill the child process.
check!(
signal::kill(Pid::from_raw(p.pid), Some(Signal::SIGKILL)),
"signal::kill joining namespaces"
);
return Err(e);
}
};
let _ = signal::kill(Pid::from_raw(p.pid), Some(Signal::SIGKILL))
.map_err(|e| warn!(logger, "signal::kill joining namespaces {:?}", e));
e
})?;
info!(logger, "entered namespaces!");
@ -956,10 +911,9 @@ impl BaseContainer for LinuxContainer {
let (exit_pipe_r, exit_pipe_w) = unistd::pipe2(OFlag::O_CLOEXEC)
.context("failed to create pipe")
.map_err(|e| {
check!(
signal::kill(Pid::from_raw(child.id() as i32), Some(Signal::SIGKILL)),
"signal::kill creating pipe"
);
let _ = signal::kill(Pid::from_raw(child.id() as i32), Some(Signal::SIGKILL))
.map_err(|e| warn!(logger, "signal::kill creating pipe {:?}", e));
e
})?;
@ -973,7 +927,9 @@ impl BaseContainer for LinuxContainer {
self.processes.insert(p.pid, p);
info!(logger, "wait on child log handler");
check!(log_handler.join(), "joining log handler");
let _ = log_handler
.join()
.map_err(|e| warn!(logger, "joining log handler {:?}", e));
info!(logger, "create process completed");
return Ok(());
}
@ -1074,24 +1030,22 @@ fn do_exec(args: &[String]) -> ! {
.collect();
let a: Vec<&CStr> = sa.iter().map(|s| s.as_c_str()).collect();
if let Err(e) = unistd::execvp(p.as_c_str(), a.as_slice()) {
match e {
let _ = unistd::execvp(p.as_c_str(), a.as_slice()).map_err(|e| match e {
nix::Error::Sys(errno) => {
std::process::exit(errno as i32);
}
_ => std::process::exit(-2),
}
}
});
unreachable!()
}
fn update_namespaces(logger: &Logger, spec: &mut Spec, init_pid: RawFd) -> Result<()> {
info!(logger, "updating namespaces");
let linux = match spec.linux.as_mut() {
None => return Err(anyhow!("Spec didn't contain linux field")),
Some(l) => l,
};
let linux = spec
.linux
.as_mut()
.ok_or_else(|| anyhow!("Spec didn't contain linux field"))?;
let namespaces = linux.namespaces.as_mut_slice();
for namespace in namespaces.iter_mut() {
@ -1118,19 +1072,18 @@ fn get_pid_namespace(logger: &Logger, linux: &Linux) -> Result<Option<RawFd>> {
return Ok(None);
}
let fd = match fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()) {
Ok(v) => v,
Err(e) => {
let fd =
fcntl::open(ns.path.as_str(), OFlag::O_CLOEXEC, Mode::empty()).map_err(|e| {
error!(
logger,
"cannot open type: {} path: {}",
ns.r#type.clone(),
ns.path.clone()
);
error!(logger, "error is : {}", e.as_errno().unwrap().desc());
return Err(e.into());
}
};
error!(logger, "error is : {:?}", e.as_errno());
e
})?;
return Ok(Some(fd));
}
@ -1278,22 +1231,19 @@ fn write_mappings(logger: &Logger, path: &str, maps: &[LinuxIDMapping]) -> Resul
if !data.is_empty() {
let fd = fcntl::open(path, OFlag::O_WRONLY, Mode::empty())?;
defer!(unistd::close(fd).unwrap());
match unistd::write(fd, data.as_bytes()) {
Ok(_) => {}
Err(e) => {
unistd::write(fd, data.as_bytes()).map_err(|e| {
info!(logger, "cannot write mapping");
return Err(e.into());
}
}
e
})?;
}
Ok(())
}
fn setid(uid: Uid, gid: Gid) -> Result<()> {
// set uid/gid
if let Err(e) = prctl::set_keep_capabilities(true) {
bail!(anyhow!(e).context("set keep capabilities returned"));
};
prctl::set_keep_capabilities(true)
.map_err(|e| anyhow!(e).context("set keep capabilities returned"))?;
{
unistd::setresgid(gid, gid, gid)?;
}
@ -1305,9 +1255,9 @@ fn setid(uid: Uid, gid: Gid) -> Result<()> {
capabilities::reset_effective()?;
}
if let Err(e) = prctl::set_keep_capabilities(false) {
bail!(anyhow!(e).context("set keep capabilities returned"));
};
prctl::set_keep_capabilities(false)
.map_err(|e| anyhow!(e).context("set keep capabilities returned"))?;
Ok(())
}
@ -1325,13 +1275,13 @@ impl LinuxContainer {
// validate oci spec
validator::validate(&config)?;
if let Err(e) = fs::create_dir_all(root.as_str()) {
fs::create_dir_all(root.as_str()).map_err(|e| {
if e.kind() == std::io::ErrorKind::AlreadyExists {
return Err(e).context(format!("container {} already exists", id.as_str()));
return anyhow!(e).context(format!("container {} already exists", id.as_str()));
}
return Err(e).context(format!("fail to create container directory {}", root));
}
anyhow!(e).context(format!("fail to create container directory {}", root))
})?;
unistd::chown(
root.as_str(),
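The `check!` macro removed from this file wrapped a fallible expression and, on `Err`, logged a warning through a scoped slog logger. The patch inlines that behaviour at each call site, either as `let _ = expr;` where the result is simply ignored (the child-side `unistd::close` calls) or as `let _ = expr.map_err(|e| warn!(logger, ...));` where the warning is kept (the parent-side `fcntl`/`setns`/`kill` calls). A rough std-only equivalence sketch; `warn_log` and `close_fd` are hypothetical stand-ins for slog's `warn!` and the `unistd`/`fcntl` calls:

```rust
// Hypothetical stand-in for warn!(logger, "{:?}", e) with a "subsystem" field.
fn warn_log(subsystem: &str, err: &dyn std::fmt::Debug) {
    eprintln!("[{}] {:?}", subsystem, err);
}

// Hypothetical stand-in for unistd::close().
fn close_fd(fd: i32) -> Result<(), String> {
    if fd < 0 {
        Err("EBADF".to_string())
    } else {
        Ok(())
    }
}

fn cleanup(fd: i32) {
    // Before: check!(close_fd(fd), "closing cfd log");
    // After: the same warning is produced inline and the Result is discarded.
    let _ = close_fd(fd).map_err(|e| warn_log("closing cfd log", &e));
}
```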

View File

@ -407,8 +407,7 @@ fn mount_cgroups(
if key != base {
let src = format!("{}/{}", m.destination.as_str(), key);
match unix::fs::symlink(destination.as_str(), &src[1..]) {
Err(e) => {
unix::fs::symlink(destination.as_str(), &src[1..]).map_err(|e| {
log_child!(
cfd_log,
"symlink: {} {} err: {}",
@ -417,10 +416,8 @@ fn mount_cgroups(
e.to_string()
);
return Err(e.into());
}
Ok(_) => {}
}
e
})?;
}
}
@ -615,9 +612,7 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
MsFlags::MS_SLAVE | MsFlags::MS_REC,
None::<&str>,
)?;
match umount2(abs_mount_point, MntFlags::MNT_DETACH) {
Ok(_) => (),
Err(e) => {
umount2(abs_mount_point, MntFlags::MNT_DETACH).or_else(|e| {
if e.ne(&nix::Error::from(Errno::EINVAL)) && e.ne(&nix::Error::from(Errno::EPERM)) {
return Err(anyhow!(e));
}
@ -631,8 +626,9 @@ pub fn ms_move_root(rootfs: &str) -> Result<bool> {
MsFlags::empty(),
None::<&str>,
)?;
}
}
Ok(())
})?;
}
mount(
@ -689,18 +685,14 @@ fn mount_from(
Path::new(&dest)
};
// let _ = fs::create_dir_all(&dir);
match fs::create_dir_all(&dir) {
Ok(_) => {}
Err(e) => {
let _ = fs::create_dir_all(&dir).map_err(|e| {
log_child!(
cfd_log,
"creat dir {}: {}",
dir.to_str().unwrap(),
e.to_string()
);
}
}
)
});
// make sure file exists so we can bind over it
if src.is_file() {
@ -717,31 +709,26 @@ fn mount_from(
}
};
match stat::stat(dest.as_str()) {
Ok(_) => {}
Err(e) => {
let _ = stat::stat(dest.as_str()).map_err(|e| {
log_child!(
cfd_log,
"dest stat error. {}: {}",
"dest stat error. {}: {:?}",
dest.as_str(),
e.as_errno().unwrap().desc()
);
}
}
e.as_errno()
)
});
match mount(
mount(
Some(src.as_str()),
dest.as_str(),
Some(m.r#type.as_str()),
flags,
Some(d.as_str()),
) {
Ok(_) => {}
Err(e) => {
log_child!(cfd_log, "mount error: {}", e.as_errno().unwrap().desc());
return Err(e.into());
}
}
)
.map_err(|e| {
log_child!(cfd_log, "mount error: {:?}", e.as_errno());
e
})?;
if flags.contains(MsFlags::MS_BIND)
&& flags.intersects(
@ -753,24 +740,17 @@ fn mount_from(
| MsFlags::MS_SLAVE),
)
{
match mount(
mount(
Some(dest.as_str()),
dest.as_str(),
None::<&str>,
flags | MsFlags::MS_REMOUNT,
None::<&str>,
) {
Err(e) => {
log_child!(
cfd_log,
"remout {}: {}",
dest.as_str(),
e.as_errno().unwrap().desc()
);
return Err(e.into());
}
Ok(_) => {}
}
)
.map_err(|e| {
log_child!(cfd_log, "remout {}: {:?}", dest.as_str(), e.as_errno());
e
})?;
}
Ok(())
}
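`ms_move_root` above switches to `or_else` rather than `map_err` because some failures are recoverable: an `EINVAL` or `EPERM` from `umount2` is tolerated and a fallback mount is attempted, while any other error is still propagated through the trailing `?`. A minimal sketch of that control flow; the `Errno` enum and the two syscall wrappers are hypothetical stand-ins for the nix calls in the real code:

```rust
#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum Errno {
    Einval,
    Eperm,
    Ebusy,
}

// Hypothetical stand-ins for umount2(..., MNT_DETACH) and the fallback mount().
fn umount_detach() -> Result<(), Errno> {
    Err(Errno::Einval)
}
fn cover_mount_point() -> Result<(), Errno> {
    Ok(())
}

fn move_root_sketch() -> Result<(), Errno> {
    umount_detach().or_else(|e| {
        if e != Errno::Einval && e != Errno::Eperm {
            // Unexpected errno: hand it back so the trailing `?` propagates it.
            return Err(e);
        }
        // EINVAL/EPERM are tolerated: fall back to covering the mount point.
        cover_mount_point()?;
        Ok(())
    })?;
    Ok(())
}
```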

View File

@ -143,21 +143,15 @@ pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
},
SYNC_DATA => {
let length: i32 = data_str.len() as i32;
match write_count(fd, &length.to_be_bytes(), MSG_SIZE) {
Ok(_count) => (),
Err(e) => {
write_count(fd, &length.to_be_bytes(), MSG_SIZE).or_else(|e| {
unistd::close(fd)?;
return Err(anyhow!(e).context("error in send message to process"));
}
}
Err(anyhow!(e).context("error in send message to process"))
})?;
match write_count(fd, data_str.as_bytes(), data_str.len()) {
Ok(_count) => (),
Err(e) => {
write_count(fd, data_str.as_bytes(), data_str.len()).or_else(|e| {
unistd::close(fd)?;
return Err(anyhow!(e).context("error in send message to process"));
}
}
Err(anyhow!(e).context("error in send message to process"))
})?;
}
_ => (),
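`write_sync` above uses `or_else` instead of `map_err` because its error path is itself fallible: the fd is closed first (that close can fail and short-circuit via `?` inside the closure), and only then is the original write error wrapped and returned. A sketch of that cleanup-then-propagate shape, assuming the `anyhow` crate; `write_all_fd` and `close_fd` are hypothetical stand-ins for `write_count` and `unistd::close`:

```rust
use anyhow::{anyhow, Result};
use std::io;

// Hypothetical stand-in for write_count(): always fails, for illustration.
fn write_all_fd(_fd: i32, _buf: &[u8]) -> io::Result<usize> {
    Err(io::Error::new(io::ErrorKind::BrokenPipe, "broken pipe"))
}

// Hypothetical stand-in for unistd::close().
fn close_fd(_fd: i32) -> io::Result<()> {
    Ok(())
}

fn write_sync_sketch(fd: i32, data: &[u8]) -> Result<()> {
    write_all_fd(fd, data).or_else(|e| {
        // The error path does real work: close the fd first (its own failure,
        // if any, short-circuits here via `?`), then wrap the write error.
        close_fd(fd)?;
        Err(anyhow!(e).context("error in send message to process"))
    })?;
    Ok(())
}
```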

View File

@ -137,17 +137,14 @@ fn get_device_name(sandbox: &Arc<Mutex<Sandbox>>, dev_addr: &str) -> Result<Stri
info!(sl!(), "Waiting on channel for device notification\n");
let hotplug_timeout = AGENT_CONFIG.read().unwrap().hotplug_timeout;
let dev_name = match rx.recv_timeout(hotplug_timeout) {
Ok(name) => name,
Err(_) => {
let dev_name = rx.recv_timeout(hotplug_timeout).map_err(|_| {
GLOBAL_DEVICE_WATCHER.lock().unwrap().remove_entry(dev_addr);
return Err(anyhow!(
anyhow!(
"Timeout reached after {:?} waiting for device {}",
hotplug_timeout,
dev_addr
));
}
};
)
})?;
Ok(format!("{}/{}", SYSTEM_DEV_PATH, &dev_name))
}
@ -214,10 +211,10 @@ fn update_spec_device_list(device: &Device, spec: &mut Spec, devidx: &DevIndex)
));
}
let linux = match spec.linux.as_mut() {
None => return Err(anyhow!("Spec didn't container linux field")),
Some(l) => l,
};
let linux = spec
.linux
.as_mut()
.ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
if !Path::new(&device.vm_path).exists() {
return Err(anyhow!("vm_path:{} doesn't exist", device.vm_path));
@ -411,10 +408,10 @@ pub fn update_device_cgroup(spec: &mut Spec) -> Result<()> {
let major = stat::major(rdev) as i64;
let minor = stat::minor(rdev) as i64;
let linux = match spec.linux.as_mut() {
None => return Err(anyhow!("Spec didn't container linux field")),
Some(l) => l,
};
let linux = spec
.linux
.as_mut()
.ok_or_else(|| anyhow!("Spec didn't container linux field"))?;
if linux.resources.is_none() {
linux.resources = Some(LinuxResources::default());
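In `get_device_name` above, the `map_err` closure does more than build the error: on timeout it first removes the pending entry from `GLOBAL_DEVICE_WATCHER`, then constructs the timeout error that the trailing `?` propagates. A sketch of that cleanup-inside-`map_err` shape, assuming the `anyhow` crate; the `watchers` map and the mpsc channel are hypothetical stand-ins for the global watcher map and the hotplug notification path:

```rust
use anyhow::{anyhow, Result};
use std::collections::HashMap;
use std::sync::mpsc::channel;
use std::time::Duration;

fn wait_for_device(
    watchers: &mut HashMap<String, ()>,
    dev_addr: &str,
    timeout: Duration,
) -> Result<String> {
    let (_tx, rx) = channel::<String>();
    watchers.insert(dev_addr.to_string(), ());

    // On timeout the closure undoes the watcher registration before building
    // the error; on success the received device name flows through unchanged.
    let dev_name = rx.recv_timeout(timeout).map_err(|_| {
        watchers.remove(dev_addr);
        anyhow!(
            "Timeout reached after {:?} waiting for device {}",
            timeout,
            dev_addr
        )
    })?;

    Ok(dev_name)
}
```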

View File

@ -512,14 +512,12 @@ fn run_debug_console_shell(logger: &Logger, shell: &str, socket_fd: RawFd) -> Re
let args: Vec<&CStr> = vec![];
// run shell
if let Err(e) = unistd::execvp(cmd.as_c_str(), args.as_slice()) {
match e {
let _ = unistd::execvp(cmd.as_c_str(), args.as_slice()).map_err(|e| match e {
nix::Error::Sys(errno) => {
std::process::exit(errno as i32);
}
_ => std::process::exit(-2),
}
}
});
}
Ok(ForkResult::Parent { child: child_pid }) => {
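Both `do_exec` earlier and `run_debug_console_shell` here handle `execvp` the same way: on success the call never returns (the process image is replaced), so only the error case matters, and the `map_err` closure translates the errno into an exit status rather than producing a value. A sketch of that shape; `exec_replace` is a hypothetical stand-in for `unistd::execvp` and the errno value is illustrative:

```rust
use std::convert::Infallible;

// Hypothetical stand-in for unistd::execvp(): Ok is Infallible because a
// successful exec never returns to the caller.
fn exec_replace(_program: &str, _args: &[&str]) -> Result<Infallible, i32> {
    Err(2) // e.g. ENOENT
}

fn do_exec_sketch(program: &str, args: &[&str]) -> ! {
    // exec only comes back on failure; turn the errno into the process exit
    // code, mirroring `nix::Error::Sys(errno) => std::process::exit(errno as i32)`.
    let _ = exec_replace(program, args).map_err(|errno| std::process::exit(errno));
    unreachable!()
}
```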

View File

@ -251,10 +251,7 @@ fn ephemeral_storage_handler(
return Ok("".to_string());
}
if let Err(err) = fs::create_dir_all(Path::new(&storage.mount_point)) {
return Err(err.into());
}
fs::create_dir_all(Path::new(&storage.mount_point))?;
common_storage_handler(logger, storage)?;
Ok("".to_string())
@ -449,21 +446,17 @@ pub fn add_storages(
"subsystem" => "storage",
"storage-type" => handler_name.to_owned()));
let handler = match STORAGEHANDLERLIST.get(&handler_name.as_str()) {
None => {
return Err(anyhow!(
let handler = STORAGEHANDLERLIST
.get(&handler_name.as_str())
.ok_or_else(|| {
anyhow!(
"Failed to find the storage handler {}",
storage.driver.to_owned()
));
}
Some(f) => f,
};
)
})?;
let mount_point = match handler(&logger, &storage, sandbox.clone()) {
// Todo need to rollback the mounted storage if err met.
Err(e) => return Err(e),
Ok(m) => m,
};
let mount_point = handler(&logger, &storage, sandbox.clone())?;
if mount_point.len() > 0 {
mount_list.push(mount_point);
@ -482,15 +475,18 @@ fn mount_to_rootfs(logger: &Logger, m: &INIT_MOUNT) -> Result<()> {
fs::create_dir_all(Path::new(m.dest)).context("could not create directory")?;
if let Err(err) = bare_mount.mount() {
bare_mount.mount().or_else(|e| {
if m.src != "dev" {
return Err(err.into());
return Err(e);
}
error!(
logger,
"Could not mount filesystem from {} to {}", m.src, m.dest
);
}
Ok(())
})?;
Ok(())
}
@ -659,10 +655,9 @@ pub fn remove_mounts(mounts: &Vec<String>) -> Result<()> {
fn ensure_destination_exists(destination: &str, fs_type: &str) -> Result<()> {
let d = Path::new(destination);
if !d.exists() {
let dir = match d.parent() {
Some(d) => d,
None => return Err(anyhow!("mount destination {} doesn't exist", destination)),
};
let dir = d
.parent()
.ok_or_else(|| anyhow!("mount destination {} doesn't exist", destination))?;
if !dir.exists() {
fs::create_dir_all(dir).context(format!("create dir all failed on {:?}", dir))?;
}

View File

@ -3,15 +3,15 @@
// SPDX-License-Identifier: Apache-2.0
//
use anyhow::{anyhow, Result};
use nix::mount::MsFlags;
use nix::sched::{unshare, CloneFlags};
use nix::unistd::{getpid, gettid};
use std::fmt;
use std::fs;
use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::thread;
use std::thread::{self};
use crate::mount::{BareMount, FLAGS};
use slog::Logger;
@ -77,10 +77,8 @@ impl Namespace {
// setup creates persistent namespace without switching to it.
// Note, pid namespaces cannot be persisted.
pub fn setup(mut self) -> Result<Self, String> {
if let Err(err) = fs::create_dir_all(&self.persistent_ns_dir) {
return Err(err.to_string());
}
pub fn setup(mut self) -> Result<Self> {
fs::create_dir_all(&self.persistent_ns_dir)?;
let ns_path = PathBuf::from(&self.persistent_ns_dir);
let ns_type = self.ns_type.clone();
@ -88,33 +86,23 @@ impl Namespace {
let new_ns_path = ns_path.join(&ns_type.get());
if let Err(err) = File::create(new_ns_path.as_path()) {
return Err(err.to_string());
}
File::create(new_ns_path.as_path())?;
self.path = new_ns_path.clone().into_os_string().into_string().unwrap();
let hostname = self.hostname.clone();
let new_thread = thread::spawn(move || {
let new_thread = thread::spawn(move || -> Result<()> {
let origin_ns_path = get_current_thread_ns_path(&ns_type.get());
let _origin_ns_fd = match File::open(Path::new(&origin_ns_path)) {
Err(err) => return Err(err.to_string()),
Ok(file) => file.as_raw_fd(),
};
File::open(Path::new(&origin_ns_path))?;
// Create a new netns on the current thread.
let cf = ns_type.get_flags().clone();
if let Err(err) = unshare(cf) {
return Err(err.to_string());
}
unshare(cf)?;
if ns_type == NamespaceType::UTS && hostname.is_some() {
match nix::unistd::sethostname(hostname.unwrap()) {
Err(err) => return Err(err.to_string()),
Ok(_) => (),
}
nix::unistd::sethostname(hostname.unwrap())?;
}
// Bind mount the new namespace from the current thread onto the mount point to persist it.
let source: &str = origin_ns_path.as_str();
@ -131,23 +119,21 @@ impl Namespace {
};
let bare_mount = BareMount::new(source, destination, "none", flags, "", &logger);
if let Err(err) = bare_mount.mount() {
return Err(format!(
bare_mount.mount().map_err(|e| {
anyhow!(
"Failed to mount {} to {} with err:{:?}",
source, destination, err
));
}
source,
destination,
e
)
})?;
Ok(())
});
match new_thread.join() {
Ok(t) => match t {
Err(err) => return Err(err),
Ok(()) => (),
},
Err(err) => return Err(format!("Failed to join thread {:?}!", err)),
}
new_thread
.join()
.map_err(|e| anyhow!("Failed to join thread {:?}!", e))??;
Ok(self)
}
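`Namespace::setup` above changes its signature from `Result<Self, String>` to anyhow's `Result<Self>`, which lets plain `?` replace every manual `err.to_string()` conversion, and the spawned thread now returns `Result<()>` as well. That makes `new_thread.join()` yield a nested result: the outer layer is the join (panic) error, converted with `map_err`, and the inner layer is the thread's own result, unwrapped by the second `?`. A compact sketch of that double-`?`, assuming the `anyhow` crate; the directory path is illustrative:

```rust
use anyhow::{anyhow, Result};
use std::thread;

fn setup_in_thread() -> Result<()> {
    let handle = thread::spawn(move || -> Result<()> {
        // Work that can fail with an ordinary error (io::Error converts via `?`).
        std::fs::create_dir_all("/tmp/example-ns-dir")?;
        Ok(())
    });

    // join() returns Result<Result<()>, Box<dyn Any + Send>>:
    //   outer Err => the thread panicked, mapped into an anyhow error;
    //   inner Err => the thread's own failure, unwrapped by the second `?`.
    handle
        .join()
        .map_err(|e| anyhow!("Failed to join thread {:?}!", e))??;

    Ok(())
}
```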

View File

@ -258,15 +258,12 @@ impl agentService {
});
});
if let Err(_) = rx.recv_timeout(Duration::from_secs(req.timeout as u64)) {
return Err(anyhow!(nix::Error::from_errno(nix::errno::Errno::ETIME)));
}
rx.recv_timeout(Duration::from_secs(req.timeout as u64))
.map_err(|_| anyhow!(nix::Error::from_errno(nix::errno::Errno::ETIME)))?;
if let Err(_) = handle.join() {
return Err(anyhow!(nix::Error::from_errno(
nix::errno::Errno::UnknownErrno
)));
}
handle
.join()
.map_err(|_| anyhow!(nix::Error::from_errno(nix::errno::Errno::UnknownErrno)))?;
let s = self.sandbox.clone();
let mut sandbox = s.lock().unwrap();
@ -591,13 +588,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::WaitProcessRequest,
) -> ttrpc::Result<WaitProcessResponse> {
match self.do_wait_process(req) {
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(resp) => Ok(resp),
}
self.do_wait_process(req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn list_processes(
@ -737,13 +730,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
"invalid container id".to_string(),
)))?;
match ctr.stats() {
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(resp) => Ok(resp),
}
ctr.stats().map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn pause_container(
@ -797,13 +786,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::WriteStreamRequest,
) -> ttrpc::Result<WriteStreamResponse> {
match self.do_write_stream(req) {
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(resp) => Ok(resp),
}
self.do_write_stream(req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn read_stdout(
@ -811,13 +796,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::ReadStreamRequest,
) -> ttrpc::Result<ReadStreamResponse> {
match self.do_read_stream(req, true) {
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(resp) => Ok(resp),
}
self.do_read_stream(req, true).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn read_stderr(
@ -825,13 +806,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::ReadStreamRequest,
) -> ttrpc::Result<ReadStreamResponse> {
match self.do_read_stream(req, false) {
Err(e) => Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
))),
Ok(resp) => Ok(resp),
}
self.do_read_stream(req, false).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})
}
fn close_stdin(
@ -844,15 +821,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let p = match find_process(&mut sandbox, cid.as_str(), eid.as_str(), false) {
Ok(v) => v,
Err(e) => {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INVALID_ARGUMENT,
format!("invalid argument: {:?}", e),
)));
}
};
))
})?;
if p.term_master.is_some() {
let _ = unistd::close(p.term_master.unwrap());
@ -876,15 +850,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let eid = req.exec_id.clone();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let p = match find_process(&mut sandbox, cid.as_str(), eid.as_str(), false) {
Ok(v) => v,
Err(e) => {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::UNAVAILABLE,
format!("invalid argument: {:?}", e),
)));
}
};
))
})?;
if p.term_master.is_none() {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
@ -903,12 +874,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
};
let err = libc::ioctl(fd, TIOCSWINSZ, &win);
if let Err(e) = Errno::result(err).map(drop) {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
Errno::result(err).map(drop).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
format!("ioctl error: {:?}", e),
)));
}
))
})?;
}
Ok(Empty::new())
@ -1076,12 +1047,12 @@ impl protocols::agent_ttrpc::AgentService for agentService {
s.running = true;
if !req.guest_hook_path.is_empty() {
if let Err(e) = s.add_hooks(&req.guest_hook_path) {
let _ = s.add_hooks(&req.guest_hook_path).map_err(|e| {
error!(
sl!(),
"add guest hook {} failed: {:?}", req.guest_hook_path, e
);
}
});
}
if req.sandbox_id.len() > 0 {
@ -1189,12 +1160,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
let s = Arc::clone(&self.sandbox);
let sandbox = s.lock().unwrap();
if let Err(e) = sandbox.online_cpu_memory(&req) {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)));
}
sandbox.online_cpu_memory(&req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@ -1204,12 +1172,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::ReseedRandomDevRequest,
) -> ttrpc::Result<Empty> {
if let Err(e) = random::reseed_rng(req.data.as_slice()) {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)));
}
random::reseed_rng(req.data.as_slice()).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@ -1248,12 +1213,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::MemHotplugByProbeRequest,
) -> ttrpc::Result<Empty> {
if let Err(e) = do_mem_hotplug_by_probe(&req.memHotplugProbeAddr) {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)));
}
do_mem_hotplug_by_probe(&req.memHotplugProbeAddr).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@ -1263,12 +1225,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::SetGuestDateTimeRequest,
) -> ttrpc::Result<Empty> {
if let Err(e) = do_set_guest_date_time(req.Sec, req.Usec) {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)));
}
do_set_guest_date_time(req.Sec, req.Usec).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@ -1278,12 +1237,9 @@ impl protocols::agent_ttrpc::AgentService for agentService {
_ctx: &ttrpc::TtrpcContext,
req: protocols::agent::CopyFileRequest,
) -> ttrpc::Result<Empty> {
if let Err(e) = do_copy_file(&req) {
return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(
ttrpc::Code::INTERNAL,
e.to_string(),
)));
}
do_copy_file(&req).map_err(|e| {
ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))
})?;
Ok(Empty::new())
}
@ -1374,13 +1330,10 @@ fn get_memory_info(block_size: bool, hotplug: bool) -> Result<(u64, bool)> {
return Err(anyhow!("Invalid block size"));
}
size = match u64::from_str_radix(v.trim(), 16) {
Ok(h) => h,
Err(_) => {
size = u64::from_str_radix(v.trim(), 16).map_err(|_| {
warn!(sl!(), "failed to parse the str {} to hex", size);
return Err(anyhow!("Invalid block size"));
}
};
anyhow!("Invalid block size")
})?;
}
Err(e) => {
info!(sl!(), "memory block size error: {:?}", e.kind());
@ -1649,12 +1602,14 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
PathBuf::from("/")
};
if let Err(e) = fs::create_dir_all(dir.to_str().unwrap()) {
fs::create_dir_all(dir.to_str().unwrap()).or_else(|e| {
if e.kind() != std::io::ErrorKind::AlreadyExists {
return Err(e.into());
}
return Err(e);
}
Ok(())
})?;
std::fs::set_permissions(
dir.to_str().unwrap(),
std::fs::Permissions::from_mode(req.dir_mode),
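Every RPC handler above collapses the same five-line `match` into a single `map_err` that wraps the internal error in `ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INTERNAL, e.to_string()))`. The underlying idiom is translating the error type at an API boundary; a generic sketch with a hypothetical `RpcError`/`RpcCode` pair standing in for the ttrpc types, assuming the `anyhow` crate for the internal side:

```rust
use anyhow::{anyhow, Result};

// Hypothetical stand-ins for ttrpc::Error / ttrpc::Code.
#[allow(dead_code)]
#[derive(Debug)]
enum RpcCode {
    Internal,
}

#[allow(dead_code)]
#[derive(Debug)]
struct RpcError {
    code: RpcCode,
    message: String,
}

// Internal implementation with its own (anyhow) error type.
fn do_wait_process() -> Result<u32> {
    Err(anyhow!("container not found"))
}

// The public handler speaks RpcError, so the boundary translates the error in
// one map_err instead of matching on Ok/Err and rebuilding both arms.
fn wait_process_handler() -> std::result::Result<u32, RpcError> {
    do_wait_process().map_err(|e| RpcError {
        code: RpcCode::Internal,
        message: e.to_string(),
    })
}
```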

View File

@ -143,16 +143,10 @@ impl Sandbox {
// It's assumed that caller is calling this method after
// acquiring a lock on sandbox.
pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
match self.unset_sandbox_storage(path) {
Ok(res) => {
if res {
if self.unset_sandbox_storage(path)? {
return self.remove_sandbox_storage(path);
}
}
Err(err) => {
return Err(err);
}
}
Ok(())
}
@ -166,23 +160,17 @@ impl Sandbox {
pub fn setup_shared_namespaces(&mut self) -> Result<bool> {
// Set up shared IPC namespace
self.shared_ipcns = match Namespace::new(&self.logger).as_ipc().setup() {
Ok(ns) => ns,
Err(err) => {
return Err(anyhow!(err).context("Failed to setup persistent IPC namespace"));
}
};
self.shared_ipcns = Namespace::new(&self.logger)
.as_ipc()
.setup()
.context("Failed to setup persistent IPC namespace")?;
// // Set up shared UTS namespace
self.shared_utsns = match Namespace::new(&self.logger)
self.shared_utsns = Namespace::new(&self.logger)
.as_uts(self.hostname.as_str())
.setup()
{
Ok(ns) => ns,
Err(err) => {
return Err(anyhow!(err).context("Failed to setup persistent UTS namespace"));
}
};
.context("Failed to setup persistent UTS namespace")?;
Ok(true)
}
@ -316,10 +304,9 @@ impl Sandbox {
thread::spawn(move || {
for event in rx {
info!(logger, "got an OOM event {:?}", event);
match tx.send(container_id.clone()) {
Err(err) => error!(logger, "failed to send message: {:?}", err),
Ok(_) => {}
}
let _ = tx
.send(container_id.clone())
.map_err(|e| error!(logger, "failed to send message: {:?}", e));
}
});
}
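In `setup_shared_namespaces` above, the `match` blocks become chained calls ending in `.context(...)?`: because `Namespace::setup()` now returns `anyhow::Result` (see the namespace.rs change earlier), anyhow's `Context` extension can attach the higher-level message directly, with no intermediate `anyhow!(err)`. A small sketch of `.context()` on a fallible call whose error type implements `std::error::Error`; the path and message are illustrative:

```rust
use anyhow::{Context, Result};

fn read_hostname() -> Result<String> {
    // .context() wraps the underlying io::Error in an anyhow error carrying
    // the higher-level message; `?` then propagates the combined error.
    let hostname = std::fs::read_to_string("/proc/sys/kernel/hostname")
        .context("Failed to read hostname")?;
    Ok(hostname.trim().to_string())
}
```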

View File

@ -99,14 +99,14 @@ impl Uevent {
let online_path = format!("{}/{}/online", SYSFS_DIR, &self.devpath);
// It's a memory hot-add event.
if online_path.starts_with(SYSFS_MEMORY_ONLINE_PATH) {
if let Err(e) = online_device(online_path.as_ref()) {
let _ = online_device(online_path.as_ref()).map_err(|e| {
error!(
*logger,
"failed to online device";
"device" => &self.devpath,
"error" => format!("{}", e),
);
}
)
});
return;
}
}