rustjail: refactoring the way of creating container process
In the previous implementation, the container process was created by forking the parent process, doing the remaining setup in the forked child (mounting the rootfs, dropping capabilities, and so on), and finally exec'ing the container entry command to turn the child into the container process. But the parent is a multi-threaded process, which can dead lock the forked child: if one of the parent's threads is in the middle of a malloc and holds the allocator's mutex at the moment of fork, the child inherits the locked mutex, yet the child has only a single thread and thus no way to ever release it, so the next malloc in the child dead locks. The new implementation therefore execs immediately after the fork and performs the container setup in the exec'd process. This requires explicit data communication between parent and child, since the child can no longer rely on the memory shared through fork.

Fixes: #166
Fixes: #133

Signed-off-by: fupan.lfp <fupan.lfp@antfin.com>
This commit is contained in:
parent e56b10f835
commit c1b6838e25
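To make the new flow concrete, here is a minimal, editorial sketch (not the code from this commit): the parent forks, the child immediately re-execs the agent binary with an "init" argument (main() in this commit gains exactly that check and calls rustjail::container::init_child()), and the container configuration is handed to the child over a pipe rather than through memory shared by fork. The helper name spawn_container_init and the way the pipe fd is advertised to the child are assumptions made for illustration; the sketch also assumes the libc crate and trims error handling.

use std::ffi::CString;

// Sketch only: fork, then exec /proc/self/exe with an "init" argument so all
// container setup runs in a fresh, single-threaded process image.
fn spawn_container_init(config_json: &str) -> std::io::Result<()> {
    // Pipe for handing the container config to the child: after exec the
    // child can no longer see the parent's memory.
    let mut fds = [0i32; 2];
    if unsafe { libc::pipe(fds.as_mut_ptr()) } != 0 {
        return Err(std::io::Error::last_os_error());
    }
    let (read_fd, write_fd) = (fds[0], fds[1]);

    // Prepare the exec arguments *before* forking so the child does not need
    // to allocate (and possibly block on the allocator's mutex) between fork
    // and exec.
    let exe = CString::new("/proc/self/exe").unwrap();
    let arg_init = CString::new("init").unwrap();
    let argv = [exe.as_ptr(), arg_init.as_ptr(), std::ptr::null()];

    match unsafe { libc::fork() } {
        -1 => Err(std::io::Error::last_os_error()),
        0 => {
            // Child: exec immediately; rootfs mounts, capability handling and
            // the rest happen in the exec'd "init" process, which reads its
            // configuration from the inherited pipe fd. How the fd number is
            // communicated to the child (e.g. via an environment variable) is
            // left out of this sketch.
            unsafe { libc::execv(exe.as_ptr(), argv.as_ptr()) };
            unsafe { libc::_exit(127) } // only reached if exec failed
        }
        _child_pid => {
            // Parent: send the configuration over the pipe; the write result
            // is not checked here to keep the sketch short.
            let buf = config_json.as_bytes();
            unsafe { libc::write(write_fd, buf.as_ptr().cast(), buf.len()) };
            unsafe { libc::close(write_fd) };
            // The child keeps its inherited copy of the read end.
            unsafe { libc::close(read_fd) };
            Ok(())
        }
    }
}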
@@ -9,10 +9,12 @@
use lazy_static;

use crate::errors::*;
use crate::log_child;
use crate::sync::write_count;
use caps::{self, CapSet, Capability, CapsHashSet};
use oci::LinuxCapabilities;
use slog::Logger;
use std::collections::HashMap;
use std::os::unix::io::RawFd;

lazy_static! {
    pub static ref CAPSMAP: HashMap<String, Capability> = {
@@ -76,14 +78,14 @@ lazy_static! {
    };
}

fn to_capshashset(logger: &Logger, caps: &[String]) -> CapsHashSet {
fn to_capshashset(cfd_log: RawFd, caps: &[String]) -> CapsHashSet {
    let mut r = CapsHashSet::new();

    for cap in caps.iter() {
        let c = CAPSMAP.get(cap);

        if c.is_none() {
            warn!(logger, "{} is not a cap", cap);
            log_child!(cfd_log, "{} is not a cap", cap);
            continue;
        }

@@ -98,37 +100,35 @@ pub fn reset_effective() -> Result<()> {
    Ok(())
}

pub fn drop_priviledges(logger: &Logger, caps: &LinuxCapabilities) -> Result<()> {
    let logger = logger.new(o!("subsystem" => "capabilities"));

pub fn drop_priviledges(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
    let all = caps::all();

    for c in all.difference(&to_capshashset(&logger, caps.bounding.as_ref())) {
    for c in all.difference(&to_capshashset(cfd_log, caps.bounding.as_ref())) {
        caps::drop(None, CapSet::Bounding, *c)?;
    }

    caps::set(
        None,
        CapSet::Effective,
        to_capshashset(&logger, caps.effective.as_ref()),
        to_capshashset(cfd_log, caps.effective.as_ref()),
    )?;
    caps::set(
        None,
        CapSet::Permitted,
        to_capshashset(&logger, caps.permitted.as_ref()),
        to_capshashset(cfd_log, caps.permitted.as_ref()),
    )?;
    caps::set(
        None,
        CapSet::Inheritable,
        to_capshashset(&logger, caps.inheritable.as_ref()),
        to_capshashset(cfd_log, caps.inheritable.as_ref()),
    )?;

    if let Err(_) = caps::set(
        None,
        CapSet::Ambient,
        to_capshashset(&logger, caps.ambient.as_ref()),
        to_capshashset(cfd_log, caps.ambient.as_ref()),
    ) {
        warn!(logger, "failed to set ambient capability");
        log_child!(cfd_log, "failed to set ambient capability");
    }

    Ok(())
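For orientation only, a sketch of how the reworked capability entry points above might be driven from the child side after exec; apply_process_caps is a made-up helper name, and cfd_log stands for the child's log pipe fd:

// Hypothetical caller (not part of the diff): forward child-side messages
// over the cfd_log pipe instead of a slog Logger, then drop to the
// capability set taken from the OCI spec.
fn apply_process_caps(cfd_log: RawFd, caps: &LinuxCapabilities) -> Result<()> {
    log_child!(cfd_log, "dropping to spec capabilities");
    drop_priviledges(cfd_log, caps)
}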
@@ -2,7 +2,6 @@
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::cgroups::FreezerState;
use crate::cgroups::Manager as CgroupManager;
use crate::container::DEFAULT_DEVICES;
@@ -19,6 +18,7 @@ use protocols::agent::{
use regex::Regex;
use std::collections::HashMap;
use std::fs;
use std::path::Path;

// Convenience macro to obtain the scope logger
macro_rules! sl {
@@ -207,7 +207,7 @@ fn parse_size(s: &str, m: &HashMap<String, u128>) -> Result<u128> {

fn custom_size(mut size: f64, base: f64, m: &Vec<String>) -> String {
    let mut i = 0;
    while size > base {
    while size >= base && i < m.len() - 1 {
        size /= base;
        i += 1;
    }
@@ -307,7 +307,6 @@ where
    T: ToString,
{
    let p = format!("{}/{}", dir, file);
    info!(sl!(), "{}", p.as_str());
    fs::write(p.as_str(), v.to_string().as_bytes())?;
    Ok(())
}
@@ -936,6 +935,11 @@ fn get_blkio_stat(dir: &str, file: &str) -> Result<RepeatedField<BlkioStatsEntry
    let p = format!("{}/{}", dir, file);
    let mut m = RepeatedField::new();

    // do as runc
    if !Path::new(&p).exists() {
        return Ok(RepeatedField::new());
    }

    for l in fs::read_to_string(p.as_str())?.lines() {
        let parts: Vec<&str> = l.split(' ').collect();

@@ -1224,7 +1228,7 @@ fn get_all_procs(dir: &str) -> Result<Vec<i32>> {
    Ok(m)
}

#[derive(Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Manager {
    pub paths: HashMap<String, String>,
    pub mounts: HashMap<String, String>,
@@ -1238,7 +1242,6 @@ pub const FROZEN: &'static str = "FROZEN";
impl CgroupManager for Manager {
    fn apply(&self, pid: pid_t) -> Result<()> {
        for (key, value) in &self.paths {
            info!(sl!(), "apply cgroup {}", key);
            apply(value, pid)?;
        }

@@ -1249,7 +1252,6 @@ impl CgroupManager for Manager {
        for (key, value) in &self.paths {
            let _ = fs::create_dir_all(value);
            let sub = get_subsystem(key)?;
            info!(sl!(), "setting cgroup {}", key);
            sub.set(value, spec, update)?;
        }

@@ -1301,9 +1303,16 @@ impl CgroupManager for Manager {
        };

        // BlkioStats
        // note that virtiofs has no blkio stats
        info!(sl!(), "blkio_stats");
        let blkio_stats = if self.paths.get("blkio").is_some() {
            SingularPtrField::some(Blkio().get_stats(self.paths.get("blkio").unwrap())?)
            match Blkio().get_stats(self.paths.get("blkio").unwrap()) {
                Ok(stat) => SingularPtrField::some(stat),
                Err(e) => {
                    warn!(sl!(), "failed to get blkio stats");
                    SingularPtrField::none()
                }
            }
        } else {
            SingularPtrField::none()
        };
(File diff suppressed because it is too large.)
@@ -16,11 +16,13 @@ error_chain! {
        Ffi(std::ffi::NulError);
        Caps(caps::errors::Error);
        Serde(serde_json::Error);
        UTF8(std::string::FromUtf8Error);
        FromUTF8(std::string::FromUtf8Error);
        Parse(std::num::ParseIntError);
        Scanfmt(scan_fmt::parse::ScanError);
        Ip(std::net::AddrParseError);
        Regex(regex::Error);
        EnvVar(std::env::VarError);
        UTF8(std::str::Utf8Error);
    }
    // define new errors
    errors {
@@ -42,14 +42,14 @@ macro_rules! sl {
    };
}

pub mod capabilities;
pub mod cgroups;
pub mod container;
pub mod errors;
pub mod mount;
pub mod process;
pub mod specconv;
// pub mod sync;
pub mod capabilities;
pub mod sync;
pub mod validator;

// pub mod factory;
@@ -66,8 +66,6 @@ pub mod validator;
// construtc ociSpec from grpcSpec, which is needed for hook
// execution. since hooks read config.json

use std::collections::HashMap;
use std::mem::MaybeUninit;
use oci::{
    Box as ociBox, Hooks as ociHooks, Linux as ociLinux, LinuxCapabilities as ociLinuxCapabilities,
    Mount as ociMount, POSIXRlimit as ociPOSIXRlimit, Process as ociProcess, Root as ociRoot,
@@ -77,6 +75,8 @@ use protocols::oci::{
    Hooks as grpcHooks, Linux as grpcLinux, Mount as grpcMount, Process as grpcProcess,
    Root as grpcRoot, Spec as grpcSpec,
};
use std::collections::HashMap;
use std::mem::MaybeUninit;

pub fn process_grpc_to_oci(p: &grpcProcess) -> ociProcess {
    let console_size = if p.ConsoleSize.is_some() {
@@ -14,6 +14,7 @@ use oci::{LinuxDevice, Mount, Spec};
use std::collections::{HashMap, HashSet};
use std::fs::{self, OpenOptions};
use std::os::unix;
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};

use path_absolutize::*;
@@ -23,10 +24,11 @@ use std::io::{BufRead, BufReader};

use crate::container::DEFAULT_DEVICES;
use crate::errors::*;
use crate::sync::write_count;
use lazy_static;
use std::string::ToString;

use slog::Logger;
use crate::log_child;

// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
@@ -97,7 +99,7 @@ lazy_static! {
}

pub fn init_rootfs(
    logger: &Logger,
    cfd_log: RawFd,
    spec: &Spec,
    cpath: &HashMap<String, String>,
    mounts: &HashMap<String, String>,
@@ -133,13 +135,13 @@ pub fn init_rootfs(
            return Err(ErrorKind::Nix(nix::Error::Sys(Errno::EINVAL)).into());
        }
        if m.r#type == "cgroup" {
            mount_cgroups(logger, &m, rootfs, flags, &data, cpath, mounts)?;
            mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?;
        } else {
            if m.destination == "/dev" {
                flags &= !MsFlags::MS_RDONLY;
            }

            mount_from(&m, &rootfs, flags, &data, "")?;
            mount_from(cfd_log, &m, &rootfs, flags, &data, "")?;
        }
    }

@@ -156,7 +158,7 @@ pub fn init_rootfs(
}

fn mount_cgroups(
    logger: &Logger,
    cfd_log: RawFd,
    m: &Mount,
    rootfs: &str,
    flags: MsFlags,
@@ -173,8 +175,8 @@ fn mount_cgroups(
    };

    let cflags = MsFlags::MS_NOEXEC | MsFlags::MS_NOSUID | MsFlags::MS_NODEV;
    info!(logger, "tmpfs");
    mount_from(&ctm, rootfs, cflags, "", "")?;
    // info!(logger, "tmpfs");
    mount_from(cfd_log, &ctm, rootfs, cflags, "", "")?;
    let olddir = unistd::getcwd()?;

    unistd::chdir(rootfs)?;
@@ -183,7 +185,7 @@ fn mount_cgroups(

    // bind mount cgroups
    for (key, mount) in mounts.iter() {
        info!(logger, "{}", key);
        log_child!(cfd_log, "mount cgroup subsystem {}", key);
        let source = if cpath.get(key).is_some() {
            cpath.get(key).unwrap()
        } else {
@@ -210,7 +212,7 @@ fn mount_cgroups(

        srcs.insert(source.to_string());

        info!(logger, "{}", destination.as_str());
        log_child!(cfd_log, "mount destination: {}", destination.as_str());

        let bm = Mount {
            source: source.to_string(),
@@ -219,25 +221,24 @@ fn mount_cgroups(
            options: Vec::new(),
        };

        mount_from(
            &bm,
            rootfs,
            flags | MsFlags::MS_REC | MsFlags::MS_BIND,
            "",
            "",
        )?;
        let mut mount_flags: MsFlags = flags | MsFlags::MS_REC | MsFlags::MS_BIND;
        if key.contains("systemd") {
            mount_flags &= !MsFlags::MS_RDONLY;
        }
        mount_from(cfd_log, &bm, rootfs, mount_flags, "", "")?;

        if key != base {
            let src = format!("{}/{}", m.destination.as_str(), key);
            match unix::fs::symlink(destination.as_str(), &src[1..]) {
                Err(e) => {
                    info!(
                        logger,
                    log_child!(
                        cfd_log,
                        "symlink: {} {} err: {}",
                        key,
                        destination.as_str(),
                        e.to_string()
                    );

                    return Err(e.into());
                }
                Ok(_) => {}
@@ -421,7 +422,14 @@ fn parse_mount(m: &Mount) -> (MsFlags, String) {
    (flags, data.join(","))
}

fn mount_from(m: &Mount, rootfs: &str, flags: MsFlags, data: &str, _label: &str) -> Result<()> {
fn mount_from(
    cfd_log: RawFd,
    m: &Mount,
    rootfs: &str,
    flags: MsFlags,
    data: &str,
    _label: &str,
) -> Result<()> {
    let d = String::from(data);
    let dest = format!("{}{}", rootfs, &m.destination);

@@ -437,8 +445,8 @@ fn mount_from(m: &Mount, rootfs: &str, flags: MsFlags, data: &str, _label: &str)
    match fs::create_dir_all(&dir) {
        Ok(_) => {}
        Err(e) => {
            info!(
                sl!(),
            log_child!(
                cfd_log,
                "creat dir {}: {}",
                dir.to_str().unwrap(),
                e.to_string()
@@ -456,8 +464,6 @@ fn mount_from(m: &Mount, rootfs: &str, flags: MsFlags, data: &str, _label: &str)
        PathBuf::from(&m.source)
    };

    info!(sl!(), "{}, {}", src.to_str().unwrap(), dest.as_str());

    // ignore this check since some mount's src didn't been a directory
    // such as tmpfs.
    /*
@@ -472,7 +478,12 @@ fn mount_from(m: &Mount, rootfs: &str, flags: MsFlags, data: &str, _label: &str)
    match stat::stat(dest.as_str()) {
        Ok(_) => {}
        Err(e) => {
            info!(sl!(), "{}: {}", dest.as_str(), e.as_errno().unwrap().desc());
            log_child!(
                cfd_log,
                "{}: {}",
                dest.as_str(),
                e.as_errno().unwrap().desc()
            );
        }
    }

@@ -485,7 +496,7 @@ fn mount_from(m: &Mount, rootfs: &str, flags: MsFlags, data: &str, _label: &str)
    ) {
        Ok(_) => {}
        Err(e) => {
            info!(sl!(), "mount error: {}", e.as_errno().unwrap().desc());
            log_child!(cfd_log, "mount error: {}", e.as_errno().unwrap().desc());
            return Err(e.into());
        }
    }
@@ -508,8 +519,8 @@ fn mount_from(m: &Mount, rootfs: &str, flags: MsFlags, data: &str, _label: &str)
        None::<&str>,
    ) {
        Err(e) => {
            info!(
                sl!(),
            log_child!(
                cfd_log,
                "remout {}: {}",
                dest.as_str(),
                e.as_errno().unwrap().desc()
@@ -616,9 +627,9 @@ fn bind_dev(dev: &LinuxDevice) -> Result<()> {
    Ok(())
}

pub fn finish_rootfs(spec: &Spec) -> Result<()> {
pub fn finish_rootfs(cfd_log: RawFd, spec: &Spec) -> Result<()> {
    let olddir = unistd::getcwd()?;
    info!(sl!(), "{}", olddir.to_str().unwrap());
    log_child!(cfd_log, "old cwd: {}", olddir.to_str().unwrap());
    unistd::chdir("/")?;
    if spec.linux.is_some() {
        let linux = spec.linux.as_ref().unwrap();
@@ -34,10 +34,8 @@ pub struct Process {
    pub extra_files: Vec<File>,
    // pub caps: Capabilities,
    // pub rlimits: Vec<Rlimit>,
    pub console_socket: Option<RawFd>,
    pub term_master: Option<RawFd>,
    // parent end of fds
    pub parent_console_socket: Option<RawFd>,
    pub tty: bool,
    pub parent_stdin: Option<RawFd>,
    pub parent_stdout: Option<RawFd>,
    pub parent_stderr: Option<RawFd>,
@@ -89,9 +87,8 @@ impl Process {
            exit_pipe_w: None,
            exit_pipe_r: None,
            extra_files: Vec::new(),
            console_socket: None,
            tty: ocip.terminal,
            term_master: None,
            parent_console_socket: None,
            parent_stdin: None,
            parent_stdout: None,
            parent_stderr: None,
@@ -104,44 +101,21 @@ impl Process {

        info!(logger, "before create console socket!");

        if ocip.terminal {
            let (psocket, csocket) = match socket::socketpair(
                AddressFamily::Unix,
                SockType::Stream,
                None,
                SockFlag::SOCK_CLOEXEC,
            ) {
                Ok((u, v)) => (u, v),
                Err(e) => {
                    match e {
                        Error::Sys(errno) => {
                            info!(logger, "socketpair: {}", errno.desc());
                        }
                        _ => {
                            info!(logger, "socketpair: other error!");
                        }
                    }
                    return Err(e);
                }
            };
            p.parent_console_socket = Some(psocket);
            p.console_socket = Some(csocket);
        if !p.tty {
            info!(logger, "created console socket!");

            let (stdin, pstdin) = unistd::pipe2(OFlag::O_CLOEXEC)?;
            p.parent_stdin = Some(pstdin);
            p.stdin = Some(stdin);

            let (pstdout, stdout) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
            p.parent_stdout = Some(pstdout);
            p.stdout = Some(stdout);

            let (pstderr, stderr) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
            p.parent_stderr = Some(pstderr);
            p.stderr = Some(stderr);
        }

        info!(logger, "created console socket!");

        let (stdin, pstdin) = unistd::pipe2(OFlag::O_CLOEXEC)?;
        p.parent_stdin = Some(pstdin);
        p.stdin = Some(stdin);

        let (pstdout, stdout) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
        p.parent_stdout = Some(pstdout);
        p.stdout = Some(stdout);

        let (pstderr, stderr) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
        p.parent_stderr = Some(pstderr);
        p.stderr = Some(stderr);

        Ok(p)
    }
}
src/agent/rustjail/src/sync.rs (new file, 177 lines)
@@ -0,0 +1,177 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::errors::*;
use nix::errno::Errno;
use nix::unistd;
use nix::Error;
use std::mem;
use std::os::unix::io::RawFd;

pub const SYNC_SUCCESS: i32 = 1;
pub const SYNC_FAILED: i32 = 2;
pub const SYNC_DATA: i32 = 3;

const DATA_SIZE: usize = 100;
const MSG_SIZE: usize = mem::size_of::<i32>();

#[macro_export]
macro_rules! log_child {
    ($fd:expr, $($arg:tt)+) => ({
        let lfd = $fd;
        let mut log_str = format_args!($($arg)+).to_string();
        log_str.push('\n');
        write_count(lfd, log_str.as_bytes(), log_str.len());
    })
}

pub fn write_count(fd: RawFd, buf: &[u8], count: usize) -> Result<usize> {
    let mut len = 0;

    loop {
        match unistd::write(fd, &buf[len..]) {
            Ok(l) => {
                len += l;
                if len == count {
                    break;
                }
            }

            Err(e) => {
                if e != Error::from_errno(Errno::EINTR) {
                    return Err(e.into());
                }
            }
        }
    }

    Ok(len)
}

fn read_count(fd: RawFd, count: usize) -> Result<Vec<u8>> {
    let mut v: Vec<u8> = vec![0; count];
    let mut len = 0;

    loop {
        match unistd::read(fd, &mut v[len..]) {
            Ok(l) => {
                len += l;
                if len == count || l == 0 {
                    break;
                }
            }

            Err(e) => {
                if e != Error::from_errno(Errno::EINTR) {
                    return Err(e.into());
                }
            }
        }
    }

    Ok(v[0..len].to_vec())
}

pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
    let buf = read_count(fd, MSG_SIZE)?;
    if buf.len() != MSG_SIZE {
        return Err(ErrorKind::ErrorCode(format!(
            "process: {} failed to receive sync message from peer: got msg length: {}, expected: {}",
            std::process::id(),
            buf.len(),
            MSG_SIZE
        ))
        .into());
    }
    let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
    let msg: i32 = i32::from_be_bytes(buf_array);
    match msg {
        SYNC_SUCCESS => return Ok(Vec::new()),
        SYNC_DATA => {
            let buf = read_count(fd, MSG_SIZE)?;
            let buf_array: [u8; MSG_SIZE] = [buf[0], buf[1], buf[2], buf[3]];
            let msg_length: i32 = i32::from_be_bytes(buf_array);
            let data_buf = read_count(fd, msg_length as usize)?;

            return Ok(data_buf);
        }
        SYNC_FAILED => {
            let mut error_buf = vec![];
            loop {
                let buf = read_count(fd, DATA_SIZE)?;

                error_buf.extend(&buf);
                if DATA_SIZE == buf.len() {
                    continue;
                } else {
                    break;
                }
            }

            let error_str = match std::str::from_utf8(&error_buf) {
                Ok(v) => v,
                Err(e) => {
                    return Err(ErrorKind::ErrorCode(format!(
                        "receive error message from child process failed: {:?}",
                        e
                    ))
                    .into())
                }
            };

            return Err(ErrorKind::ErrorCode(String::from(error_str)).into());
        }
        _ => return Err(ErrorKind::ErrorCode("error in receive sync message".to_string()).into()),
    }
}

pub fn write_sync(fd: RawFd, msg_type: i32, data_str: &str) -> Result<()> {
    let buf = msg_type.to_be_bytes();

    let count = write_count(fd, &buf, MSG_SIZE)?;
    if count != MSG_SIZE {
        return Err(ErrorKind::ErrorCode("error in send sync message".to_string()).into());
    }

    match msg_type {
        SYNC_FAILED => match write_count(fd, data_str.as_bytes(), data_str.len()) {
            Ok(_count) => unistd::close(fd)?,
            Err(e) => {
                unistd::close(fd)?;
                return Err(
                    ErrorKind::ErrorCode("error in send message to process".to_string()).into(),
                );
            }
        },
        SYNC_DATA => {
            let length: i32 = data_str.len() as i32;
            match write_count(fd, &length.to_be_bytes(), MSG_SIZE) {
                Ok(_count) => (),
                Err(e) => {
                    unistd::close(fd)?;
                    return Err(ErrorKind::ErrorCode(
                        "error in send message to process".to_string(),
                    )
                    .into());
                }
            }

            match write_count(fd, data_str.as_bytes(), data_str.len()) {
                Ok(_count) => (),
                Err(e) => {
                    unistd::close(fd)?;
                    return Err(ErrorKind::ErrorCode(
                        "error in send message to process".to_string(),
                    )
                    .into());
                }
            }
        }

        _ => (),
    };

    Ok(())
}
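The read_sync/write_sync pair above is the channel the parent and the exec'd child use to coordinate setup steps. A rough usage sketch follows; the helper names, the error string, and the fd plumbing are placeholders for illustration, not code from this commit:

// Child side: report the outcome of a setup step over the sync fd.
fn child_report(sync_fd: RawFd, ok: bool) -> Result<()> {
    if ok {
        // Tell the parent this step succeeded.
        write_sync(sync_fd, SYNC_SUCCESS, "")
    } else {
        // Send an error string; the parent's read_sync turns it into an Err.
        write_sync(sync_fd, SYNC_FAILED, "mount rootfs failed")
    }
}

// Parent side: block until the child reports back. SYNC_SUCCESS yields an
// empty Vec, SYNC_DATA yields the payload, SYNC_FAILED becomes an Err.
fn parent_wait(sync_fd: RawFd) -> Result<Vec<u8>> {
    read_sync(sync_fd)
}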
@@ -1,9 +1,15 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//

use crate::container::Config;
use crate::errors::*;
use lazy_static;
use nix::errno::Errno;
use nix::Error;
use oci::{LinuxIDMapping, LinuxNamespace, Spec};
use protobuf::RepeatedField;
use std::collections::HashMap;
use std::path::{Component, PathBuf};
@@ -14,8 +14,8 @@ use crate::linux_abi::*;
use crate::mount::{DRIVERBLKTYPE, DRIVERMMIOBLKTYPE, DRIVERNVDIMMTYPE, DRIVERSCSITYPE};
use crate::sandbox::Sandbox;
use crate::{AGENT_CONFIG, GLOBAL_DEVICE_WATCHER};
use protocols::agent::Device;
use oci::Spec;
use protocols::agent::Device;
use rustjail::errors::*;

// Convenience macro to obtain the scope logger
@@ -578,11 +578,11 @@ impl protocols::agent_grpc::AgentService for agentService {
        req: protocols::agent::ExecProcessRequest,
        sink: ::grpcio::UnarySink<protocols::empty::Empty>,
    ) {
        if let Err(_) = self.do_exec_process(req) {
        if let Err(e) = self.do_exec_process(req) {
            let f = sink
                .fail(RpcStatus::new(
                    RpcStatusCode::Internal,
                    Some(String::from("fail to exec process!")),
                    Some(format!("{}", e)),
                ))
                .map_err(|_e| error!(sl!(), "fail to exec process!"));
            ctx.spawn(f);
@@ -10,14 +10,14 @@
#![allow(non_snake_case)]
#[macro_use]
extern crate lazy_static;
extern crate oci;
extern crate prctl;
extern crate protocols;
extern crate regex;
extern crate rustjail;
extern crate scan_fmt;
extern crate serde_json;
extern crate signal_hook;
extern crate scan_fmt;
extern crate oci;

#[macro_use]
extern crate scopeguard;
@@ -100,6 +100,10 @@ fn announce(logger: &Logger) {

fn main() -> Result<()> {
    let args: Vec<String> = env::args().collect();
    if args.len() == 2 && args[1] == "init" {
        rustjail::container::init_child();
        exit(0);
    }

    env::set_var("RUST_BACKTRACE", "full");