Compare commits

..

15 Commits

Author SHA1 Message Date
Jose Carlos Venegas Munoz
93428ca15f Merge pull request #213 from jcvenegas/backport-1.10-fix-release
release: actions: pin artifact to v1
2020-05-07 11:01:08 -05:00
Jose Carlos Venegas Munoz
486ebf73f8 release: actions: pin artifact to v1
the actions upload/download-artifact moved to a new version
and master now is not compatible.

Fixes: #211

Signed-off-by: Jose Carlos Venegas Munoz <jose.carlos.venegas.munoz@intel.com>
2020-05-07 15:38:26 +00:00
Jose Carlos Venegas Munoz
57ac170ca5 Merge pull request #208 from katabuilder/1.10.4-branch-bump
# Kata Containers 1.10.4
2020-05-06 12:26:57 -05:00
katabuilder
31d2b77836 release: Kata Containers 1.10.4
Version bump no changes

Signed-off-by: katabuilder <katabuilder@katacontainers.io>
2020-05-05 20:53:35 +00:00
Salvador Fuentes
2a77d1730d Merge pull request #182 from chavafg/1.10.3-branch-bump
# Kata Containers 1.10.3
2020-04-17 18:13:36 -05:00
Archana Shinde
1199642234 Merge pull request #185 from chavafg/topic/fix-license
license: add license header
2020-04-17 15:59:46 -07:00
Salvador Fuentes
c6551f67ad license: add license header
add license header to `src/agent/rustjail/src/validator.rs`

Fixes: #184.

Signed-off-by: Salvador Fuentes <salvador.fuentes@intel.com>
2020-04-17 16:51:55 -05:00
Salvador Fuentes
10759b7251 release: Kata Containers 1.10.3
failed to get logs
Version bump no changes

Signed-off-by: Salvador Fuentes <salvador.fuentes@intel.com>
2020-04-17 17:54:16 +00:00
Xu Wang
f969e5645d Merge pull request #161 from bergwolf/1.10.2-branch-bump
# Kata Containers 1.10.2
2020-03-18 11:04:40 +08:00
Peng Tao
b7ece51b3f release: Kata Containers 1.10.2
537ecbe path-absolutize: version update

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-03-18 10:47:57 +08:00
Penny Zheng
537ecbee8e path-absolutize: version update
The latest tag version v1.2.0 fixes the error of inappropriately using
mutable static.

Fixes: #144

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
2020-03-18 10:46:22 +08:00
Archana Shinde
aaff08d425 Merge pull request #140 from amshinde/1.10.1-branch-bump
# Kata Containers 1.10.1
2020-02-18 14:08:27 -08:00
Archana Shinde
44f0052967 release: Kata Containers 1.10.1
Version bump no changes

Signed-off-by: Archana Shinde <archana.m.shinde@intel.com>
2020-02-18 19:21:10 +00:00
GabyCT
85b3a67215 Merge pull request #121 from jcvenegas/1.10.0-branch-bump
# Kata Containers 1.10.0
2020-01-14 14:00:08 -06:00
Jose Carlos Venegas Munoz
fc0deb5b9a release: Kata Containers 1.10.0
Starting to version this repository

Signed-off-by: Jose Carlos Venegas Munoz <jose.carlos.venegas.munoz@intel.com>
2020-01-14 19:16:13 +00:00
17 changed files with 124 additions and 1331 deletions

View File

@@ -16,7 +16,7 @@ jobs:
popd
./packaging/artifact-list.sh > artifact-list.txt
- name: save-artifact-list
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: artifact-list
path: artifact-list.txt
@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- run: |
@@ -44,7 +44,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-kernel.tar.gz
@@ -57,7 +57,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- run: |
@@ -72,7 +72,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-experimental-kernel.tar.gz
@@ -85,7 +85,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-qemu
@@ -98,7 +98,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-qemu.tar.gz
@@ -111,7 +111,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-nemu
@@ -124,7 +124,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-nemu.tar.gz
@@ -138,7 +138,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-qemu-virtiofsd
@@ -151,7 +151,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-qemu-virtiofsd.tar.gz
@@ -165,7 +165,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-image
@@ -178,7 +178,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-image.tar.gz
@@ -192,7 +192,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-firecracker
@@ -205,7 +205,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-firecracker.tar.gz
@@ -219,7 +219,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-clh
@@ -232,7 +232,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-clh.tar.gz
@@ -246,7 +246,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-kata-components
@@ -259,7 +259,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-kata-components.tar.gz
@@ -270,14 +270,14 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifacts
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: kata-artifacts
- name: colate-artifacts
run: |
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
- name: store-artifacts
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: release-candidate
path: kata-static.tar.xz
@@ -287,7 +287,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: get-artifacts
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: release-candidate
- name: build-and-push-kata-deploy-ci
@@ -328,7 +328,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: download-artifacts
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: release-candidate
- name: install hub

View File

@@ -1 +1 @@
1.11.0-alpha0
1.10.4

View File

@@ -28,6 +28,12 @@ run_static_checks()
bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/kata-containers"
}
run_rust_test()
{
clone_tests_repo
bash "$tests_repo_dir/.ci/rust-test.sh"
}
run_go_test()
{
clone_tests_repo

View File

@@ -1,15 +0,0 @@
## Copyright (c) 2020 ARM Limited
##
## SPDX-License-Identifier: Apache-2.0
##
[target.aarch64-unknown-linux-musl]
## Only setting linker with `aarch64-linux-musl-gcc`, the
## `rust-agent` could be totally statically linked.
linker = "aarch64-linux-musl-gcc"
## The __addtf3, __subtf3 and __multf3 symbols are used by aarch64-musl,
## but are not provided by rust compiler-builtins.
## For now, the only functional workaround accepted by rust communities
## is to get them from libgcc.
rustflags = [ "-C", "link-arg=-lgcc" ]

View File

@@ -6,7 +6,6 @@ edition = "2018"
[dependencies]
oci = { path = "oci" }
logging = { path = "logging" }
rustjail = { path = "rustjail" }
protocols = { path = "protocols" }
netlink = { path = "netlink" }
@@ -16,18 +15,19 @@ grpcio = { git="https://github.com/alipay/grpc-rs", branch="rust_agent" }
protobuf = "2.6.1"
futures = "0.1.27"
libc = "0.2.58"
nix = "0.17.0"
nix = "0.14.1"
prctl = "1.0.0"
serde_json = "1.0.39"
signal-hook = "0.1.9"
scan_fmt = "0.2.3"
scopeguard = "1.0.0"
regex = "1"
# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog-json = "2.3.0"
slog-async = "2.3.0"
slog-scope = "4.1.2"
# for testing
tempfile = "3.1.0"

View File

@@ -1,20 +0,0 @@
[package]
name = "logging"
version = "0.1.0"
authors = ["Tim Zhang <tim@hyper.sh>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
serde_json = "1.0.39"
# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog-json = "2.3.0"
slog-async = "2.3.0"
slog-scope = "4.1.2"
# for testing
tempfile = "3.1.0"

View File

@@ -8,7 +8,7 @@ edition = "2018"
[dependencies]
libc = "0.2.58"
nix = "0.17.0"
nix = "0.14.1"
protobuf = "2.6.1"
rustjail = { path = "../rustjail" }
protocols = { path = "../protocols" }

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@ serde_derive = "1.0.91"
oci = { path = "../oci" }
protocols = { path ="../protocols" }
caps = "0.3.0"
nix = "0.17.0"
nix = "0.14.1"
scopeguard = "1.0.0"
prctl = "1.0.0"
lazy_static = "1.3.0"
@@ -22,4 +22,4 @@ slog = "2.5.2"
slog-scope = "4.1.2"
scan_fmt = "0.2"
regex = "1.1"
path-absolutize = { git = "git://github.com/magiclen/path-absolutize.git", tag= "v1.1.3" }
path-absolutize = { git = "git://github.com/magiclen/path-absolutize.git", tag= "v1.2.0" }

View File

@@ -6,7 +6,7 @@
use lazy_static;
use protocols::oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec};
use serde_json;
use std::ffi::{CStr, CString};
use std::ffi::CString;
use std::fs;
use std::mem;
use std::os::unix::io::RawFd;
@@ -672,21 +672,19 @@ fn do_exec(logger: &Logger, path: &str, args: &[String], env: &[String]) -> Resu
let logger = logger.new(o!("command" => "exec"));
let p = CString::new(path.to_string()).unwrap();
let sa: Vec<CString> = args
let a: Vec<CString> = args
.iter()
.map(|s| CString::new(s.to_string()).unwrap_or_default())
.collect();
let a: Vec<&CStr> = sa.iter().map(|s| s.as_c_str()).collect();
for (key, _) in env::vars() {
env::remove_var(key);
}
for e in env.iter() {
let v: Vec<&str> = e.splitn(2, "=").collect();
let v: Vec<&str> = e.split("=").collect();
if v.len() != 2 {
info!(logger, "incorrect env config!");
continue;
}
env::set_var(v[0], v[1]);
}
@@ -698,7 +696,7 @@ fn do_exec(logger: &Logger, path: &str, args: &[String], env: &[String]) -> Resu
*/
// execvp doesn't use env for the search path, so we set env manually
debug!(logger, "exec process right now!");
if let Err(e) = unistd::execvp(p.as_c_str(), a.as_slice()) {
if let Err(e) = unistd::execvp(&p, &a) {
info!(logger, "execve failed!!!");
info!(logger, "binary: {:?}, args: {:?}, envs: {:?}", p, a, env);
match e {

View File

@@ -1,3 +1,8 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::container::Config;
use crate::errors::*;
use lazy_static;

View File

@@ -10,8 +10,6 @@ const DEBUG_CONSOLE_FLAG: &str = "agent.debug_console";
const DEV_MODE_FLAG: &str = "agent.devmode";
const LOG_LEVEL_OPTION: &str = "agent.log";
const HOTPLUG_TIMOUT_OPTION: &str = "agent.hotplug_timeout";
const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
const LOG_VPORT_OPTION: &str = "agent.log_vport";
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
@@ -26,8 +24,6 @@ pub struct agentConfig {
pub dev_mode: bool,
pub log_level: slog::Level,
pub hotplug_timeout: time::Duration,
pub debug_console_vport: i32,
pub log_vport: i32,
}
impl agentConfig {
@@ -37,8 +33,6 @@ impl agentConfig {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
debug_console_vport: 0,
log_vport: 0,
}
}
@@ -66,35 +60,12 @@ impl agentConfig {
self.hotplug_timeout = hotplugTimeout;
}
}
if param.starts_with(format!("{}=", DEBUG_CONSOLE_VPORT_OPTION).as_str()) {
let port = get_vsock_port(param)?;
if port > 0 {
self.debug_console_vport = port;
}
}
if param.starts_with(format!("{}=", LOG_VPORT_OPTION).as_str()) {
let port = get_vsock_port(param)?;
if port > 0 {
self.log_vport = port;
}
}
}
Ok(())
}
}
fn get_vsock_port(p: &str) -> Result<i32> {
let fields: Vec<&str> = p.split("=").collect();
if fields.len() != 2 {
return Err(ErrorKind::ErrorCode("invalid port parameter".to_string()).into());
}
Ok(fields[1].parse::<i32>()?)
}
// Map logrus (https://godoc.org/github.com/sirupsen/logrus)
// log level to the equivalent slog log levels.
//

View File

@@ -40,7 +40,6 @@ use netlink::{RtnlHandle, NETLINK_ROUTE};
use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ};
use serde_json;
use std::convert::TryFrom;
use std::fs;
use std::os::unix::io::RawFd;
use std::os::unix::prelude::PermissionsExt;
@@ -80,15 +79,7 @@ impl agentService {
let sandbox;
let mut s;
let oci = match oci_spec.as_mut() {
Some(spec) => spec,
None => {
error!(sl!(), "no oci spec in the create container request!");
return Err(
ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into(),
);
}
};
let oci = oci_spec.as_mut().unwrap();
info!(sl!(), "receive createcontainer {}", &cid);
@@ -122,9 +113,7 @@ impl agentService {
// write spec to bundle path, hooks might
// read ocispec
let olddir = setup_bundle(oci)?;
// restore the cwd for kata-agent process.
defer!(unistd::chdir(&olddir).unwrap());
setup_bundle(oci)?;
let opts = CreateOpts {
cgroup_name: "".to_string(),
@@ -304,7 +293,7 @@ impl agentService {
);
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), true)?;
let mut signal = Signal::try_from(req.signal as i32).unwrap();
let mut signal = Signal::from_c_int(req.signal as i32).unwrap();
// For container initProcess, if it hasn't installed handler for "SIGTERM" signal,
// it will ignore the "SIGTERM" signal sent to it, thus send it "SIGKILL" signal
@@ -1749,7 +1738,7 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
Ok(())
}
fn setup_bundle(gspec: &Spec) -> Result<PathBuf> {
fn setup_bundle(gspec: &Spec) -> Result<()> {
if gspec.Root.is_none() {
return Err(nix::Error::Sys(Errno::EINVAL).into());
}
@@ -1768,8 +1757,7 @@ fn setup_bundle(gspec: &Spec) -> Result<PathBuf> {
);
let _ = oci.save(config.as_str());
let olddir = unistd::getcwd().chain_err(|| "cannot getcwd")?;
unistd::chdir(bundle_path)?;
Ok(olddir)
Ok(())
}

View File

@@ -16,7 +16,7 @@ pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
target_arch = "x86"
))]
pub const PCI_ROOT_BUS_PATH: &str = "/devices/pci0000:00";
#[cfg(target_arch = "aarch64")]
#[cfg(target_arch = "arm")]
pub const PCI_ROOT_BUS_PATH: &str = "/devices/platform/4010000000.pcie/pci0000:00";
pub const SYSFS_CPU_ONLINE_PATH: &str = "/sys/devices/system/cpu";

View File

@@ -2,8 +2,6 @@
//
// SPDX-License-Identifier: Apache-2.0
//
#[macro_use]
extern crate slog;
use slog::{BorrowedKV, Drain, Key, OwnedKV, OwnedKVList, Record, KV};
use std::collections::HashMap;
@@ -148,6 +146,12 @@ impl<D> RuntimeLevelFilter<D> {
level: Mutex::new(level),
}
}
fn set_level(&self, level: slog::Level) {
let mut log_level = self.level.lock().unwrap();
*log_level = level;
}
}
impl<D> Drain for RuntimeLevelFilter<D>

View File

@@ -20,17 +20,14 @@ extern crate signal_hook;
extern crate scan_fmt;
extern crate oci;
#[macro_use]
extern crate scopeguard;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_json;
#[macro_use]
extern crate netlink;
use futures::*;
use nix::fcntl::{self, OFlag};
use nix::sys::socket::{self, AddressFamily, SockAddr, SockFlag, SockType};
use nix::sys::wait::{self, WaitStatus};
use nix::unistd;
use prctl::set_child_subreaper;
@@ -38,7 +35,7 @@ use rustjail::errors::*;
use signal_hook::{iterator::Signals, SIGCHLD};
use std::collections::HashMap;
use std::env;
use std::fs::{self, File};
use std::fs;
use std::os::unix::fs as unixfs;
use std::os::unix::io::AsRawFd;
use std::path::Path;
@@ -50,6 +47,7 @@ use unistd::Pid;
mod config;
mod device;
mod linux_abi;
mod logging;
mod mount;
mod namespace;
mod network;
@@ -107,17 +105,13 @@ fn main() -> Result<()> {
lazy_static::initialize(&SHELLS);
lazy_static::initialize(&AGENT_CONFIG);
// support vsock log
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
let writer = unsafe { File::from_raw_fd(wfd) };
let agentConfig = AGENT_CONFIG.clone();
if unistd::getpid() == Pid::from_raw(1) {
// Init a temporary logger used by init agent as init process
// since before do the base mount, it wouldn't access "/proc/cmdline"
// to get the customzied debug level.
let writer = io::stdout();
let logger = logging::create_logger(NAME, "agent", slog::Level::Debug, writer);
init_agent_as_init(&logger)?;
}
@@ -131,32 +125,7 @@ fn main() -> Result<()> {
}
let config = agentConfig.read().unwrap();
let log_vport = config.log_vport as u32;
let log_handle = thread::spawn(move || -> Result<()> {
let mut reader = unsafe { File::from_raw_fd(rfd) };
if log_vport > 0 {
let listenfd = socket::socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::SOCK_CLOEXEC,
None,
)?;
let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, log_vport);
socket::bind(listenfd, &addr)?;
socket::listen(listenfd, 1)?;
let datafd = socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?;
let mut log_writer = unsafe { File::from_raw_fd(datafd) };
let _ = io::copy(&mut reader, &mut log_writer)?;
let _ = unistd::close(listenfd);
let _ = unistd::close(datafd);
}
// copy log to stdout
let mut stdout_writer = io::stdout();
let _ = io::copy(&mut reader, &mut stdout_writer)?;
Ok(())
});
let writer = unsafe { File::from_raw_fd(wfd) };
let writer = io::stdout();
// Recreate a logger with the log level get from "/proc/cmdline".
let logger = logging::create_logger(NAME, "agent", config.log_level, writer);
@@ -174,14 +143,13 @@ fn main() -> Result<()> {
let _guard = slog_scope::set_global_logger(logger.new(o!("subsystem" => "grpc")));
let shells = SHELLS.clone();
let debug_console_vport = config.debug_console_vport as u32;
let shell_handle = if config.debug_console {
let thread_logger = logger.clone();
thread::spawn(move || {
let shells = shells.lock().unwrap();
let result = setup_debug_console(shells.to_vec(), debug_console_vport);
let result = setup_debug_console(shells.to_vec());
if result.is_err() {
// Report error, but don't fail
warn!(thread_logger, "failed to setup debug console";
@@ -228,7 +196,6 @@ fn main() -> Result<()> {
// let _ = rx.wait();
handle.join().unwrap();
let _ = log_handle.join();
if config.debug_console {
shell_handle.join().unwrap();
@@ -363,35 +330,18 @@ lazy_static! {
// pub static mut TRACE_MODE: ;
use crate::config::agentConfig;
use nix::fcntl::{self, OFlag};
use nix::sys::stat::Mode;
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::PathBuf;
use std::process::{exit, Command, Stdio};
fn setup_debug_console(shells: Vec<String>, port: u32) -> Result<()> {
fn setup_debug_console(shells: Vec<String>) -> Result<()> {
for shell in shells.iter() {
let binary = PathBuf::from(shell);
if binary.exists() {
let f: RawFd = if port > 0 {
let listenfd = socket::socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::SOCK_CLOEXEC,
None,
)?;
let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, port);
socket::bind(listenfd, &addr)?;
socket::listen(listenfd, 1)?;
socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?
} else {
let mut flags = OFlag::empty();
flags.insert(OFlag::O_RDWR);
flags.insert(OFlag::O_CLOEXEC);
fcntl::open(CONSOLE_PATH, flags, Mode::empty())?
};
let f: RawFd = fcntl::open(CONSOLE_PATH, OFlag::O_RDWR, Mode::empty())?;
let cmd = Command::new(shell)
.arg("-i")
.stdin(unsafe { Stdio::from_raw_fd(f) })
.stdout(unsafe { Stdio::from_raw_fd(f) })
.stderr(unsafe { Stdio::from_raw_fd(f) })
@@ -431,7 +381,7 @@ mod tests {
let mut shells = shells_ref.lock().unwrap();
shells.clear();
let result = setup_debug_console(shells.to_vec(), 0);
let result = setup_debug_console(shells.to_vec());
assert!(result.is_err());
assert_eq!(result.unwrap_err().to_string(), "Error Code: 'no shell'");
@@ -454,7 +404,7 @@ mod tests {
shells.push(shell);
let result = setup_debug_console(shells.to_vec(), 0);
let result = setup_debug_console(shells.to_vec());
assert!(result.is_err());
assert_eq!(

View File

@@ -97,22 +97,16 @@ impl Sandbox {
//
// It's assumed that caller is calling this method after
// acquiring a lock on sandbox.
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
pub fn unset_sandbox_storage(&mut self, path: &str) -> bool {
match self.storages.get_mut(path) {
None => {
return Err(ErrorKind::ErrorCode(format!(
"Sandbox storage with path {} not found",
path
))
.into())
}
None => return false,
Some(count) => {
*count -= 1;
if *count < 1 {
self.storages.remove(path);
return Ok(true);
return true;
}
return Ok(false);
false
}
}
}
@@ -136,15 +130,8 @@ impl Sandbox {
// It's assumed that caller is calling this method after
// acquiring a lock on sandbox.
pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
match self.unset_sandbox_storage(path) {
Ok(res) => {
if res {
return self.remove_sandbox_storage(path);
}
}
Err(err) => {
return Err(err);
}
if self.unset_sandbox_storage(path) {
return self.remove_sandbox_storage(path);
}
Ok(())
}
@@ -275,279 +262,3 @@ fn online_memory(logger: &Logger) -> Result<()> {
online_resources(logger, SYSFS_MEMORY_ONLINE_PATH, r"memory[0-9]+", -1)?;
Ok(())
}
#[cfg(test)]
mod tests {
//use rustjail::Error;
use super::Sandbox;
use crate::{mount::BareMount, skip_if_not_root};
use nix::mount::MsFlags;
use protocols::oci::{Linux, Root, Spec};
use rustjail::container::LinuxContainer;
use rustjail::specconv::CreateOpts;
use slog::Logger;
use tempfile::Builder;
fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), rustjail::errors::Error> {
let baremount = BareMount::new(src, dst, "bind", MsFlags::MS_BIND, "", &logger);
baremount.mount()
}
#[test]
fn set_sandbox_storage() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let tmpdir = Builder::new().tempdir().unwrap();
let tmpdir_path = tmpdir.path().to_str().unwrap();
// Add a new sandbox storage
let new_storage = s.set_sandbox_storage(&tmpdir_path);
// Check the reference counter
let ref_count = s.storages[tmpdir_path];
assert_eq!(
ref_count, 1,
"Invalid refcount, got {} expected 1.",
ref_count
);
assert_eq!(new_storage, true);
// Use the existing sandbox storage
let new_storage = s.set_sandbox_storage(&tmpdir_path);
assert_eq!(new_storage, false, "Should be false as already exists.");
// Since we are using existing storage, the reference counter
// should be 2 by now.
let ref_count = s.storages[tmpdir_path];
assert_eq!(
ref_count, 2,
"Invalid refcount, got {} expected 2.",
ref_count
);
}
#[test]
fn remove_sandbox_storage() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let s = Sandbox::new(&logger).unwrap();
let tmpdir = Builder::new().tempdir().unwrap();
let tmpdir_path = tmpdir.path().to_str().unwrap();
let srcdir = Builder::new()
.prefix("src")
.tempdir_in(tmpdir_path)
.unwrap();
let srcdir_path = srcdir.path().to_str().unwrap();
let destdir = Builder::new()
.prefix("dest")
.tempdir_in(tmpdir_path)
.unwrap();
let destdir_path = destdir.path().to_str().unwrap();
let emptydir = Builder::new()
.prefix("empty")
.tempdir_in(tmpdir_path)
.unwrap();
assert!(
s.remove_sandbox_storage(&srcdir_path).is_err(),
"Expect Err as the directory i not a mountpoint"
);
assert!(s.remove_sandbox_storage("").is_err());
let invalid_dir = emptydir.path().join("invalid");
assert!(s
.remove_sandbox_storage(invalid_dir.to_str().unwrap())
.is_err());
// Now, create a double mount as this guarantees the directory cannot
// be deleted after the first umount.
for _i in 0..2 {
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
}
assert!(
s.remove_sandbox_storage(destdir_path).is_err(),
"Expect fail as deletion cannot happen due to the second mount."
);
// This time it should work as the previous two calls have undone the double
// mount.
assert!(s.remove_sandbox_storage(destdir_path).is_ok());
}
#[test]
#[allow(unused_assignments)]
fn unset_and_remove_sandbox_storage() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
// FIX: This test fails, not sure why yet.
assert!(
s.unset_and_remove_sandbox_storage("/tmp/testEphePath")
.is_err(),
"Should fail because sandbox storage doesn't exist"
);
let tmpdir = Builder::new().tempdir().unwrap();
let tmpdir_path = tmpdir.path().to_str().unwrap();
let srcdir = Builder::new()
.prefix("src")
.tempdir_in(tmpdir_path)
.unwrap();
let srcdir_path = srcdir.path().to_str().unwrap();
let destdir = Builder::new()
.prefix("dest")
.tempdir_in(tmpdir_path)
.unwrap();
let destdir_path = destdir.path().to_str().unwrap();
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
assert_eq!(s.set_sandbox_storage(&destdir_path), true);
assert!(s.unset_and_remove_sandbox_storage(&destdir_path).is_ok());
let mut other_dir_str = String::new();
{
// Create another folder in a separate scope to ensure that is
// deleted
let other_dir = Builder::new()
.prefix("dir")
.tempdir_in(tmpdir_path)
.unwrap();
let other_dir_path = other_dir.path().to_str().unwrap();
other_dir_str = other_dir_path.to_string();
assert_eq!(s.set_sandbox_storage(&other_dir_path), true);
}
assert!(s.unset_and_remove_sandbox_storage(&other_dir_str).is_err());
}
#[test]
fn unset_sandbox_storage() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let storage_path = "/tmp/testEphe";
// Add a new sandbox storage
assert_eq!(s.set_sandbox_storage(&storage_path), true);
// Use the existing sandbox storage
assert_eq!(
s.set_sandbox_storage(&storage_path),
false,
"Expects false as the storage is not new."
);
assert_eq!(
s.unset_sandbox_storage(&storage_path).unwrap(),
false,
"Expects false as there is still a storage."
);
// Reference counter should decrement to 1.
let ref_count = s.storages[storage_path];
assert_eq!(
ref_count, 1,
"Invalid refcount, got {} expected 1.",
ref_count
);
assert_eq!(
s.unset_sandbox_storage(&storage_path).unwrap(),
true,
"Expects true as there is still a storage."
);
// Since no container is using this sandbox storage anymore
// there should not be any reference in sandbox struct
// for the given storage
assert!(
!s.storages.contains_key(storage_path),
"The storages map should not contain the key {}",
storage_path
);
// If no container is using the sandbox storage, the reference
// counter for it should not exist.
assert!(
s.unset_sandbox_storage(&storage_path).is_err(),
"Expects false as the reference counter should no exist."
);
}
fn create_dummy_opts() -> CreateOpts {
let mut root = Root::new();
root.Path = String::from("/");
let linux = Linux::new();
let mut spec = Spec::new();
spec.Root = Some(root).into();
spec.Linux = Some(linux).into();
CreateOpts {
cgroup_name: "".to_string(),
use_systemd_cgroup: false,
no_pivot_root: false,
no_new_keyring: false,
spec: Some(spec),
rootless_euid: false,
rootless_cgroup: false,
}
}
fn create_linuxcontainer() -> LinuxContainer {
LinuxContainer::new(
"some_id",
"/run/agent",
create_dummy_opts(),
&slog_scope::logger(),
)
.unwrap()
}
#[test]
fn get_container_entry_exist() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let linux_container = create_linuxcontainer();
s.containers
.insert("testContainerID".to_string(), linux_container);
let cnt = s.get_container("testContainerID");
assert!(cnt.is_some());
}
#[test]
fn get_container_no_entry() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let cnt = s.get_container("testContainerID");
assert!(cnt.is_none());
}
#[test]
fn add_and_get_container() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let linux_container = create_linuxcontainer();
s.add_container(linux_container);
assert!(s.get_container("some_id").is_some());
}
}