Compare commits

..

23 Commits

Author SHA1 Message Date
Jose Carlos Venegas Munoz
67849dd77b Merge pull request #1020 from jcvenegas/1.10.8-branch-bump
# Kata Containers 1.10.8
2020-10-21 14:18:43 -05:00
Carlos Venegas
c83bca75bd release: Kata Containers 1.10.8
Version bump no changes

Signed-off-by: Carlos Venegas <jos.c.venegas.munoz@intel.com>
2020-10-21 10:46:43 -05:00
Eric Ernst
7d7078e5c2 Merge pull request #632 from egernst/1.10.7-branch-bump
# Kata Containers 1.10.7
2020-08-28 08:54:56 -07:00
Eric Ernst
b641c3fe25 release: Kata Containers 1.10.7
Version bump no changes

Signed-off-by: Eric Ernst <eric.g.ernst@gmail.com>
2020-08-27 09:10:59 -07:00
Archana Shinde
ac39720b48 Merge pull request #349 from bergwolf/1.10.6-branch-bump
# Kata Containers 1.10.6
2020-07-01 09:35:54 -07:00
Peng Tao
e8412104ca release: Kata Containers 1.10.6
Version bump no changes

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-06-29 03:17:19 +00:00
Salvador Fuentes
778bfd9608 Merge pull request #274 from amshinde/1.10.5-branch-bump
# Kata Containers 1.10.5
2020-06-05 17:38:31 -05:00
Archana Shinde
62101c2155 release: Kata Containers 1.10.5
Version bump no changes

Signed-off-by: Archana Shinde <archana.m.shinde@intel.com>
2020-06-05 15:45:47 +00:00
Jose Carlos Venegas Munoz
93428ca15f Merge pull request #213 from jcvenegas/backport-1.10-fix-release
release: actions: pin artifact to v1
2020-05-07 11:01:08 -05:00
Jose Carlos Venegas Munoz
486ebf73f8 release: actions: pin artifact to v1
the actions upload/download-artifact moved to a new version
and master now is not compatible.

Fixes: #211

Signed-off-by: Jose Carlos Venegas Munoz <jose.carlos.venegas.munoz@intel.com>
2020-05-07 15:38:26 +00:00
Jose Carlos Venegas Munoz
57ac170ca5 Merge pull request #208 from katabuilder/1.10.4-branch-bump
# Kata Containers 1.10.4
2020-05-06 12:26:57 -05:00
katabuilder
31d2b77836 release: Kata Containers 1.10.4
Version bump no changes

Signed-off-by: katabuilder <katabuilder@katacontainers.io>
2020-05-05 20:53:35 +00:00
Salvador Fuentes
2a77d1730d Merge pull request #182 from chavafg/1.10.3-branch-bump
# Kata Containers 1.10.3
2020-04-17 18:13:36 -05:00
Archana Shinde
1199642234 Merge pull request #185 from chavafg/topic/fix-license
license: add license header
2020-04-17 15:59:46 -07:00
Salvador Fuentes
c6551f67ad license: add license header
add license header to `src/agent/rustjail/src/validator.rs`

Fixes: #184.

Signed-off-by: Salvador Fuentes <salvador.fuentes@intel.com>
2020-04-17 16:51:55 -05:00
Salvador Fuentes
10759b7251 release: Kata Containers 1.10.3
failed to get logs
Version bump no changes

Signed-off-by: Salvador Fuentes <salvador.fuentes@intel.com>
2020-04-17 17:54:16 +00:00
Xu Wang
f969e5645d Merge pull request #161 from bergwolf/1.10.2-branch-bump
# Kata Containers 1.10.2
2020-03-18 11:04:40 +08:00
Peng Tao
b7ece51b3f release: Kata Containers 1.10.2
537ecbe path-absolutize: version update

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
2020-03-18 10:47:57 +08:00
Penny Zheng
537ecbee8e path-absolutize: version update
The latest tag version v1.2.0 fixes the error of inappropriately using
mutable static.

Fixes: #144

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
2020-03-18 10:46:22 +08:00
Archana Shinde
aaff08d425 Merge pull request #140 from amshinde/1.10.1-branch-bump
# Kata Containers 1.10.1
2020-02-18 14:08:27 -08:00
Archana Shinde
44f0052967 release: Kata Containers 1.10.1
Version bump no changes

Signed-off-by: Archana Shinde <archana.m.shinde@intel.com>
2020-02-18 19:21:10 +00:00
GabyCT
85b3a67215 Merge pull request #121 from jcvenegas/1.10.0-branch-bump
# Kata Containers 1.10.0
2020-01-14 14:00:08 -06:00
Jose Carlos Venegas Munoz
fc0deb5b9a release: Kata Containers 1.10.0
Starting to version this repository

Signed-off-by: Jose Carlos Venegas Munoz <jose.carlos.venegas.munoz@intel.com>
2020-01-14 19:16:13 +00:00
22 changed files with 139 additions and 1678 deletions

View File

@@ -14,5 +14,5 @@ do
tar -xvf $c
done
tar cvfJ ../kata-static.tar.xz ./opt
tar cfJ ../kata-static.tar.xz ./opt
popd >>/dev/null

View File

@@ -1,4 +1,3 @@
name: Publish release tarball
on:
push:
tags:
@@ -17,7 +16,7 @@ jobs:
popd
./packaging/artifact-list.sh > artifact-list.txt
- name: save-artifact-list
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: artifact-list
path: artifact-list.txt
@@ -30,7 +29,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- run: |
@@ -45,7 +44,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-kernel.tar.gz
@@ -58,7 +57,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- run: |
@@ -73,7 +72,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-experimental-kernel.tar.gz
@@ -86,7 +85,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-qemu
@@ -99,7 +98,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-qemu.tar.gz
@@ -112,7 +111,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-nemu
@@ -125,7 +124,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-nemu.tar.gz
@@ -139,7 +138,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-qemu-virtiofsd
@@ -152,7 +151,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-qemu-virtiofsd.tar.gz
@@ -166,7 +165,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-image
@@ -179,7 +178,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-image.tar.gz
@@ -193,7 +192,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-firecracker
@@ -206,7 +205,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-firecracker.tar.gz
@@ -220,7 +219,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-clh
@@ -233,7 +232,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-clh.tar.gz
@@ -247,7 +246,7 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifact-list
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: artifact-list
- name: build-kata-components
@@ -260,7 +259,7 @@ jobs:
fi
- name: store-artifacts
if: env.artifact-built == 'true'
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: kata-artifacts
path: kata-static-kata-components.tar.gz
@@ -271,14 +270,14 @@ jobs:
steps:
- uses: actions/checkout@v1
- name: get-artifacts
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: kata-artifacts
- name: colate-artifacts
run: |
$GITHUB_WORKSPACE/.github/workflows/gather-artifacts.sh
- name: store-artifacts
uses: actions/upload-artifact@master
uses: actions/upload-artifact@v1
with:
name: release-candidate
path: kata-static.tar.xz
@@ -288,7 +287,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: get-artifacts
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: release-candidate
- name: build-and-push-kata-deploy-ci
@@ -329,7 +328,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: download-artifacts
uses: actions/download-artifact@master
uses: actions/download-artifact@v1
with:
name: release-candidate
- name: install hub
@@ -340,10 +339,7 @@ jobs:
- name: push static tarball to github
run: |
tag=$(echo $GITHUB_REF | cut -d/ -f3-)
tarball="kata-static-$tag-x86_64.tar.xz"
repo="https://github.com/kata-containers/runtime.git"
mv release-candidate/kata-static.tar.xz "release-candidate/${tarball}"
git clone "${repo}"
mv release-candidate/kata-static.tar.xz release-candidate/kata-static-$tag-x86_64.tar.xz
git clone https://github.com/kata-containers/runtime.git
cd runtime
echo "uploading asset '${tarball}' to '${repo}' tag: ${tag}"
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a "../release-candidate/${tarball}" "${tag}"
GITHUB_TOKEN=${{ secrets.GIT_UPLOAD_TOKEN }} hub release edit -m "" -a ../release-candidate/kata-static-${tag}-x86_64.tar.xz "${tag}"

View File

@@ -1 +1 @@
1.11.0-alpha1
1.10.8

View File

@@ -28,6 +28,12 @@ run_static_checks()
bash "$tests_repo_dir/.ci/static-checks.sh" "github.com/kata-containers/kata-containers"
}
run_rust_test()
{
clone_tests_repo
bash "$tests_repo_dir/.ci/rust-test.sh"
}
run_go_test()
{
clone_tests_repo

View File

@@ -1,15 +0,0 @@
## Copyright (c) 2020 ARM Limited
##
## SPDX-License-Identifier: Apache-2.0
##
[target.aarch64-unknown-linux-musl]
## Only setting linker with `aarch64-linux-musl-gcc`, the
## `rust-agent` could be totally statically linked.
linker = "aarch64-linux-musl-gcc"
## The __addtf3, __subtf3 and __multf3 symbols are used by aarch64-musl,
## but are not provided by rust compiler-builtins.
## For now, the only functional workaround accepted by rust communities
## is to get them from libgcc.
rustflags = [ "-C", "link-arg=-lgcc" ]

View File

@@ -6,7 +6,6 @@ edition = "2018"
[dependencies]
oci = { path = "oci" }
logging = { path = "logging" }
rustjail = { path = "rustjail" }
protocols = { path = "protocols" }
netlink = { path = "netlink" }
@@ -16,27 +15,19 @@ grpcio = { git="https://github.com/alipay/grpc-rs", branch="rust_agent" }
protobuf = "2.6.1"
futures = "0.1.27"
libc = "0.2.58"
nix = "0.17.0"
nix = "0.14.1"
prctl = "1.0.0"
serde_json = "1.0.39"
signal-hook = "0.1.9"
scan_fmt = "0.2.3"
scopeguard = "1.0.0"
regex = "1"
# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog-json = "2.3.0"
slog-async = "2.3.0"
slog-scope = "4.1.2"
# for testing
tempfile = "3.1.0"
[workspace]
members = [
"logging",
"netlink",
"oci",
"protocols",
"rustjail",
]

View File

@@ -34,22 +34,6 @@ TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
DESTDIR :=
BINDIR := /usr/bin
# Define if agent will be installed as init
INIT := no
# Path to systemd unit directory if installed as not init.
UNIT_DIR := /usr/lib/systemd/system
GENERATED_FILES :=
ifeq ($(INIT),no)
# Unit file to start kata agent in systemd systems
UNIT_FILES = kata-agent.service
GENERATED_FILES := $(UNIT_FILES)
# Target to be reached in systemd services
UNIT_FILES += kata-containers.target
endif
# Display name of command and it's version (or a message if not available).
#
# Arguments:
@@ -63,10 +47,6 @@ define get_toolchain_version
$(shell printf "%s: %s\\n" "toolchain" "$(or $(shell rustup show active-toolchain 2>/dev/null), (unknown))")
endef
define INSTALL_FILE
install -D -m 644 $1 $(DESTDIR)$2/$1 || exit 1;
endef
default: $(TARGET) show-header
$(TARGET): $(TARGET_PATH)
@@ -77,30 +57,18 @@ $(TARGET_PATH): $(SOURCES) | show-summary
show-header:
@printf "%s - version %s (commit %s)\n\n" "$(TARGET)" "$(VERSION)" "$(COMMIT_MSG)"
$(GENERATED_FILES): %: %.in
@sed \
-e 's|[@]bindir[@]|$(BINDIR)|g' \
-e 's|[@]kata-agent[@]|$(TARGET)|g' \
"$<" > "$@"
install: build-service
install:
@install -D $(TARGET_PATH) $(DESTDIR)/$(BINDIR)/$(TARGET)
clean:
@cargo clean
check:
@cargo test --all --target $(TRIPLE)
@cargo test --target $(TRIPLE)
run:
@cargo run --target $(TRIPLE)
build-service: $(GENERATED_FILES)
ifeq ($(INIT),no)
@echo "Installing systemd unit files..."
$(foreach f,$(UNIT_FILES),$(call INSTALL_FILE,$f,$(UNIT_DIR)))
endif
show-summary: show-header
@printf "project:\n"
@printf " name: $(PROJECT_NAME)\n"

View File

@@ -1,22 +0,0 @@
#
# Copyright (c) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
[Unit]
Description=Kata Containers Agent
Documentation=https://github.com/kata-containers/kata-containers
Wants=kata-containers.target
[Service]
# Send agent output to tty to allow capture debug logs
# from a VM vsock port
StandardOutput=tty
Type=simple
ExecStart=@bindir@/@kata-agent@
LimitNOFILE=infinity
# ExecStop is required for static agent tracing; in all other scenarios
# the runtime handles shutting down the VM.
ExecStop=/bin/sync ; /usr/bin/systemctl --force poweroff
FailureAction=poweroff

View File

@@ -1,15 +0,0 @@
#
# Copyright (c) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
[Unit]
Description=Kata Containers Agent Target
Requires=basic.target
Requires=tmp.mount
Wants=chronyd.service
Requires=kata-agent.service
Conflicts=rescue.service rescue.target
After=basic.target rescue.service rescue.target
AllowIsolate=yes

View File

@@ -1,20 +0,0 @@
[package]
name = "logging"
version = "0.1.0"
authors = ["Tim Zhang <tim@hyper.sh>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
serde_json = "1.0.39"
# slog:
# - Dynamic keys required to allow HashMap keys to be slog::Serialized.
# - The 'max_*' features allow changing the log level at runtime
# (by stopping the compiler from removing log calls).
slog = { version = "2.5.2", features = ["dynamic-keys", "max_level_trace", "release_max_level_info"] }
slog-json = "2.3.0"
slog-async = "2.3.0"
slog-scope = "4.1.2"
# for testing
tempfile = "3.1.0"

View File

@@ -8,7 +8,7 @@ edition = "2018"
[dependencies]
libc = "0.2.58"
nix = "0.17.0"
nix = "0.14.1"
protobuf = "2.6.1"
rustjail = { path = "../rustjail" }
protocols = { path = "../protocols" }

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@ serde_derive = "1.0.91"
oci = { path = "../oci" }
protocols = { path ="../protocols" }
caps = "0.3.0"
nix = "0.17.0"
nix = "0.14.1"
scopeguard = "1.0.0"
prctl = "1.0.0"
lazy_static = "1.3.0"

View File

@@ -6,7 +6,7 @@
use lazy_static;
use protocols::oci::{Hook, Linux, LinuxNamespace, LinuxResources, POSIXRlimit, Spec};
use serde_json;
use std::ffi::{CStr, CString};
use std::ffi::CString;
use std::fs;
use std::mem;
use std::os::unix::io::RawFd;
@@ -672,21 +672,19 @@ fn do_exec(logger: &Logger, path: &str, args: &[String], env: &[String]) -> Resu
let logger = logger.new(o!("command" => "exec"));
let p = CString::new(path.to_string()).unwrap();
let sa: Vec<CString> = args
let a: Vec<CString> = args
.iter()
.map(|s| CString::new(s.to_string()).unwrap_or_default())
.collect();
let a: Vec<&CStr> = sa.iter().map(|s| s.as_c_str()).collect();
for (key, _) in env::vars() {
env::remove_var(key);
}
for e in env.iter() {
let v: Vec<&str> = e.splitn(2, "=").collect();
let v: Vec<&str> = e.split("=").collect();
if v.len() != 2 {
info!(logger, "incorrect env config!");
continue;
}
env::set_var(v[0], v[1]);
}
@@ -698,7 +696,7 @@ fn do_exec(logger: &Logger, path: &str, args: &[String], env: &[String]) -> Resu
*/
// execvp doesn't use env for the search path, so we set env manually
debug!(logger, "exec process right now!");
if let Err(e) = unistd::execvp(p.as_c_str(), a.as_slice()) {
if let Err(e) = unistd::execvp(&p, &a) {
info!(logger, "execve failed!!!");
info!(logger, "binary: {:?}, args: {:?}, envs: {:?}", p, a, env);
match e {

View File

@@ -12,7 +12,7 @@ use std::os::unix::io::RawFd;
// use crate::cgroups::Manager as CgroupManager;
// use crate::intelrdt::Manager as RdtManager;
use nix::fcntl::{fcntl, FcntlArg, OFlag};
use nix::fcntl::OFlag;
use nix::sys::signal::{self, Signal};
use nix::sys::socket::{self, AddressFamily, SockFlag, SockType};
use nix::sys::wait::{self, WaitStatus};
@@ -72,13 +72,7 @@ impl ProcessOperations for Process {
}
impl Process {
pub fn new(
logger: &Logger,
ocip: &OCIProcess,
id: &str,
init: bool,
pipe_size: i32,
) -> Result<Self> {
pub fn new(logger: &Logger, ocip: &OCIProcess, id: &str, init: bool) -> Result<Self> {
let logger = logger.new(o!("subsystem" => "process"));
let mut p = Process {
@@ -134,54 +128,14 @@ impl Process {
p.parent_stdin = Some(pstdin);
p.stdin = Some(stdin);
let (pstdout, stdout) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
let (pstdout, stdout) = unistd::pipe2(OFlag::O_CLOEXEC)?;
p.parent_stdout = Some(pstdout);
p.stdout = Some(stdout);
let (pstderr, stderr) = create_extended_pipe(OFlag::O_CLOEXEC, pipe_size)?;
let (pstderr, stderr) = unistd::pipe2(OFlag::O_CLOEXEC)?;
p.parent_stderr = Some(pstderr);
p.stderr = Some(stderr);
Ok(p)
}
}
fn create_extended_pipe(flags: OFlag, pipe_size: i32) -> Result<(RawFd, RawFd)> {
let (r, w) = unistd::pipe2(flags)?;
if pipe_size > 0 {
fcntl(w, FcntlArg::F_SETPIPE_SZ(pipe_size))?;
}
Ok((r, w))
}
#[cfg(test)]
mod tests {
use crate::process::create_extended_pipe;
use nix::fcntl::{fcntl, FcntlArg, OFlag};
use std::fs;
use std::os::unix::io::RawFd;
fn get_pipe_max_size() -> i32 {
fs::read_to_string("/proc/sys/fs/pipe-max-size")
.unwrap()
.trim()
.parse::<i32>()
.unwrap()
}
fn get_pipe_size(fd: RawFd) -> i32 {
fcntl(fd, FcntlArg::F_GETPIPE_SZ).unwrap()
}
#[test]
fn test_create_extended_pipe() {
// Test the default
let (r, w) = create_extended_pipe(OFlag::O_CLOEXEC, 0).unwrap();
// Test setting to the max size
let max_size = get_pipe_max_size();
let (r, w) = create_extended_pipe(OFlag::O_CLOEXEC, max_size).unwrap();
let actual_size = get_pipe_size(w);
assert_eq!(max_size, actual_size);
}
}

View File

@@ -1,3 +1,8 @@
// Copyright (c) 2019 Ant Financial
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::container::Config;
use crate::errors::*;
use lazy_static;

View File

@@ -10,13 +10,9 @@ const DEBUG_CONSOLE_FLAG: &str = "agent.debug_console";
const DEV_MODE_FLAG: &str = "agent.devmode";
const LOG_LEVEL_OPTION: &str = "agent.log";
const HOTPLUG_TIMOUT_OPTION: &str = "agent.hotplug_timeout";
const DEBUG_CONSOLE_VPORT_OPTION: &str = "agent.debug_console_vport";
const LOG_VPORT_OPTION: &str = "agent.log_vport";
const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
// FIXME: unused
const TRACE_MODE_FLAG: &str = "agent.trace";
@@ -28,9 +24,6 @@ pub struct agentConfig {
pub dev_mode: bool,
pub log_level: slog::Level,
pub hotplug_timeout: time::Duration,
pub debug_console_vport: i32,
pub log_vport: i32,
pub container_pipe_size: i32,
}
impl agentConfig {
@@ -40,9 +33,6 @@ impl agentConfig {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
debug_console_vport: 0,
log_vport: 0,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
}
}
@@ -70,40 +60,12 @@ impl agentConfig {
self.hotplug_timeout = hotplugTimeout;
}
}
if param.starts_with(format!("{}=", DEBUG_CONSOLE_VPORT_OPTION).as_str()) {
let port = get_vsock_port(param)?;
if port > 0 {
self.debug_console_vport = port;
}
}
if param.starts_with(format!("{}=", LOG_VPORT_OPTION).as_str()) {
let port = get_vsock_port(param)?;
if port > 0 {
self.log_vport = port;
}
}
if param.starts_with(format!("{}=", CONTAINER_PIPE_SIZE_OPTION).as_str()) {
let container_pipe_size = get_container_pipe_size(param)?;
self.container_pipe_size = container_pipe_size
}
}
Ok(())
}
}
fn get_vsock_port(p: &str) -> Result<i32> {
let fields: Vec<&str> = p.split("=").collect();
if fields.len() != 2 {
return Err(ErrorKind::ErrorCode("invalid port parameter".to_string()).into());
}
Ok(fields[1].parse::<i32>()?)
}
// Map logrus (https://godoc.org/github.com/sirupsen/logrus)
// log level to the equivalent slog log levels.
//
@@ -165,40 +127,6 @@ fn get_hotplug_timeout(param: &str) -> Result<time::Duration> {
Ok(time::Duration::from_secs(value.unwrap()))
}
fn get_container_pipe_size(param: &str) -> Result<i32> {
let fields: Vec<&str> = param.split("=").collect();
if fields.len() != 2 {
return Err(
ErrorKind::ErrorCode(String::from("invalid container pipe size parameter")).into(),
);
}
let key = fields[0];
if key != CONTAINER_PIPE_SIZE_OPTION {
return Err(
ErrorKind::ErrorCode(String::from("invalid container pipe size key name")).into(),
);
}
let res = fields[1].parse::<i32>();
if res.is_err() {
return Err(
ErrorKind::ErrorCode(String::from("unable to parse container pipe size")).into(),
);
}
let value = res.unwrap();
if value < 0 {
return Err(ErrorKind::ErrorCode(String::from(
"container pipe size should not be negative",
))
.into());
}
Ok(value)
}
#[cfg(test)]
mod tests {
use super::*;
@@ -215,11 +143,6 @@ mod tests {
const ERR_INVALID_HOTPLUG_TIMEOUT_PARAM: &str = "unable to parse hotplug timeout";
const ERR_INVALID_HOTPLUG_TIMEOUT_KEY: &str = "invalid hotplug timeout key name";
const ERR_INVALID_CONTAINER_PIPE_SIZE: &str = "invalid container pipe size parameter";
const ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM: &str = "unable to parse container pipe size";
const ERR_INVALID_CONTAINER_PIPE_SIZE_KEY: &str = "invalid container pipe size key name";
const ERR_INVALID_CONTAINER_PIPE_NEGATIVE: &str = "container pipe size should not be negative";
// helper function to make errors less crazy-long
fn make_err(desc: &str) -> Error {
ErrorKind::ErrorCode(desc.to_string()).into()
@@ -266,7 +189,6 @@ mod tests {
dev_mode: bool,
log_level: slog::Level,
hotplug_timeout: time::Duration,
container_pipe_size: i32,
}
let tests = &[
@@ -276,7 +198,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.debug_console agent.devmodex",
@@ -284,7 +205,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.logx=debug",
@@ -292,7 +212,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.log=debug",
@@ -300,7 +219,6 @@ mod tests {
dev_mode: false,
log_level: slog::Level::Debug,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "",
@@ -308,7 +226,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo",
@@ -316,7 +233,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo bar",
@@ -324,7 +240,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo bar",
@@ -332,7 +247,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo agent bar",
@@ -340,7 +254,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo debug_console agent bar devmode",
@@ -348,7 +261,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.debug_console",
@@ -356,7 +268,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: " agent.debug_console ",
@@ -364,7 +275,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.debug_console foo",
@@ -372,7 +282,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: " agent.debug_console foo",
@@ -380,7 +289,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo agent.debug_console bar",
@@ -388,7 +296,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo agent.debug_console",
@@ -396,7 +303,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo agent.debug_console ",
@@ -404,7 +310,6 @@ mod tests {
dev_mode: false,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.devmode",
@@ -412,7 +317,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: " agent.devmode ",
@@ -420,7 +324,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.devmode foo",
@@ -428,7 +331,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: " agent.devmode foo",
@@ -436,7 +338,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo agent.devmode bar",
@@ -444,7 +345,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo agent.devmode",
@@ -452,7 +352,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "foo agent.devmode ",
@@ -460,7 +359,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.devmode agent.debug_console",
@@ -468,7 +366,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=100",
@@ -476,7 +373,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: time::Duration::from_secs(100),
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.devmode agent.debug_console agent.hotplug_timeout=0",
@@ -484,39 +380,6 @@ mod tests {
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=2097152",
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: 2097152,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=100",
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: 100,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pipe_size=0",
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
TestData {
contents: "agent.devmode agent.debug_console agent.container_pip_siz=100",
debug_console: true,
dev_mode: true,
log_level: DEFAULT_LOG_LEVEL,
hotplug_timeout: DEFAULT_HOTPLUG_TIMEOUT,
container_pipe_size: DEFAULT_CONTAINER_PIPE_SIZE,
},
];
@@ -546,15 +409,9 @@ mod tests {
.expect(&format!("{}: failed to write file contents", msg));
let mut config = agentConfig::new();
assert_eq!(config.debug_console, false, "{}", msg);
assert_eq!(config.dev_mode, false, "{}", msg);
assert_eq!(
config.hotplug_timeout,
time::Duration::from_secs(3),
"{}",
msg
);
assert_eq!(config.container_pipe_size, 0, "{}", msg);
assert!(config.debug_console == false, msg);
assert!(config.dev_mode == false, msg);
assert!(config.hotplug_timeout == time::Duration::from_secs(3), msg);
let result = config.parse_cmdline(filename);
assert!(result.is_ok(), "{}", msg);
@@ -563,7 +420,6 @@ mod tests {
assert_eq!(d.dev_mode, config.dev_mode, "{}", msg);
assert_eq!(d.log_level, config.log_level, "{}", msg);
assert_eq!(d.hotplug_timeout, config.hotplug_timeout, "{}", msg);
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
}
}
@@ -804,78 +660,4 @@ mod tests {
assert_result!(d.result, result, format!("{}", msg));
}
}
#[test]
fn test_get_container_pipe_size() {
#[derive(Debug)]
struct TestData<'a> {
param: &'a str,
result: Result<i32>,
}
let tests = &[
TestData {
param: "",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE)),
},
TestData {
param: "agent.container_pipe_size",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE)),
},
TestData {
param: "foo=bar",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_KEY)),
},
TestData {
param: "agent.container_pip_siz=1",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_KEY)),
},
TestData {
param: "agent.container_pipe_size=1",
result: Ok(1),
},
TestData {
param: "agent.container_pipe_size=3",
result: Ok(3),
},
TestData {
param: "agent.container_pipe_size=2097152",
result: Ok(2097152),
},
TestData {
param: "agent.container_pipe_size=0",
result: Ok(0),
},
TestData {
param: "agent.container_pipe_size=-1",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_NEGATIVE)),
},
TestData {
param: "agent.container_pipe_size=foobar",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
},
TestData {
param: "agent.container_pipe_size=j",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
},
TestData {
param: "agent.container_pipe_size=4jbsdja",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
},
TestData {
param: "agent.container_pipe_size=4294967296",
result: Err(make_err(ERR_INVALID_CONTAINER_PIPE_SIZE_PARAM)),
},
];
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = get_container_pipe_size(d.param);
let msg = format!("{}: result: {:?}", msg, result);
assert_result!(d.result, result, format!("{}", msg));
}
}
}

View File

@@ -36,12 +36,10 @@ use crate::namespace::{NSTYPEIPC, NSTYPEPID, NSTYPEUTS};
use crate::random;
use crate::sandbox::Sandbox;
use crate::version::{AGENT_VERSION, API_VERSION};
use crate::AGENT_CONFIG;
use netlink::{RtnlHandle, NETLINK_ROUTE};
use libc::{self, c_ushort, pid_t, winsize, TIOCSWINSZ};
use serde_json;
use std::convert::TryFrom;
use std::fs;
use std::os::unix::io::RawFd;
use std::os::unix::prelude::PermissionsExt;
@@ -81,15 +79,7 @@ impl agentService {
let sandbox;
let mut s;
let oci = match oci_spec.as_mut() {
Some(spec) => spec,
None => {
error!(sl!(), "no oci spec in the create container request!");
return Err(
ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into(),
);
}
};
let oci = oci_spec.as_mut().unwrap();
info!(sl!(), "receive createcontainer {}", &cid);
@@ -123,9 +113,7 @@ impl agentService {
// write spec to bundle path, hooks might
// read ocispec
let olddir = setup_bundle(oci)?;
// restore the cwd for kata-agent process.
defer!(unistd::chdir(&olddir).unwrap());
setup_bundle(oci)?;
let opts = CreateOpts {
cgroup_name: "".to_string(),
@@ -140,9 +128,8 @@ impl agentService {
let mut ctr: LinuxContainer =
LinuxContainer::new(cid.as_str(), CONTAINER_BASE, opts, &sl!())?;
let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size;
let p = if oci.Process.is_some() {
let tp = Process::new(&sl!(), oci.get_Process(), eid.as_str(), true, pipe_size)?;
let tp = Process::new(&sl!(), oci.get_Process(), eid.as_str(), true)?;
tp
} else {
info!(sl!(), "no process configurations!");
@@ -276,8 +263,7 @@ impl agentService {
return Err(ErrorKind::Nix(nix::Error::from_errno(nix::errno::Errno::EINVAL)).into());
};
let pipe_size = AGENT_CONFIG.read().unwrap().container_pipe_size;
let p = Process::new(&sl!(), ocip, exec_id.as_str(), false, pipe_size)?;
let p = Process::new(&sl!(), ocip, exec_id.as_str(), false)?;
let ctr = match sandbox.get_container(cid.as_str()) {
Some(v) => v,
@@ -307,7 +293,7 @@ impl agentService {
);
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), true)?;
let mut signal = Signal::try_from(req.signal as i32).unwrap();
let mut signal = Signal::from_c_int(req.signal as i32).unwrap();
// For container initProcess, if it hasn't installed handler for "SIGTERM" signal,
// it will ignore the "SIGTERM" signal sent to it, thus send it "SIGKILL" signal
@@ -929,19 +915,7 @@ impl protocols::agent_grpc::AgentService for agentService {
let eid = req.exec_id.clone();
let s = Arc::clone(&self.sandbox);
let mut sandbox = s.lock().unwrap();
let p = match find_process(&mut sandbox, cid.as_str(), eid.as_str(), false) {
Ok(v) => v,
Err(e) => {
let f = sink
.fail(RpcStatus::new(
RpcStatusCode::InvalidArgument,
Some(format!("invalid argument: {}", e)),
))
.map_err(|_e| error!(sl!(), "invalid argument"));
ctx.spawn(f);
return;
}
};
let p = find_process(&mut sandbox, cid.as_str(), eid.as_str(), false).unwrap();
if p.term_master.is_none() {
let f = sink
@@ -1764,7 +1738,7 @@ fn do_copy_file(req: &CopyFileRequest) -> Result<()> {
Ok(())
}
fn setup_bundle(gspec: &Spec) -> Result<PathBuf> {
fn setup_bundle(gspec: &Spec) -> Result<()> {
if gspec.Root.is_none() {
return Err(nix::Error::Sys(Errno::EINVAL).into());
}
@@ -1783,8 +1757,7 @@ fn setup_bundle(gspec: &Spec) -> Result<PathBuf> {
);
let _ = oci.save(config.as_str());
let olddir = unistd::getcwd().chain_err(|| "cannot getcwd")?;
unistd::chdir(bundle_path)?;
Ok(olddir)
Ok(())
}

View File

@@ -16,7 +16,7 @@ pub const SYSFS_PCI_BUS_RESCAN_FILE: &str = "/sys/bus/pci/rescan";
target_arch = "x86"
))]
pub const PCI_ROOT_BUS_PATH: &str = "/devices/pci0000:00";
#[cfg(target_arch = "aarch64")]
#[cfg(target_arch = "arm")]
pub const PCI_ROOT_BUS_PATH: &str = "/devices/platform/4010000000.pcie/pci0000:00";
pub const SYSFS_CPU_ONLINE_PATH: &str = "/sys/devices/system/cpu";

View File

@@ -2,8 +2,6 @@
//
// SPDX-License-Identifier: Apache-2.0
//
#[macro_use]
extern crate slog;
use slog::{BorrowedKV, Drain, Key, OwnedKV, OwnedKVList, Record, KV};
use std::collections::HashMap;
@@ -148,6 +146,12 @@ impl<D> RuntimeLevelFilter<D> {
level: Mutex::new(level),
}
}
fn set_level(&self, level: slog::Level) {
let mut log_level = self.level.lock().unwrap();
*log_level = level;
}
}
impl<D> Drain for RuntimeLevelFilter<D>

View File

@@ -20,17 +20,14 @@ extern crate signal_hook;
extern crate scan_fmt;
extern crate oci;
#[macro_use]
extern crate scopeguard;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_json;
#[macro_use]
extern crate netlink;
use futures::*;
use nix::fcntl::{self, OFlag};
use nix::sys::socket::{self, AddressFamily, SockAddr, SockFlag, SockType};
use nix::sys::wait::{self, WaitStatus};
use nix::unistd;
use prctl::set_child_subreaper;
@@ -38,7 +35,7 @@ use rustjail::errors::*;
use signal_hook::{iterator::Signals, SIGCHLD};
use std::collections::HashMap;
use std::env;
use std::fs::{self, File};
use std::fs;
use std::os::unix::fs as unixfs;
use std::os::unix::io::AsRawFd;
use std::path::Path;
@@ -50,6 +47,7 @@ use unistd::Pid;
mod config;
mod device;
mod linux_abi;
mod logging;
mod mount;
mod namespace;
mod network;
@@ -107,17 +105,13 @@ fn main() -> Result<()> {
lazy_static::initialize(&SHELLS);
lazy_static::initialize(&AGENT_CONFIG);
// support vsock log
let (rfd, wfd) = unistd::pipe2(OFlag::O_CLOEXEC)?;
let writer = unsafe { File::from_raw_fd(wfd) };
let agentConfig = AGENT_CONFIG.clone();
if unistd::getpid() == Pid::from_raw(1) {
// Init a temporary logger used by init agent as init process
// since before do the base mount, it wouldn't access "/proc/cmdline"
// to get the customzied debug level.
let writer = io::stdout();
let logger = logging::create_logger(NAME, "agent", slog::Level::Debug, writer);
init_agent_as_init(&logger)?;
}
@@ -131,32 +125,7 @@ fn main() -> Result<()> {
}
let config = agentConfig.read().unwrap();
let log_vport = config.log_vport as u32;
let log_handle = thread::spawn(move || -> Result<()> {
let mut reader = unsafe { File::from_raw_fd(rfd) };
if log_vport > 0 {
let listenfd = socket::socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::SOCK_CLOEXEC,
None,
)?;
let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, log_vport);
socket::bind(listenfd, &addr)?;
socket::listen(listenfd, 1)?;
let datafd = socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?;
let mut log_writer = unsafe { File::from_raw_fd(datafd) };
let _ = io::copy(&mut reader, &mut log_writer)?;
let _ = unistd::close(listenfd);
let _ = unistd::close(datafd);
}
// copy log to stdout
let mut stdout_writer = io::stdout();
let _ = io::copy(&mut reader, &mut stdout_writer)?;
Ok(())
});
let writer = unsafe { File::from_raw_fd(wfd) };
let writer = io::stdout();
// Recreate a logger with the log level get from "/proc/cmdline".
let logger = logging::create_logger(NAME, "agent", config.log_level, writer);
@@ -174,14 +143,13 @@ fn main() -> Result<()> {
let _guard = slog_scope::set_global_logger(logger.new(o!("subsystem" => "grpc")));
let shells = SHELLS.clone();
let debug_console_vport = config.debug_console_vport as u32;
let shell_handle = if config.debug_console {
let thread_logger = logger.clone();
thread::spawn(move || {
let shells = shells.lock().unwrap();
let result = setup_debug_console(shells.to_vec(), debug_console_vport);
let result = setup_debug_console(shells.to_vec());
if result.is_err() {
// Report error, but don't fail
warn!(thread_logger, "failed to setup debug console";
@@ -228,7 +196,6 @@ fn main() -> Result<()> {
// let _ = rx.wait();
handle.join().unwrap();
let _ = log_handle.join();
if config.debug_console {
shell_handle.join().unwrap();
@@ -363,35 +330,18 @@ lazy_static! {
// pub static mut TRACE_MODE: ;
use crate::config::agentConfig;
use nix::fcntl::{self, OFlag};
use nix::sys::stat::Mode;
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::PathBuf;
use std::process::{exit, Command, Stdio};
fn setup_debug_console(shells: Vec<String>, port: u32) -> Result<()> {
fn setup_debug_console(shells: Vec<String>) -> Result<()> {
for shell in shells.iter() {
let binary = PathBuf::from(shell);
if binary.exists() {
let f: RawFd = if port > 0 {
let listenfd = socket::socket(
AddressFamily::Vsock,
SockType::Stream,
SockFlag::SOCK_CLOEXEC,
None,
)?;
let addr = SockAddr::new_vsock(libc::VMADDR_CID_ANY, port);
socket::bind(listenfd, &addr)?;
socket::listen(listenfd, 1)?;
socket::accept4(listenfd, SockFlag::SOCK_CLOEXEC)?
} else {
let mut flags = OFlag::empty();
flags.insert(OFlag::O_RDWR);
flags.insert(OFlag::O_CLOEXEC);
fcntl::open(CONSOLE_PATH, flags, Mode::empty())?
};
let f: RawFd = fcntl::open(CONSOLE_PATH, OFlag::O_RDWR, Mode::empty())?;
let cmd = Command::new(shell)
.arg("-i")
.stdin(unsafe { Stdio::from_raw_fd(f) })
.stdout(unsafe { Stdio::from_raw_fd(f) })
.stderr(unsafe { Stdio::from_raw_fd(f) })
@@ -431,7 +381,7 @@ mod tests {
let mut shells = shells_ref.lock().unwrap();
shells.clear();
let result = setup_debug_console(shells.to_vec(), 0);
let result = setup_debug_console(shells.to_vec());
assert!(result.is_err());
assert_eq!(result.unwrap_err().to_string(), "Error Code: 'no shell'");
@@ -454,7 +404,7 @@ mod tests {
shells.push(shell);
let result = setup_debug_console(shells.to_vec(), 0);
let result = setup_debug_console(shells.to_vec());
assert!(result.is_err());
assert_eq!(

View File

@@ -97,22 +97,16 @@ impl Sandbox {
//
// It's assumed that caller is calling this method after
// acquiring a lock on sandbox.
pub fn unset_sandbox_storage(&mut self, path: &str) -> Result<bool> {
pub fn unset_sandbox_storage(&mut self, path: &str) -> bool {
match self.storages.get_mut(path) {
None => {
return Err(ErrorKind::ErrorCode(format!(
"Sandbox storage with path {} not found",
path
))
.into())
}
None => return false,
Some(count) => {
*count -= 1;
if *count < 1 {
self.storages.remove(path);
return Ok(true);
return true;
}
return Ok(false);
false
}
}
}
@@ -136,15 +130,8 @@ impl Sandbox {
// It's assumed that caller is calling this method after
// acquiring a lock on sandbox.
pub fn unset_and_remove_sandbox_storage(&mut self, path: &str) -> Result<()> {
match self.unset_sandbox_storage(path) {
Ok(res) => {
if res {
return self.remove_sandbox_storage(path);
}
}
Err(err) => {
return Err(err);
}
if self.unset_sandbox_storage(path) {
return self.remove_sandbox_storage(path);
}
Ok(())
}
@@ -275,279 +262,3 @@ fn online_memory(logger: &Logger) -> Result<()> {
online_resources(logger, SYSFS_MEMORY_ONLINE_PATH, r"memory[0-9]+", -1)?;
Ok(())
}
#[cfg(test)]
mod tests {
//use rustjail::Error;
use super::Sandbox;
use crate::{mount::BareMount, skip_if_not_root};
use nix::mount::MsFlags;
use protocols::oci::{Linux, Root, Spec};
use rustjail::container::LinuxContainer;
use rustjail::specconv::CreateOpts;
use slog::Logger;
use tempfile::Builder;
fn bind_mount(src: &str, dst: &str, logger: &Logger) -> Result<(), rustjail::errors::Error> {
let baremount = BareMount::new(src, dst, "bind", MsFlags::MS_BIND, "", &logger);
baremount.mount()
}
#[test]
fn set_sandbox_storage() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let tmpdir = Builder::new().tempdir().unwrap();
let tmpdir_path = tmpdir.path().to_str().unwrap();
// Add a new sandbox storage
let new_storage = s.set_sandbox_storage(&tmpdir_path);
// Check the reference counter
let ref_count = s.storages[tmpdir_path];
assert_eq!(
ref_count, 1,
"Invalid refcount, got {} expected 1.",
ref_count
);
assert_eq!(new_storage, true);
// Use the existing sandbox storage
let new_storage = s.set_sandbox_storage(&tmpdir_path);
assert_eq!(new_storage, false, "Should be false as already exists.");
// Since we are using existing storage, the reference counter
// should be 2 by now.
let ref_count = s.storages[tmpdir_path];
assert_eq!(
ref_count, 2,
"Invalid refcount, got {} expected 2.",
ref_count
);
}
#[test]
fn remove_sandbox_storage() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let s = Sandbox::new(&logger).unwrap();
let tmpdir = Builder::new().tempdir().unwrap();
let tmpdir_path = tmpdir.path().to_str().unwrap();
let srcdir = Builder::new()
.prefix("src")
.tempdir_in(tmpdir_path)
.unwrap();
let srcdir_path = srcdir.path().to_str().unwrap();
let destdir = Builder::new()
.prefix("dest")
.tempdir_in(tmpdir_path)
.unwrap();
let destdir_path = destdir.path().to_str().unwrap();
let emptydir = Builder::new()
.prefix("empty")
.tempdir_in(tmpdir_path)
.unwrap();
assert!(
s.remove_sandbox_storage(&srcdir_path).is_err(),
"Expect Err as the directory i not a mountpoint"
);
assert!(s.remove_sandbox_storage("").is_err());
let invalid_dir = emptydir.path().join("invalid");
assert!(s
.remove_sandbox_storage(invalid_dir.to_str().unwrap())
.is_err());
// Now, create a double mount as this guarantees the directory cannot
// be deleted after the first umount.
for _i in 0..2 {
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
}
assert!(
s.remove_sandbox_storage(destdir_path).is_err(),
"Expect fail as deletion cannot happen due to the second mount."
);
// This time it should work as the previous two calls have undone the double
// mount.
assert!(s.remove_sandbox_storage(destdir_path).is_ok());
}
#[test]
#[allow(unused_assignments)]
fn unset_and_remove_sandbox_storage() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
// FIX: This test fails, not sure why yet.
assert!(
s.unset_and_remove_sandbox_storage("/tmp/testEphePath")
.is_err(),
"Should fail because sandbox storage doesn't exist"
);
let tmpdir = Builder::new().tempdir().unwrap();
let tmpdir_path = tmpdir.path().to_str().unwrap();
let srcdir = Builder::new()
.prefix("src")
.tempdir_in(tmpdir_path)
.unwrap();
let srcdir_path = srcdir.path().to_str().unwrap();
let destdir = Builder::new()
.prefix("dest")
.tempdir_in(tmpdir_path)
.unwrap();
let destdir_path = destdir.path().to_str().unwrap();
assert!(bind_mount(srcdir_path, destdir_path, &logger).is_ok());
assert_eq!(s.set_sandbox_storage(&destdir_path), true);
assert!(s.unset_and_remove_sandbox_storage(&destdir_path).is_ok());
let mut other_dir_str = String::new();
{
// Create another folder in a separate scope to ensure that is
// deleted
let other_dir = Builder::new()
.prefix("dir")
.tempdir_in(tmpdir_path)
.unwrap();
let other_dir_path = other_dir.path().to_str().unwrap();
other_dir_str = other_dir_path.to_string();
assert_eq!(s.set_sandbox_storage(&other_dir_path), true);
}
assert!(s.unset_and_remove_sandbox_storage(&other_dir_str).is_err());
}
#[test]
fn unset_sandbox_storage() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let storage_path = "/tmp/testEphe";
// Add a new sandbox storage
assert_eq!(s.set_sandbox_storage(&storage_path), true);
// Use the existing sandbox storage
assert_eq!(
s.set_sandbox_storage(&storage_path),
false,
"Expects false as the storage is not new."
);
assert_eq!(
s.unset_sandbox_storage(&storage_path).unwrap(),
false,
"Expects false as there is still a storage."
);
// Reference counter should decrement to 1.
let ref_count = s.storages[storage_path];
assert_eq!(
ref_count, 1,
"Invalid refcount, got {} expected 1.",
ref_count
);
assert_eq!(
s.unset_sandbox_storage(&storage_path).unwrap(),
true,
"Expects true as there is still a storage."
);
// Since no container is using this sandbox storage anymore
// there should not be any reference in sandbox struct
// for the given storage
assert!(
!s.storages.contains_key(storage_path),
"The storages map should not contain the key {}",
storage_path
);
// If no container is using the sandbox storage, the reference
// counter for it should not exist.
assert!(
s.unset_sandbox_storage(&storage_path).is_err(),
"Expects false as the reference counter should no exist."
);
}
fn create_dummy_opts() -> CreateOpts {
let mut root = Root::new();
root.Path = String::from("/");
let linux = Linux::new();
let mut spec = Spec::new();
spec.Root = Some(root).into();
spec.Linux = Some(linux).into();
CreateOpts {
cgroup_name: "".to_string(),
use_systemd_cgroup: false,
no_pivot_root: false,
no_new_keyring: false,
spec: Some(spec),
rootless_euid: false,
rootless_cgroup: false,
}
}
fn create_linuxcontainer() -> LinuxContainer {
LinuxContainer::new(
"some_id",
"/run/agent",
create_dummy_opts(),
&slog_scope::logger(),
)
.unwrap()
}
#[test]
fn get_container_entry_exist() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let linux_container = create_linuxcontainer();
s.containers
.insert("testContainerID".to_string(), linux_container);
let cnt = s.get_container("testContainerID");
assert!(cnt.is_some());
}
#[test]
fn get_container_no_entry() {
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let cnt = s.get_container("testContainerID");
assert!(cnt.is_none());
}
#[test]
fn add_and_get_container() {
skip_if_not_root!();
let logger = slog::Logger::root(slog::Discard, o!());
let mut s = Sandbox::new(&logger).unwrap();
let linux_container = create_linuxcontainer();
s.add_container(linux_container);
assert!(s.get_container("some_id").is_some());
}
}