runtime-rs: support dragonball and runtime-binary

Fixes: #3785
Signed-off-by: Quanwei Zhou <quanweiZhou@linux.alibaba.com>
Signed-off-by: Zhongtao Hu <zhongtaohu.tim@linux.alibaba.com>
This commit is contained in:
Quanwei Zhou 2021-12-03 18:53:48 +08:00 committed by quanwei.zqw
parent 3f6123b4dd
commit 3d6156f6ec
46 changed files with 2845 additions and 369 deletions

7
src/agent/Cargo.lock generated
View File

@ -98,6 +98,12 @@ version = "3.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3"
[[package]]
name = "byte-unit"
version = "3.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "415301c9de11005d4b92193c0eb7ac7adc37e5a49e0ac9bed0a42343512744b8"
[[package]]
name = "byteorder"
version = "1.4.3"
@ -683,6 +689,7 @@ dependencies = [
name = "kata-types"
version = "0.1.0"
dependencies = [
"byte-unit",
"glob",
"lazy_static",
"num_cpus",

View File

@ -365,7 +365,7 @@ impl DeviceOpContext {
pub(crate) fn remove_hotplug_mmio_device(
&self,
_dev: &Arc<dyn DeviceIo>,
_dev: &Arc<DbsMmioV2Device>,
_callback: Option<()>,
) -> Result<()> {
Err(DeviceMgrError::InvalidOperation)

View File

@ -728,6 +728,7 @@ impl Vm {
#[cfg(feature = "hotplug")]
impl Vm {
#[cfg(feature = "dbs-upcall")]
/// initialize upcall client for guest os
#[cfg(feature = "dbs-upcall")]
fn new_upcall(&mut self) -> std::result::Result<(), StartMicroVmError> {
@ -769,6 +770,7 @@ impl Vm {
}
}
#[cfg(feature = "dbs-upcall")]
/// Get upcall client.
#[cfg(feature = "dbs-upcall")]
pub fn upcall_client(&self) -> &Option<Arc<UpcallClient<DevMgrService>>> {

View File

@ -620,6 +620,10 @@ pub struct NetworkInfo {
/// Default 0-sized value means unlimited rate.
#[serde(default)]
pub tx_rate_limiter_max_rate: u64,
/// network queues
#[serde(default)]
pub network_queues: u32,
}
impl NetworkInfo {

1289
src/runtime-rs/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,12 @@
[workspace]
members = [
"crates/shim",
# TODO: currently only present for build checking; remove once the agent crate is actually used
"crates/agent",
# TODO: currently only present for build checking; remove once the resource crate is actually used
"crates/resource",
]
[patch.'crates-io']
dbs-device = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "84eee5737cc7d85f9921c94a93e6b9dc4ae24a39" }
dbs-utils = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "84eee5737cc7d85f9921c94a93e6b9dc4ae24a39" }
dbs-interrupt = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "84eee5737cc7d85f9921c94a93e6b9dc4ae24a39" }
dbs-legacy-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "84eee5737cc7d85f9921c94a93e6b9dc4ae24a39" }
dbs-virtio-devices = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "84eee5737cc7d85f9921c94a93e6b9dc4ae24a39" }
dbs-upcall = { git = "https://github.com/openanolis/dragonball-sandbox.git", rev = "84eee5737cc7d85f9921c94a93e6b9dc4ae24a39" }

View File

@ -122,6 +122,7 @@ MONITOR_OUTPUT = $(CURDIR)/$(MONITOR)
MONITOR_DIR = $(CLI_DIR)/kata-monitor
SOURCES := $(shell find . 2>&1 | grep -E '.*\.(c|h|go)$$')
VERSION := ${shell cat ./VERSION}
# List of configuration files to build and install
CONFIGS =
CONFIG_PATHS =
@ -279,7 +280,7 @@ TARGET_PATH = target/$(TRIPLE)/$(BUILD_TYPE)/$(TARGET)
##VAR DESTDIR=<path> is a directory prepended to each installed target file
DESTDIR :=
##VAR BINDIR=<path> is a directory for installing executable programs
BINDIR := /usr/bin
BINDIR := /usr/local/bin
GENERATED_CODE = crates/shim/src/config.rs
@ -330,7 +331,9 @@ endef
.DEFAULT_GOAL := default
##TARGET default: build code
default: $(TARGET) show-header
default: runtime show-header
runtime: $(TARGET)
$(TARGET): $(GENERATED_CODE) $(TARGET_PATH)
@ -418,6 +421,11 @@ codecov: check_tarpaulin
codecov-html: check_tarpaulin
cargo tarpaulin $(TARPAULIN_ARGS) -o Html
install: install-runtime install-configs
install-runtime: runtime
install -D $(TARGET_PATH) $(BINDIR)
install-configs: $(CONFIGS)
$(foreach f,$(CONFIGS),$(call INSTALL_CONFIG,$f,$(dir $(CONFIG_PATH)))) \
sudo ln -sf $(DEFAULT_HYPERVISOR_CONFIG) $(DESTDIR)/$(CONFIG_PATH)

View File

@ -16,7 +16,7 @@ serde = { version = "^1.0", features = ["derive"] }
serde_json = ">=1.0.9"
slog = "2.5.2"
slog-scope = "4.4.0"
ttrpc = { version = "0.6.0" }
ttrpc = { version = "0.6.1" }
tokio = { version = "1.8.0", features = ["fs", "rt"] }
url = "2.2.2"

View File

@ -9,13 +9,22 @@ edition = "2018"
[dependencies]
anyhow = "^1.0"
async-trait = "0.1.48"
dbs-utils = "0.1.0"
go-flag = "0.1.0"
libc = ">=0.2.39"
nix = "0.16.1"
seccompiler = "0.2.0"
serde_json = ">=1.0.9"
slog = "2.5.2"
slog-scope = "4.4.0"
thiserror = "1.0"
tokio = { version = "1.8.0", features = ["sync"] }
vmm-sys-util = "0.9.0"
kata-sys-util = { path = "../../../libs/kata-sys-util" }
kata-types = { path = "../../../libs/kata-types" }
logging = { path = "../../../libs/logging" }
dragonball = { path = "../../../dragonball", features = ["atomic-guest-memory", "virtio-vsock", "hotplug", "virtio-blk", "virtio-net", "virtio-fs"] }
[features]

View File

@ -0,0 +1,309 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::{collections::HashSet, fs::create_dir_all, path::PathBuf};
use anyhow::{anyhow, Context, Result};
use dragonball::{
api::v1::{BlockDeviceConfigInfo, BootSourceConfig},
vm::VmConfigInfo,
};
use kata_sys_util::mount;
use kata_types::config::hypervisor::Hypervisor as HypervisorConfig;
use super::{vmm_instance::VmmInstance, RUN_PATH_PREFIX};
use crate::{device::Device, kernel_param::KernelParams, VmmState, VM_ROOTFS_DRIVER_BLK};
const DRAGONBALL_KERNEL: &str = "vmlinux";
const DRAGONBALL_ROOT_FS: &str = "rootfs";
// SAFETY: DragonballInner is only accessed behind the wrapper's
// Arc<RwLock<..>>; these impls assert every contained field may be moved
// and shared across threads. NOTE(review): VmmInstance holds channel
// endpoints — confirm it is actually Send/Sync before relying on these.
unsafe impl Send for DragonballInner {}
unsafe impl Sync for DragonballInner {}

/// Per-sandbox state of one Dragonball micro-VM: configuration, jail
/// layout, the running VMM handle and queued/hotplugged devices.
pub struct DragonballInner {
    /// sandbox id
    pub(crate) id: String,

    /// vm path
    pub(crate) vm_path: String,

    /// jailed flag
    pub(crate) jailed: bool,

    /// chroot base for the jailer
    pub(crate) jailer_root: String,

    /// netns
    pub(crate) netns: Option<String>,

    /// hypervisor config
    pub(crate) config: HypervisorConfig,

    /// vmm state
    pub(crate) state: VmmState,

    /// vmm instance
    pub(crate) vmm_instance: VmmInstance,

    /// hypervisor run dir
    pub(crate) run_dir: String,

    /// pending device (queued while the VMM is not ready yet)
    pub(crate) pending_devices: Vec<Device>,

    /// cached block device ids (needed to unmount jail bind-mounts on cleanup)
    pub(crate) cached_block_devices: HashSet<String>,
}
impl DragonballInner {
pub fn new() -> DragonballInner {
DragonballInner {
id: "".to_string(),
vm_path: "".to_string(),
jailer_root: "".to_string(),
netns: None,
config: Default::default(),
pending_devices: vec![],
state: VmmState::NotReady,
jailed: false,
vmm_instance: VmmInstance::new(""),
run_dir: "".to_string(),
cached_block_devices: Default::default(),
}
}
/// Boot the micro-VM from scratch: push the base config, set the kernel
/// and rootfs, replay queued devices, then start the VMM and wait up to
/// `timeout` for it to report ready.
pub(crate) async fn cold_start_vm(&mut self, timeout: i32) -> Result<()> {
    info!(sl!(), "start sandbox cold");

    self.set_vm_base_config().context("set vm base config")?;

    // get rootfs driver
    let rootfs_driver = self.config.blockdev_info.block_device_driver.clone();

    // get kernel params: debug flags + rootfs-driver params + user-supplied params
    let mut kernel_params = KernelParams::new(self.config.debug_info.enable_debug);
    kernel_params.append(&mut KernelParams::new_rootfs_kernel_params(&rootfs_driver));
    kernel_params.append(&mut KernelParams::from_string(
        &self.config.boot_info.kernel_params,
    ));

    // set boot source
    let kernel_path = self.config.boot_info.kernel.clone();
    self.set_boot_source(
        &kernel_path,
        &kernel_params
            .to_string()
            .context("kernel params to string")?,
    )
    .context("set_boot_source")?;

    // get vm rootfs; an initrd takes precedence over a disk image
    let image = {
        let initrd_path = self.config.boot_info.initrd.clone();
        let image_path = self.config.boot_info.image.clone();
        if !initrd_path.is_empty() {
            Ok(initrd_path)
        } else if !image_path.is_empty() {
            Ok(image_path)
        } else {
            Err(anyhow!("failed to get image"))
        }
    }
    .context("get image")?;
    self.set_vm_rootfs(&image, &rootfs_driver)
        .context("set vm rootfs")?;

    // add devices that were queued before the VMM was ready
    while let Some(dev) = self.pending_devices.pop() {
        self.add_device(dev).await.context("add_device")?;
    }

    // start vmm and wait ready
    self.start_vmm_instance().context("start vmm instance")?;
    self.wait_vmm_ready(timeout).context("wait vmm")?;

    Ok(())
}
/// Prepare host-side directories (jailer root, run dir) and launch the
/// VMM server thread; afterwards the instance accepts configuration calls.
pub(crate) fn run_vmm_server(&mut self) -> Result<()> {
    // a configured jailer path switches this instance into jailed mode
    if !self.config.jailer_path.is_empty() {
        self.jailed = true;
    }

    // create jailer root
    create_dir_all(self.jailer_root.as_str())
        .map_err(|e| anyhow!("Failed to create dir {} err : {:?}", self.jailer_root, e))?;

    // create run dir
    self.run_dir = [RUN_PATH_PREFIX, self.id.as_str()].join("/");
    create_dir_all(self.run_dir.as_str())
        .with_context(|| format!("failed to create dir {}", self.run_dir.as_str()))?;

    // run vmm server
    self.vmm_instance
        .run_vmm_server(&self.id, self.netns.clone())
        .context("run vmm server")?;
    self.state = VmmState::VmmServerReady;

    Ok(())
}

/// Best-effort teardown of host resources: unmount everything that was
/// bind-mounted into the jail, then remove the VM directory.
/// Errors are logged and swallowed so cleanup always runs to completion.
pub(crate) fn cleanup_resource(&self) {
    if self.jailed {
        self.umount_jail_resource(DRAGONBALL_KERNEL).ok();
        self.umount_jail_resource(DRAGONBALL_ROOT_FS).ok();
        for id in &self.cached_block_devices {
            self.umount_jail_resource(id.as_str()).ok();
        }
    }

    std::fs::remove_dir_all(&self.vm_path)
        .map_err(|err| {
            error!(sl!(), "failed to remove dir all for {}", &self.vm_path);
            err
        })
        .ok();
}
/// Push the base VM configuration (default vcpus, memory size and the
/// serial console socket path) down to the VMM instance.
fn set_vm_base_config(&mut self) -> Result<()> {
    let vm_config = VmConfigInfo {
        serial_path: Some(format!("{}/{}", self.run_dir, "console.sock")),
        mem_size_mib: self.config.memory_info.default_memory as usize,
        vcpu_count: self.config.cpu_info.default_vcpus as u8,
        ..Default::default()
    };
    info!(sl!(), "vm config: {:?}", vm_config);

    self.vmm_instance
        .set_vm_configuration(vm_config)
        .context("set vm configuration")
}
/// Detach (lazy-unmount) a resource previously bind-mounted under the
/// jailer root.
pub(crate) fn umount_jail_resource(&self, jailed_path: &str) -> Result<()> {
    let path = [self.jailer_root.as_str(), jailed_path].join("/");
    // MNT_DETACH: lazy unmount, succeeds even if the path is still busy
    nix::mount::umount2(path.as_str(), nix::mount::MntFlags::MNT_DETACH)
        .with_context(|| format!("umount path {}", &path))
}

/// Resolve `src` to the path the VMM should use: the jailed location
/// when running jailed, otherwise the host path unchanged.
pub(crate) fn get_resource(&self, src: &str, dst: &str) -> Result<String> {
    if self.jailed {
        self.jail_resource(src, dst)
    } else {
        Ok(src.to_string())
    }
}

/// Bind-mount `src` to `<jailer_root>/<dst>` and return the path as
/// seen from inside the jail, i.e. "/<dst>".
fn jail_resource(&self, src: &str, dst: &str) -> Result<String> {
    info!(sl!(), "jail resource: src {} dst {}", src, dst);
    if src.is_empty() || dst.is_empty() {
        return Err(anyhow!("invalid param src {} dst {}", src, dst));
    }

    let jailed_location = [self.jailer_root.as_str(), dst].join("/");
    mount::bind_mount_unchecked(src, jailed_location.as_str(), false).context("bind_mount")?;

    let mut abs_path = String::from("/");
    abs_path.push_str(dst);
    Ok(abs_path)
}
/// Tell the VMM which guest kernel to boot and with which command line.
fn set_boot_source(&mut self, kernel_path: &str, kernel_params: &str) -> Result<()> {
    info!(
        sl!(),
        "kernel path {} kernel params {}", kernel_path, kernel_params
    );

    let jailed_kernel = self
        .get_resource(kernel_path, DRAGONBALL_KERNEL)
        .context("get resource")?;

    // an empty command line keeps the VMM default instead of passing an
    // empty boot_args string to the guest
    let boot_args = if kernel_params.is_empty() {
        None
    } else {
        Some(kernel_params.to_string())
    };

    let boot_cfg = BootSourceConfig {
        kernel_path: jailed_kernel,
        boot_args,
        ..Default::default()
    };

    self.vmm_instance
        .put_boot_source(boot_cfg)
        .context("put boot source")
}
/// Attach the VM rootfs (initrd or image path) as the guest root block
/// device.
///
/// Only the virtio-blk driver is supported here; any other driver is
/// rejected. When the jailer is enabled the rootfs is bind-mounted into
/// the jail first via `get_resource`.
fn set_vm_rootfs(&mut self, path: &str, driver: &str) -> Result<()> {
    info!(sl!(), "set vm rootfs {} {}", path, driver);
    let jail_drive = self
        .get_resource(path, DRAGONBALL_ROOT_FS)
        .context("get resource")?;

    if driver == VM_ROOTFS_DRIVER_BLK {
        let blk_cfg = BlockDeviceConfigInfo {
            path_on_host: PathBuf::from(jail_drive),
            drive_id: DRAGONBALL_ROOT_FS.to_string(),
            // Add it as a regular block device rather than the VMM's
            // notion of a root device; this allows us to use a
            // partitioned root block device.
            is_root_device: false,
            // the rootfs image itself is never written by the guest
            is_read_only: true,
            is_direct: false,
            ..Default::default()
        };

        self.vmm_instance
            .insert_block_device(blk_cfg)
            // fixed typo: the context previously read "inert block device"
            .context("insert block device")
    } else {
        Err(anyhow!(
            "Unknown vm_rootfs driver {} path {:?}",
            driver,
            path
        ))
    }
}
/// Issue the StartMicroVm action and mark the instance as running.
fn start_vmm_instance(&mut self) -> Result<()> {
    info!(sl!(), "Starting VM");
    self.vmm_instance
        .instance_start()
        .context("Failed to start vmm")?;
    self.state = VmmState::VmRunning;
    Ok(())
}

// wait_vmm_ready will wait for timeout seconds for the VMM to be up and running.
// This does not mean that the VM is up and running. It only indicates that the VMM is up and
// running and able to handle commands to setup and launch a VM
// NOTE(review): the comment above says "seconds" but the comparison below
// treats `timeout` as milliseconds (`as_millis() > timeout`) — confirm the
// intended unit with the callers of start_vm().
fn wait_vmm_ready(&mut self, timeout: i32) -> Result<()> {
    if timeout < 0 {
        return Err(anyhow!("Invalid param timeout {}", timeout));
    }

    let time_start = std::time::Instant::now();
    loop {
        match self.vmm_instance.is_running() {
            Ok(_) => return Ok(()),
            Err(err) => {
                let time_now = std::time::Instant::now();
                if time_now.duration_since(time_start).as_millis() > timeout as u128 {
                    return Err(anyhow!(
                        "waiting vmm ready timeout {} err: {:?}",
                        timeout,
                        err
                    ));
                }
                // poll every 10ms until the VMM is ready or we time out
                std::thread::sleep(std::time::Duration::from_millis(10));
            }
        }
    }
}

/// Replace the whole hypervisor configuration.
pub fn set_hypervisor_config(&mut self, config: HypervisorConfig) {
    self.config = config;
}

/// Return a copy of the current hypervisor configuration.
pub fn hypervisor_config(&self) -> HypervisorConfig {
    self.config.clone()
}
}

View File

@ -0,0 +1,316 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::path::PathBuf;
use anyhow::{anyhow, Context, Result};
use dbs_utils::net::MacAddr;
use dragonball::api::v1::{
BlockDeviceConfigInfo, FsDeviceConfigInfo, FsMountConfigInfo, VirtioNetDeviceConfigInfo,
VsockDeviceConfigInfo,
};
use super::DragonballInner;
use crate::{
device::Device, NetworkConfig, ShareFsDeviceConfig, ShareFsMountConfig, ShareFsMountType,
ShareFsOperation, VmmState, VsockConfig,
};
const MB_TO_B: u32 = 1024 * 1024;
const DEFAULT_VIRTIO_FS_NUM_QUEUES: i32 = 1;
const DEFAULT_VIRTIO_FS_QUEUE_SIZE: i32 = 1024;
const VIRTIO_FS: &str = "virtio-fs";
const INLINE_VIRTIO_FS: &str = "inline-virtio-fs";
/// Map a block-device index to the drive id used by the VMM,
/// e.g. index 0 -> "drive_0".
pub(crate) fn drive_index_to_id(index: u64) -> String {
    let mut id = String::from("drive_");
    id.push_str(&index.to_string());
    id
}
impl DragonballInner {
/// Add a device to the VM.
///
/// Before the VMM is ready the device is only queued; `cold_start_vm`
/// replays the queue once the VMM can accept requests.
pub(crate) async fn add_device(&mut self, device: Device) -> Result<()> {
    if self.state == VmmState::NotReady {
        info!(sl!(), "VMM not ready, queueing device {}", device);

        // add the pending device in reverse order, so that
        // start_vm pops the devices in the right order
        // when adding them.
        self.pending_devices.insert(0, device);

        return Ok(());
    }

    info!(sl!(), "dragonball add device {:?}", &device);
    match device {
        Device::Network(config) => self.add_net_device(&config).context("add net device"),
        Device::Vfio(_config) => {
            // VFIO passthrough is not implemented yet
            todo!()
        }
        Device::Block(config) => self
            .add_block_device(
                config.path_on_host.as_str(),
                config.id.as_str(),
                config.is_readonly,
                config.no_drop,
            )
            .context("add block device"),
        Device::Vsock(config) => self.add_vsock(&config).context("add vsock"),
        Device::ShareFsDevice(config) => self
            .add_share_fs_device(&config)
            .context("add share fs device"),
        Device::ShareFsMount(config) => self
            .add_share_fs_mount(&config)
            .context("add share fs mount"),
    }
}

/// Remove a hotplugged device; only block devices are supported so far.
pub(crate) async fn remove_device(&mut self, device: Device) -> Result<()> {
    info!(sl!(), "remove device {} ", device);

    match device {
        Device::Block(config) => {
            let drive_id = drive_index_to_id(config.index);
            self.remove_block_drive(drive_id.as_str())
                .context("remove block drive")
        }
        Device::Vfio(_config) => {
            // VFIO passthrough is not implemented yet
            todo!()
        }
        _ => Err(anyhow!("unsupported device {:?}", device)),
    }
}
/// Insert a block device, bind-mounting it into the jail first when
/// jailed; the id is cached so cleanup can unmount it later.
fn add_block_device(
    &mut self,
    path: &str,
    id: &str,
    read_only: bool,
    no_drop: bool,
) -> Result<()> {
    let jailed_drive = self.get_resource(path, id).context("get resource")?;
    self.cached_block_devices.insert(id.to_string());

    let blk_cfg = BlockDeviceConfigInfo {
        drive_id: id.to_string(),
        path_on_host: PathBuf::from(jailed_drive),
        is_direct: self.config.blockdev_info.block_device_cache_direct,
        no_drop,
        is_read_only: read_only,
        ..Default::default()
    };
    self.vmm_instance
        .insert_block_device(blk_cfg)
        .context("insert block device")
}

/// Remove a block device from the VM and, when jailed, also unmount its
/// jail bind-mount and forget the cached id.
fn remove_block_drive(&mut self, id: &str) -> Result<()> {
    self.vmm_instance
        .remove_block_device(id)
        .context("remove block device")?;

    if self.cached_block_devices.contains(id) && self.jailed {
        self.umount_jail_resource(id)
            .context("umount jail resource")?;
        self.cached_block_devices.remove(id);
    }
    Ok(())
}
/// Hot-add a virtio-net device backed by the given host interface.
fn add_net_device(&mut self, config: &NetworkConfig) -> Result<()> {
    // an unparsable MAC silently falls back to None (VMM picks one)
    let guest_mac = config
        .guest_mac
        .as_ref()
        .and_then(|mac| MacAddr::from_bytes(&mac.0).ok());

    let iface_cfg = VirtioNetDeviceConfigInfo {
        iface_id: config.id.clone(),
        host_dev_name: config.host_dev_name.clone(),
        guest_mac,
        ..Default::default()
    };

    info!(
        sl!(),
        "add {} endpoint to {}", iface_cfg.host_dev_name, iface_cfg.iface_id
    );

    self.vmm_instance
        .insert_network_device(iface_cfg)
        .context("insert network device")
}
/// Add the vsock device used for the host<->guest agent channel.
fn add_vsock(&mut self, config: &VsockConfig) -> Result<()> {
    let vsock_cfg = VsockDeviceConfigInfo {
        id: "root".to_owned(),
        guest_cid: config.guest_cid,
        uds_path: Some(config.uds_path.clone()),
        ..Default::default()
    };

    self.vmm_instance
        .insert_vsock(vsock_cfg)
        .context("insert vsock")
}
/// Translate virtiofsd-style extra args (e.g. "-o no_open,xattr",
/// "--thread-pool-size=N") into the inline virtio-fs device config.
///
/// Unknown or unsupported options are logged and skipped rather than
/// failing the whole device setup.
fn parse_inline_virtiofs_args(&self, fs_cfg: &mut FsDeviceConfigInfo) -> Result<()> {
    let mut debug = false;
    let mut opt_list = String::new();

    fs_cfg.mode = String::from("virtio");
    fs_cfg.cache_policy = self.config.shared_fs.virtio_fs_cache.clone();
    fs_cfg.fuse_killpriv_v2 = true;

    info!(
        sl!(),
        "args: {:?}", &self.config.shared_fs.virtio_fs_extra_args
    );
    let args = &self.config.shared_fs.virtio_fs_extra_args;
    // go-flag mirrors virtiofsd's Go-style flag syntax
    let _ = go_flag::parse_args_with_warnings::<String, _, _>(args, None, |flags| {
        flags.add_flag("d", &mut debug);
        flags.add_flag("thread-pool-size", &mut fs_cfg.thread_pool_size);
        flags.add_flag("drop-sys-resource", &mut fs_cfg.drop_sys_resource);
        flags.add_flag("o", &mut opt_list);
    })
    .with_context(|| format!("parse args: {:?}", args))?;

    if debug {
        warn!(
            sl!(),
            "Inline virtiofs \"-d\" option not implemented, ignore"
        );
    }

    // Parse comma separated option list
    if !opt_list.is_empty() {
        let args: Vec<&str> = opt_list.split(',').collect();
        for arg in args {
            match arg {
                "no_open" => fs_cfg.no_open = true,
                "open" => fs_cfg.no_open = false,
                "writeback_cache" => fs_cfg.writeback_cache = true,
                "no_writeback_cache" => fs_cfg.writeback_cache = false,
                "writeback" => fs_cfg.writeback_cache = true,
                "no_writeback" => fs_cfg.writeback_cache = false,
                "xattr" => fs_cfg.xattr = true,
                "no_xattr" => fs_cfg.xattr = false,
                "cache_symlinks" => {} // inline virtiofs always cache symlinks
                "trace" => warn!(
                    sl!(),
                    "Inline virtiofs \"-o trace\" option not supported yet, ignored."
                ),
                _ => warn!(sl!(), "Inline virtiofs unsupported option: {}", arg),
            }
        }
    }

    debug!(sl!(), "Inline virtiofs config {:?}", fs_cfg);
    Ok(())
}
/// Attach a vhost-user / inline virtio-fs share device.
///
/// Queue count and queue size fall back to the virtio-fs defaults when
/// the caller leaves them unset (0).
fn add_share_fs_device(&self, config: &ShareFsDeviceConfig) -> Result<()> {
    let mut fs_cfg = FsDeviceConfigInfo {
        sock_path: config.sock_path.clone(),
        tag: config.mount_tag.clone(),
        // BUG FIX: the queue count must come from `queue_num`; the
        // original code copied `queue_size` here by mistake even though
        // the guard checks `queue_num`.
        num_queues: if config.queue_num > 0 {
            config.queue_num as usize
        } else {
            DEFAULT_VIRTIO_FS_NUM_QUEUES as usize
        },
        queue_size: if config.queue_size > 0 {
            config.queue_size as u16
        } else {
            DEFAULT_VIRTIO_FS_QUEUE_SIZE as u16
        },
        // cache size is configured in MiB; the VMM expects bytes
        cache_size: (self.config.shared_fs.virtio_fs_cache_size as u64)
            .saturating_mul(MB_TO_B as u64),
        ..Default::default()
    };
    self.do_add_fs_device(&config.fs_type, &mut fs_cfg)
}
/// Set the fs device mode from the share-fs type and hand the config to
/// the VMM: `virtio-fs` uses an external vhost-user daemon, while the
/// inline variant parses virtiofsd-style extra args instead.
fn do_add_fs_device(&self, fs_type: &str, fs_cfg: &mut FsDeviceConfigInfo) -> Result<()> {
    match fs_type {
        VIRTIO_FS => {
            fs_cfg.mode = String::from("vhostuser");
        }
        INLINE_VIRTIO_FS => {
            self.parse_inline_virtiofs_args(fs_cfg)?;
        }
        _ => {
            return Err(anyhow!(
                "hypervisor isn't configured with shared_fs supported"
            ));
        }
    }
    self.vmm_instance
        .insert_fs(fs_cfg)
        .map_err(|e| anyhow!("insert {} fs error. {:?}", fs_cfg.mode, e))
}

/// Mount, umount or update a share-fs mount point inside a running VM
/// by patching the fs device.
fn add_share_fs_mount(&mut self, config: &ShareFsMountConfig) -> Result<()> {
    let ops = match config.op {
        ShareFsOperation::Mount => "mount",
        ShareFsOperation::Umount => "umount",
        ShareFsOperation::Update => "update",
    };

    let fstype = match config.fstype {
        ShareFsMountType::PASSTHROUGH => "passthroughfs",
        ShareFsMountType::RAFS => "rafs",
    };

    let cfg = FsMountConfigInfo {
        ops: ops.to_string(),
        fstype: Some(fstype.to_string()),
        source: Some(config.source.clone()),
        mountpoint: config.mount_point.clone(),
        config: None,
        tag: config.tag.clone(),
        prefetch_list_path: config.prefetch_list_path.clone(),
        dax_threshold_size_kb: None,
    };

    self.vmm_instance.patch_fs(&cfg, config.op).map_err(|e| {
        anyhow!(
            "{:?} {} at {} error: {:?}",
            config.op,
            fstype,
            config.mount_point.clone(),
            e
        )
    })
}
}
#[cfg(test)]
mod tests {
    use dragonball::api::v1::FsDeviceConfigInfo;

    use crate::dragonball::DragonballInner;

    /// Ensure virtiofsd-style extra args are mapped onto the inline
    /// virtio-fs device configuration.
    #[test]
    fn test_parse_inline_virtiofs_args() {
        let mut dragonball = DragonballInner::new();
        let mut fs_cfg = FsDeviceConfigInfo::default();

        // no_open and writeback_cache is the default, so test open and no_writeback_cache. "-d"
        // and "trace" are ignored for now, but should not return error.
        dragonball.config.shared_fs.virtio_fs_extra_args = vec![
            "-o".to_string(),
            "open,no_writeback_cache,xattr,trace".to_string(),
            "--thread-pool-size=128".to_string(),
            "--drop-sys-resource".to_string(),
            "-d".to_string(),
        ];

        dragonball.config.shared_fs.virtio_fs_cache = "auto".to_string();
        dragonball.parse_inline_virtiofs_args(&mut fs_cfg).unwrap();

        // every recognised option must land in the device config
        assert!(!fs_cfg.no_open);
        assert!(fs_cfg.xattr);
        assert!(fs_cfg.fuse_killpriv_v2);
        assert!(!fs_cfg.writeback_cache);
        assert_eq!(fs_cfg.cache_policy, "auto".to_string());
        assert!(fs_cfg.drop_sys_resource);
        assert!(fs_cfg.thread_pool_size == 128);
    }
}

View File

@ -0,0 +1,137 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::{
collections::{HashMap, HashSet},
iter::FromIterator,
};
use anyhow::{Context, Result};
use super::inner::DragonballInner;
use crate::{utils, VcpuThreadIds, VmmState};
/// Base directory under which per-sandbox state is kept.
const KATA_PATH: &str = "/run/kata";
/// File name of the hybrid-vsock unix socket inside the jailer root.
const DEFAULT_HYBRID_VSOCK_NAME: &str = "kata.hvsock";

/// Return the hybrid-vsock socket path under `root`.
fn get_vsock_path(root: &str) -> String {
    format!("{}/{}", root, DEFAULT_HYBRID_VSOCK_NAME)
}
impl DragonballInner {
/// Record the per-sandbox paths/netns and queue the hybrid-vsock device
/// so the guest agent channel exists as soon as the VM boots.
pub(crate) async fn prepare_vm(&mut self, id: &str, netns: Option<String>) -> Result<()> {
    self.id = id.to_string();
    self.state = VmmState::NotReady;

    self.vm_path = [KATA_PATH, id].join("/");
    self.jailer_root = [self.vm_path.as_str(), "root"].join("/");
    self.netns = netns;

    // prepare vsock
    let uds_path = [&self.jailer_root, DEFAULT_HYBRID_VSOCK_NAME].join("/");
    let d = crate::device::Device::Vsock(crate::device::VsockConfig {
        id: format!("vsock-{}", &self.id),
        // cid 3 is the first context id usable by a guest
        guest_cid: 3,
        uds_path,
    });
    self.add_device(d).await.context("add device")?;
    Ok(())
}

// start_vm will start the hypervisor for the given sandbox.
// In the context of dragonball, this will start the hypervisor
pub(crate) async fn start_vm(&mut self, timeout: i32) -> Result<()> {
    self.run_vmm_server().context("start vmm server")?;
    // on boot failure, stop the VMM so no orphan process is left behind
    self.cold_start_vm(timeout).await.map_err(|error| {
        error!(sl!(), "start micro vm error {:?}", error);
        if let Err(err) = self.stop_vm() {
            error!(sl!(), "failed to call end err : {:?}", err);
        }
        error
    })?;

    Ok(())
}

/// Stop the VMM instance (and with it the VM).
pub(crate) fn stop_vm(&mut self) -> Result<()> {
    info!(sl!(), "Stopping dragonball VM");
    self.vmm_instance.stop().context("stop")?;
    Ok(())
}
/// Pause the running VM's vcpus.
pub(crate) fn pause_vm(&self) -> Result<()> {
    info!(sl!(), "do pause vm");
    self.vmm_instance.pause().context("pause vm")?;
    Ok(())
}

/// Resume a previously paused VM.
pub(crate) fn resume_vm(&self) -> Result<()> {
    info!(sl!(), "do resume vm");
    self.vmm_instance.resume().context("resume vm")?;
    Ok(())
}

/// Save/snapshot the VM — not implemented yet, panics via `todo!()`.
pub(crate) async fn save_vm(&self) -> Result<()> {
    todo!()
}

/// Agent connection URI, e.g. "hvsock://<jailer_root>/kata.hvsock".
pub(crate) async fn get_agent_socket(&self) -> Result<String> {
    const HYBRID_VSOCK_SCHEME: &str = "hvsock";
    Ok(format!(
        "{}://{}",
        HYBRID_VSOCK_SCHEME,
        get_vsock_path(&self.jailer_root),
    ))
}

/// Drop back to NotReady so subsequent device adds are queued instead
/// of being sent to a dead VMM.
pub(crate) async fn disconnect(&mut self) {
    self.state = VmmState::NotReady;
}
/// Map vcpu index -> host thread id for all vcpus of this VM.
pub(crate) async fn get_thread_ids(&self) -> Result<VcpuThreadIds> {
    let vcpus: HashMap<u32, u32> = self
        .vmm_instance
        .get_vcpu_tids()
        .into_iter()
        .map(|(vcpu_id, tid)| (vcpu_id as u32, tid as u32))
        .collect();
    let vcpu_thread_ids = VcpuThreadIds { vcpus };

    info!(sl!(), "get thread ids {:?}", vcpu_thread_ids);
    Ok(vcpu_thread_ids)
}

/// Release all host-side resources held for this VM.
pub(crate) async fn cleanup(&self) -> Result<()> {
    self.cleanup_resource();
    Ok(())
}

/// All non-vcpu thread ids of the VMM process (vcpu threads are
/// accounted for separately and therefore removed from the set).
pub(crate) async fn get_pids(&self) -> Result<Vec<u32>> {
    let vmm_pid = self.vmm_instance.pid();
    let mut pids: HashSet<u32> = HashSet::new();
    // shim/vmm process plus all of its child threads
    pids.insert(vmm_pid);
    pids.extend(utils::get_child_threads(vmm_pid));
    // remove vcpus
    for (_, tid) in self.vmm_instance.get_vcpu_tids() {
        pids.remove(&tid);
    }

    info!(sl!(), "get pids {:?}", pids);
    Ok(pids.into_iter().collect())
}
/// Health probe hook; dragonball has nothing extra to verify here.
pub(crate) async fn check(&self) -> Result<()> {
    Ok(())
}

/// Chroot base used by the jailer for this sandbox.
pub(crate) async fn get_jailer_root(&self) -> Result<String> {
    Ok(self.jailer_root.clone())
}
}

View File

@ -0,0 +1,130 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
mod inner;
mod inner_device;
mod inner_hypervisor;
use inner::DragonballInner;
pub mod vmm_instance;
pub const RUN_PATH_PREFIX: &str = "/run/kata";
use std::sync::Arc;
use anyhow::Result;
use async_trait::async_trait;
use kata_types::config::hypervisor::Hypervisor as HypervisorConfig;
use tokio::sync::RwLock;
use crate::{device::Device, Hypervisor, VcpuThreadIds};
// SAFETY: the only field is an Arc<RwLock<DragonballInner>>; these impls
// rely on the matching unsafe Send/Sync impls on DragonballInner.
// NOTE(review): confirm DragonballInner is actually thread-safe before
// depending on these.
unsafe impl Send for Dragonball {}
unsafe impl Sync for Dragonball {}

/// Public hypervisor handle wrapping the inner state behind an async RwLock.
pub struct Dragonball {
    inner: Arc<RwLock<DragonballInner>>,
}

impl Default for Dragonball {
    fn default() -> Self {
        Self::new()
    }
}

impl Dragonball {
    /// Create an unconfigured Dragonball hypervisor handle.
    pub fn new() -> Self {
        Self {
            inner: Arc::new(RwLock::new(DragonballInner::new())),
        }
    }

    /// Store the hypervisor configuration on the inner state.
    pub async fn set_hypervisor_config(&mut self, config: HypervisorConfig) {
        let mut inner = self.inner.write().await;
        inner.set_hypervisor_config(config)
    }
}
// Hypervisor trait implementation: every method acquires the inner lock
// (write when the inner state is mutated, read otherwise) and delegates
// to the corresponding DragonballInner method.
#[async_trait]
impl Hypervisor for Dragonball {
    async fn prepare_vm(&self, id: &str, netns: Option<String>) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.prepare_vm(id, netns).await
    }

    async fn start_vm(&self, timeout: i32) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.start_vm(timeout).await
    }

    async fn stop_vm(&self) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.stop_vm()
    }

    async fn pause_vm(&self) -> Result<()> {
        let inner = self.inner.read().await;
        inner.pause_vm()
    }

    async fn resume_vm(&self) -> Result<()> {
        let inner = self.inner.read().await;
        inner.resume_vm()
    }

    async fn save_vm(&self) -> Result<()> {
        let inner = self.inner.read().await;
        inner.save_vm().await
    }

    async fn add_device(&self, device: Device) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.add_device(device).await
    }

    async fn remove_device(&self, device: Device) -> Result<()> {
        let mut inner = self.inner.write().await;
        inner.remove_device(device).await
    }

    async fn get_agent_socket(&self) -> Result<String> {
        let inner = self.inner.read().await;
        inner.get_agent_socket().await
    }

    async fn disconnect(&self) {
        let mut inner = self.inner.write().await;
        inner.disconnect().await
    }

    async fn hypervisor_config(&self) -> HypervisorConfig {
        let inner = self.inner.read().await;
        inner.hypervisor_config()
    }

    async fn get_thread_ids(&self) -> Result<VcpuThreadIds> {
        let inner = self.inner.read().await;
        inner.get_thread_ids().await
    }

    async fn cleanup(&self) -> Result<()> {
        let inner = self.inner.read().await;
        inner.cleanup().await
    }

    async fn get_pids(&self) -> Result<Vec<u32>> {
        let inner = self.inner.read().await;
        inner.get_pids().await
    }

    async fn check(&self) -> Result<()> {
        let inner = self.inner.read().await;
        inner.check().await
    }

    async fn get_jailer_root(&self) -> Result<String> {
        let inner = self.inner.read().await;
        inner.get_jailer_root().await
    }
}

View File

@ -0,0 +1,335 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::{
fs::{File, OpenOptions},
os::unix::{io::IntoRawFd, prelude::AsRawFd},
sync::{
mpsc::{channel, Receiver, Sender},
Arc, Mutex, RwLock,
},
thread,
};
use anyhow::{anyhow, Context, Result};
use dragonball::{
api::v1::{
BlockDeviceConfigInfo, BootSourceConfig, FsDeviceConfigInfo, FsMountConfigInfo,
InstanceInfo, InstanceState, VirtioNetDeviceConfigInfo, VmmAction, VmmActionError, VmmData,
VmmRequest, VmmResponse, VmmService, VsockDeviceConfigInfo,
},
vm::VmConfigInfo,
Vmm,
};
use nix::sched::{setns, CloneFlags};
use seccompiler::BpfProgram;
use vmm_sys_util::eventfd::EventFd;
use crate::ShareFsOperation;
/// Requests that can be sent to the VMM thread.
pub enum Request {
    // synchronous VMM action: the caller waits for the matching response
    Sync(VmmAction),
}

// reported as the VMM version in InstanceInfo
const DRAGONBALL_VERSION: &str = env!("CARGO_PKG_VERSION");
// retry budget for the retrying request path — TODO confirm against
// handle_request_with_retry (defined elsewhere in this file)
const REQUEST_RETRY: u32 = 500;
const KVM_DEVICE: &str = "/dev/kvm";

/// Handle to a Dragonball VMM thread plus the channels and eventfd used
/// to exchange VmmRequest/VmmResponse with it.
pub struct VmmInstance {
    /// VMM instance info directly accessible from runtime
    vmm_shared_info: Arc<RwLock<InstanceInfo>>,
    // request channel into the VMM thread (None until run_vmm_server)
    to_vmm: Option<Sender<VmmRequest>>,
    // response channel back from the VMM thread
    from_vmm: Option<Receiver<VmmResponse>>,
    // eventfd used to wake the VMM event loop after queueing a request
    to_vmm_fd: EventFd,
    // seccomp filter applied to the VMM (empty program = disabled)
    seccomp: BpfProgram,
    // join handle of the "vmm_master" thread
    vmm_thread: Option<thread::JoinHandle<Result<i32>>>,
}
impl VmmInstance {
/// Create an instance handle for sandbox `id`.
///
/// Nothing is started yet; `run_vmm_server` spawns the actual VMM
/// thread. Panics if the wakeup eventfd cannot be created.
pub fn new(id: &str) -> Self {
    let vmm_shared_info = Arc::new(RwLock::new(InstanceInfo::new(
        String::from(id),
        DRAGONBALL_VERSION.to_string(),
    )));

    let to_vmm_fd = EventFd::new(libc::EFD_NONBLOCK)
        .unwrap_or_else(|_| panic!("Failed to create eventfd for vmm {}", id));

    VmmInstance {
        vmm_shared_info,
        to_vmm: None,
        from_vmm: None,
        to_vmm_fd,
        // empty BpfProgram == seccomp disabled
        seccomp: vec![],
        vmm_thread: None,
    }
}
/// Return a cloned handle to the shared instance-info block.
pub fn get_shared_info(&self) -> Arc<RwLock<InstanceInfo>> {
    self.vmm_shared_info.clone()
}

/// Record the sandbox id in the shared instance info.
fn set_instance_id(&mut self, id: &str) {
    // lock the shared info in place; cloning the Arc first (as the
    // original code did) only added a redundant refcount round-trip
    self.vmm_shared_info.write().unwrap().id = String::from(id);
}

/// Snapshot of (vcpu index, host tid) pairs for all vcpus.
pub fn get_vcpu_tids(&self) -> Vec<(u8, u32)> {
    // read directly; the intermediate Arc clone was redundant
    self.vmm_shared_info.read().unwrap().tids.clone()
}
/// Spawn the VMM event-loop thread ("vmm_master") for sandbox `id`,
/// optionally entering `netns` first, and wire up the request/response
/// channels.
///
/// NOTE(review): Vmm::new, eventfd duplication and thread spawning use
/// expect() and will panic rather than return Err on failure — confirm
/// that is acceptable at this call site.
pub fn run_vmm_server(&mut self, id: &str, netns: Option<String>) -> Result<()> {
    let kvm = OpenOptions::new().read(true).write(true).open(KVM_DEVICE)?;

    let (to_vmm, from_runtime) = channel();
    let (to_runtime, from_vmm) = channel();
    self.set_instance_id(id);
    let vmm_service = VmmService::new(from_runtime, to_runtime);

    self.to_vmm = Some(to_vmm);
    self.from_vmm = Some(from_vmm);

    let api_event_fd2 = self.to_vmm_fd.try_clone().expect("Failed to dup eventfd");
    let vmm = Vmm::new(
        self.vmm_shared_info.clone(),
        api_event_fd2,
        self.seccomp.clone(),
        self.seccomp.clone(),
        Some(kvm.into_raw_fd()),
    )
    .expect("Failed to start vmm");

    self.vmm_thread = Some(
        thread::Builder::new()
            .name("vmm_master".to_owned())
            .spawn(move || {
                || -> Result<i32> {
                    debug!(sl!(), "run vmm thread start");
                    // enter the sandbox netns before running the event
                    // loop so devices are created inside it
                    if let Some(netns_path) = netns {
                        info!(sl!(), "set netns for vmm master {}", &netns_path);
                        let netns_fd = File::open(&netns_path)
                            .with_context(|| format!("open netns path {}", &netns_path))?;
                        setns(netns_fd.as_raw_fd(), CloneFlags::CLONE_NEWNET)
                            .context("set netns ")?;
                    }
                    // blocks until the VMM shuts down
                    let exit_code =
                        Vmm::run_vmm_event_loop(Arc::new(Mutex::new(vmm)), vmm_service);
                    debug!(sl!(), "run vmm thread exited: {}", exit_code);
                    Ok(exit_code)
                }()
                .map_err(|e| {
                    error!(sl!(), "run vmm thread err. {:?}", e);
                    e
                })
            })
            .expect("Failed to start vmm event loop"),
    );

    Ok(())
}
/// Configure the guest kernel/cmdline on the (not yet started) VM.
pub fn put_boot_source(&self, boot_source_cfg: BootSourceConfig) -> Result<()> {
    self.handle_request(Request::Sync(VmmAction::ConfigureBootSource(
        boot_source_cfg,
    )))
    .context("Failed to configure boot source")?;
    Ok(())
}

/// Ask the VMM to boot the micro-VM.
pub fn instance_start(&self) -> Result<()> {
    self.handle_request(Request::Sync(VmmAction::StartMicroVm))
        .context("Failed to start MicroVm")?;
    Ok(())
}

/// True while the VMM has not been configured/started yet.
pub fn is_uninitialized(&self) -> bool {
    let share_info = self
        .vmm_shared_info
        .read()
        .expect("Failed to read share_info due to poisoned lock");
    matches!(share_info.state, InstanceState::Uninitialized)
}
/// Check whether the VMM reports the `Running` state.
///
/// Returns `Ok(())` when running and an error otherwise, so callers can
/// poll it with `?`/`context` instead of matching a bool.
pub fn is_running(&self) -> Result<()> {
    // read the shared info in place; the previous Arc clone was redundant
    let share_info = self
        .vmm_shared_info
        .read()
        .expect("Failed to read share_info due to poisoned lock");
    if let InstanceState::Running = share_info.state {
        return Ok(());
    }
    Err(anyhow!("vmm is not running"))
}
/// Fetch the current VM configuration from the vmm.
/// Any failure (request error or unexpected response variant) is collapsed
/// into a single error, matching the previous behavior.
pub fn get_machine_info(&self) -> Result<Box<VmConfigInfo>> {
    match self.handle_request(Request::Sync(VmmAction::GetVmConfiguration)) {
        Ok(VmmData::MachineConfiguration(vm_config)) => Ok(vm_config),
        _ => Err(anyhow!("Failed to get machine info")),
    }
}
/// Hotplug a block device, retrying while the guest upcall is not ready.
pub fn insert_block_device(&self, device_cfg: BlockDeviceConfigInfo) -> Result<()> {
    let action = VmmAction::InsertBlockDevice(device_cfg.clone());
    self.handle_request_with_retry(Request::Sync(action))
        .with_context(|| format!("Failed to insert block device {:?}", device_cfg))
        .map(|_| ())
}
/// Remove a previously inserted block device by its id.
pub fn remove_block_device(&self, id: &str) -> Result<()> {
    info!(sl!(), "remove block device {}", id);
    let action = VmmAction::RemoveBlockDevice(id.to_string());
    self.handle_request(Request::Sync(action))
        .with_context(|| format!("Failed to remove block device {:?}", id))
        .map(|_| ())
}
/// Apply the overall VM configuration (vcpus, memory, ...) before boot.
pub fn set_vm_configuration(&self, vm_config: VmConfigInfo) -> Result<()> {
    let action = VmmAction::SetVmConfiguration(vm_config.clone());
    self.handle_request(Request::Sync(action))
        .with_context(|| format!("Failed to set vm configuration {:?}", vm_config))
        .map(|_| ())
}
/// Hotplug a virtio-net device, retrying while the guest upcall is not ready.
pub fn insert_network_device(&self, net_cfg: VirtioNetDeviceConfigInfo) -> Result<()> {
    let action = VmmAction::InsertNetworkDevice(net_cfg.clone());
    self.handle_request_with_retry(Request::Sync(action))
        .with_context(|| format!("Failed to insert network device {:?}", net_cfg))
        .map(|_| ())
}
/// Add a vsock device to the VM configuration.
pub fn insert_vsock(&self, vsock_cfg: VsockDeviceConfigInfo) -> Result<()> {
    let action = VmmAction::InsertVsockDevice(vsock_cfg.clone());
    self.handle_request(Request::Sync(action))
        .with_context(|| format!("Failed to insert vsock device {:?}", vsock_cfg))
        .map(|_| ())
}
/// Add a virtio-fs device to the VM configuration.
pub fn insert_fs(&self, fs_cfg: &FsDeviceConfigInfo) -> Result<()> {
    let action = VmmAction::InsertFsDevice(fs_cfg.clone());
    self.handle_request(Request::Sync(action))
        .with_context(|| format!("Failed to insert {} fs device {:?}", fs_cfg.mode, fs_cfg))
        .map(|_| ())
}
/// Manipulate (mount/umount/update) a share-fs backend mount inside the vmm.
/// `op` is only used to enrich the error message; the vmm derives the actual
/// operation from `cfg` itself.
pub fn patch_fs(&self, cfg: &FsMountConfigInfo, op: ShareFsOperation) -> Result<()> {
    let action = VmmAction::ManipulateFsBackendFs(cfg.clone());
    self.handle_request(Request::Sync(action))
        .with_context(|| {
            format!(
                "Failed to {:?} backend {:?} at {} mount config {:?}",
                op, cfg.fstype, cfg.mountpoint, cfg
            )
        })
        .map(|_| ())
}
/// Pause the micro VM.
// TODO: not implemented for the inline dragonball vmm yet; calling this panics.
pub fn pause(&self) -> Result<()> {
todo!()
}
/// Resume a paused micro VM.
// TODO: not implemented for the inline dragonball vmm yet; calling this panics.
pub fn resume(&self) -> Result<()> {
todo!()
}
/// Process id of the vmm.
// The dragonball vmm runs in-process (as a thread spawned by
// `run_vmm_server`), so this is the current process id rather than the pid
// of a child hypervisor process.
pub fn pid(&self) -> u32 {
std::process::id()
}
/// Shut down the micro VM (best effort) and join the vmm event-loop thread.
///
/// The shutdown request may fail if the vmm is already gone; that failure
/// is logged and ignored. Joining is skipped when the vmm never started,
/// because the event-loop thread would never exit and the join would hang.
pub fn stop(&mut self) -> Result<()> {
    // Best-effort: the vmm may already have exited.
    self.handle_request(Request::Sync(VmmAction::ShutdownMicroVm))
        .map_err(|e| {
            warn!(sl!(), "Failed to shutdown MicroVM. {}", e);
            e
        })
        .ok();

    // vmm is not running, join thread will be hang.
    if self.is_uninitialized() {
        debug!(sl!(), "vmm-master thread is uninitialized or has exited.");
        return Ok(());
    }
    debug!(sl!(), "join vmm-master thread exit.");

    // vmm_thread must be exited, otherwise there will be other sync issues.
    // Use `if let` instead of `unwrap()` so a missing handle cannot panic.
    if let Some(handle) = self.vmm_thread.take() {
        handle.join().ok();
        info!(sl!(), "vmm-master thread join succeed.");
    } else {
        debug!(sl!(), "vmm-master thread is uninitialized or has exited.");
    }
    Ok(())
}
/// Deliver one action to the vmm over the request channel, wake the vmm via
/// the notification eventfd, and block for its response.
fn send_request(&self, vmm_action: VmmAction) -> Result<VmmResponse> {
    let to_vmm = self
        .to_vmm
        .as_ref()
        .ok_or_else(|| anyhow!("to_vmm is None"))?;
    to_vmm
        .send(Box::new(vmm_action.clone()))
        .with_context(|| format!("Failed to send {:?} via channel ", vmm_action))?;

    //notify vmm action
    self.to_vmm_fd
        .write(1)
        .map_err(|e| anyhow!("failed to notify vmm: {}", e))?;

    let from_vmm = self
        .from_vmm
        .as_ref()
        .ok_or_else(|| anyhow!("from_vmm is None"))?;
    from_vmm.recv().map_err(|e| anyhow!("vmm recv err: {}", e))
}
fn handle_request(&self, req: Request) -> Result<VmmData> {
let Request::Sync(vmm_action) = req;
match self.send_request(vmm_action) {
Ok(vmm_outcome) => match *vmm_outcome {
Ok(vmm_data) => Ok(vmm_data),
Err(vmm_action_error) => Err(anyhow!("vmm action error: {:?}", vmm_action_error)),
},
Err(e) => Err(e),
}
}
/// Send a synchronous request, retrying up to `REQUEST_RETRY` times while the
/// guest upcall service reports it is not ready yet (hotplug requests arrive
/// before the guest agent has finished booting).
///
/// Channel/transport errors and any vmm error other than `UpcallNotReady`
/// abort immediately.
fn handle_request_with_retry(&self, req: Request) -> Result<VmmData> {
    let Request::Sync(vmm_action) = req;
    for count in 0..REQUEST_RETRY {
        match self.send_request(vmm_action.clone()) {
            Ok(vmm_outcome) => match *vmm_outcome {
                Ok(vmm_data) => {
                    info!(
                        sl!(),
                        "success to send {:?} after retry {}", &vmm_action, count
                    );
                    return Ok(vmm_data);
                }
                // Upcall not ready yet: back off briefly and retry.
                Err(VmmActionError::UpcallNotReady) => {
                    std::thread::sleep(std::time::Duration::from_millis(10));
                    continue;
                }
                Err(vmm_action_error) => return Err(vmm_action_error.into()),
            },
            Err(err) => return Err(err),
        }
    }
    Err(anyhow::anyhow!(
        "After {} attempts, it still doesn't work.",
        REQUEST_RETRY
    ))
}
}

View File

@ -0,0 +1,177 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use anyhow::{anyhow, Result};
use crate::{VM_ROOTFS_DRIVER_BLK, VM_ROOTFS_DRIVER_PMEM};
// Port where the agent will send the logs. Logs are sent through the vsock in cases
// where the hypervisor has no console.sock, i.e dragonball
const VSOCK_LOGS_PORT: &str = "1025";
const KERNEL_KV_DELIMITER: &str = "=";
const KERNEL_PARAM_DELIMITER: &str = " ";
/// A single kernel command-line parameter (`key[=value]`).
///
/// `Eq` is derived in addition to `PartialEq` since both fields are `String`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Param {
    pub key: String,
    pub value: String,
}

impl Param {
    /// Create a parameter from borrowed key/value strings.
    /// An empty `value` denotes a bare flag such as `quiet`.
    pub fn new(key: &str, value: &str) -> Self {
        Param {
            key: key.to_owned(),
            value: value.to_owned(),
        }
    }
}
/// An ordered list of kernel command-line parameters.
#[derive(Debug, PartialEq)]
pub(crate) struct KernelParams {
// insertion order is preserved; `to_string` joins entries with spaces
params: Vec<Param>,
}
impl KernelParams {
    /// Build the default set of guest kernel parameters.
    ///
    /// When `debug` is set, the agent is additionally told to ship its logs
    /// through the vsock log port (used when the hypervisor has no
    /// console.sock, e.g. dragonball).
    pub(crate) fn new(debug: bool) -> Self {
        // default kernel params
        let mut params = vec![
            Param::new("reboot", "k"),
            Param::new("earlyprintk", "ttyS0"),
            Param::new("initcall_debug", ""),
            Param::new("panic", "1"),
            Param::new("systemd.unit", "kata-containers.target"),
            Param::new("systemd.mask", "systemd-networkd.service"),
        ];

        if debug {
            params.push(Param::new("agent.log_vport", VSOCK_LOGS_PORT));
        }

        Self { params }
    }

    /// Kernel parameters selecting the VM rootfs device for the given driver.
    /// An unknown driver yields an empty parameter list.
    pub(crate) fn new_rootfs_kernel_params(rootfs_driver: &str) -> Self {
        let params = match rootfs_driver {
            VM_ROOTFS_DRIVER_BLK => vec![
                Param::new("root", "/dev/vda1"),
                Param::new("rootflags", "data=ordered,errors=remount-ro ro"),
                Param::new("rootfstype", "ext4"),
            ],
            VM_ROOTFS_DRIVER_PMEM => vec![
                Param::new("root", "/dev/pmem0p1"),
                Param::new("rootflags", "data=ordered,errors=remount-ro,dax ro"),
                Param::new("rootfstype", "ext4"),
            ],
            _ => vec![],
        };
        Self { params }
    }

    /// Move all parameters out of `params` onto the end of `self`.
    pub(crate) fn append(&mut self, params: &mut KernelParams) {
        self.params.append(&mut params.params);
    }

    /// Parse a space-separated `key=value` command line into parameters.
    /// A token without `=` becomes a key with an empty value; empty tokens
    /// (from repeated spaces) are skipped.
    pub(crate) fn from_string(params_string: &str) -> Self {
        let params = params_string
            .split(KERNEL_PARAM_DELIMITER)
            .filter(|param| !param.is_empty())
            .map(|param| {
                let mut kv = param.splitn(2, KERNEL_KV_DELIMITER);
                // `splitn` always yields at least one (possibly empty) item,
                // so `key` is safe; a missing `=` leaves `value` empty.
                let key = kv.next().unwrap_or_default().to_string();
                let value = kv.next().unwrap_or_default().to_string();
                Param { key, value }
            })
            .collect();
        Self { params }
    }

    /// Render the parameters back into a kernel command-line string.
    /// Fails when any parameter has an empty key.
    pub(crate) fn to_string(&self) -> Result<String> {
        let mut parameters: Vec<String> = Vec::new();
        for param in &self.params {
            if param.key.is_empty() && param.value.is_empty() {
                return Err(anyhow!("Empty key and value"));
            } else if param.key.is_empty() {
                return Err(anyhow!("Empty key"));
            } else if param.value.is_empty() {
                // bare flag, e.g. `quiet`
                parameters.push(param.key.to_string());
            } else {
                parameters.push(format!(
                    "{}{}{}",
                    param.key, KERNEL_KV_DELIMITER, param.value
                ));
            }
        }
        Ok(parameters.join(KERNEL_PARAM_DELIMITER))
    }
}
#[cfg(test)]
mod tests {
    use anyhow::Result;

    use super::*;

    #[test]
    fn test_kernel_params() -> Result<()> {
        let expect_params_string = "k1=v1 k2=v2 k3=v3".to_string();
        let expect_params = KernelParams {
            params: vec![
                Param::new("k1", "v1"),
                Param::new("k2", "v2"),
                Param::new("k3", "v3"),
            ],
        };

        // check kernel params from string
        let kernel_params = KernelParams::from_string(&expect_params_string);
        assert_eq!(kernel_params, expect_params);

        // check kernel params to string
        let kernel_params_string = expect_params.to_string()?;
        assert_eq!(kernel_params_string, expect_params_string);

        Ok(())
    }

    #[test]
    fn test_kernel_params_valueless() -> Result<()> {
        // a token without '=' round-trips as a bare key with empty value
        let params = KernelParams::from_string("quiet panic=1");
        assert_eq!(
            params,
            KernelParams {
                params: vec![Param::new("quiet", ""), Param::new("panic", "1")],
            }
        );
        assert_eq!(params.to_string()?, "quiet panic=1");
        Ok(())
    }

    #[test]
    fn test_kernel_params_empty_key() {
        // an empty key cannot be rendered back to a command line
        let params = KernelParams {
            params: vec![Param::new("", "v")],
        };
        assert!(params.to_string().is_err());

        let params = KernelParams {
            params: vec![Param::new("", "")],
        };
        assert!(params.to_string().is_err());
    }
}

View File

@ -11,6 +11,10 @@ logging::logger_with_subsystem!(sl, "hypervisor");
pub mod device;
pub use device::*;
pub mod dragonball;
mod kernel_param;
pub use kernel_param::Param;
mod utils;
use std::collections::HashMap;
@ -18,9 +22,20 @@ use anyhow::Result;
use async_trait::async_trait;
use kata_types::config::hypervisor::Hypervisor as HypervisorConfig;
// Config which driver to use as vm root dev
const VM_ROOTFS_DRIVER_BLK: &str = "virtio-blk";
const VM_ROOTFS_DRIVER_PMEM: &str = "virtio-pmem";
#[derive(PartialEq)]
pub(crate) enum VmmState {
NotReady,
VmmServerReady,
VmRunning,
}
// vcpu mapping from vcpu number to thread number
#[derive(Debug)]
pub struct VcpuThreadIds {
/// List of tids of vcpu threads (vcpu index, tid)
pub vcpus: HashMap<u32, u32>,
}

View File

@ -0,0 +1,27 @@
// Copyright (c) 2019-2022 Alibaba Cloud
// Copyright (c) 2019-2022 Ant Group
//
// SPDX-License-Identifier: Apache-2.0
//
use std::collections::HashSet;
/// Collect the thread ids of process `pid` by listing `/proc/<pid>/task`.
///
/// Returns an empty set when the process does not exist, `/proc` is not
/// available, or the task directory cannot be read. Entries that are not
/// valid UTF-8 decimal numbers are skipped.
pub fn get_child_threads(pid: u32) -> HashSet<u32> {
    let task_dir = format!("/proc/{}/task", pid);
    match std::fs::read_dir(&task_dir) {
        Ok(entries) => entries
            .flatten()
            .filter_map(|entry| entry.file_name().into_string().ok())
            .filter_map(|name| name.parse::<u32>().ok())
            .collect(),
        Err(_) => HashSet::new(),
    }
}

View File

@ -4,7 +4,7 @@
// SPDX-License-Identifier: Apache-2.0
//
use std::{convert::TryFrom, net::Ipv4Addr};
use std::convert::TryFrom;
use agent::{ARPNeighbor, IPAddress, IPFamily, Interface, Route};
use anyhow::{Context, Result};
@ -16,7 +16,7 @@ use netlink_packet_route::{
use super::NetworkInfo;
use crate::network::utils::{
address::Address,
address::{parse_ip, Address},
link::{self, LinkAttrs},
};
@ -66,10 +66,15 @@ async fn handle_addresses(handle: &rtnetlink::Handle, attrs: &LinkAttrs) -> Resu
.set_link_index_filter(attrs.index)
.execute();
let mut addresses = Vec::new();
while let Some(addr_msg) = addr_msg_list.try_next().await? {
if addr_msg.header.family as i32 != libc::AF_INET {
warn!(sl!(), "unsupported ipv6 addr. {:?}", addr_msg);
let mut addresses = vec![];
while let Some(addr_msg) = addr_msg_list
.try_next()
.await
.context("try next address msg")?
{
let family = addr_msg.header.family as i32;
if family != libc::AF_INET && family != libc::AF_INET6 {
warn!(sl!(), "unsupported ip family {}", family);
continue;
}
let a = Address::try_from(addr_msg).context("get addr from msg")?;
@ -99,12 +104,13 @@ fn generate_neigh(name: &str, n: &NeighbourMessage) -> Result<ARPNeighbor> {
for nla in &n.nlas {
match nla {
Nla::Destination(addr) => {
if addr.len() != 4 {
continue;
}
let dest = Ipv4Addr::new(addr[0], addr[1], addr[2], addr[3]);
let dest = parse_ip(addr, n.header.family).context("parse ip")?;
let addr = Some(IPAddress {
family: IPFamily::V4,
family: if dest.is_ipv4() {
IPFamily::V4
} else {
IPFamily::V6
},
address: dest.to_string(),
mask: "".to_string(),
});
@ -136,7 +142,11 @@ async fn handle_neighbors(
let name = &attrs.name;
let mut neighs = vec![];
let mut neigh_msg_list = handle.neighbours().get().execute();
while let Some(neigh) = neigh_msg_list.try_next().await? {
while let Some(neigh) = neigh_msg_list
.try_next()
.await
.context("try next neigh msg")?
{
// get neigh filter with index
if neigh.header.ifindex == attrs.index {
neighs.push(generate_neigh(name, &neigh).context("generate neigh")?)
@ -170,10 +180,14 @@ fn generate_route(name: &str, route: &RouteMessage) -> Result<Option<Route>> {
}))
}
async fn handle_routes(handle: &rtnetlink::Handle, attrs: &LinkAttrs) -> Result<Vec<Route>> {
async fn get_route_from_msg(
routes: &mut Vec<Route>,
handle: &rtnetlink::Handle,
attrs: &LinkAttrs,
ip_version: rtnetlink::IpVersion,
) -> Result<()> {
let name = &attrs.name;
let mut routes = vec![];
let mut route_msg_list = handle.route().get(rtnetlink::IpVersion::V4).execute();
let mut route_msg_list = handle.route().get(ip_version).execute();
while let Some(route) = route_msg_list.try_next().await? {
// get route filter with index
if let Some(index) = route.output_interface() {
@ -184,6 +198,17 @@ async fn handle_routes(handle: &rtnetlink::Handle, attrs: &LinkAttrs) -> Result<
}
}
}
Ok(())
}
async fn handle_routes(handle: &rtnetlink::Handle, attrs: &LinkAttrs) -> Result<Vec<Route>> {
let mut routes = vec![];
get_route_from_msg(&mut routes, handle, attrs, rtnetlink::IpVersion::V4)
.await
.context("get ip v4 route")?;
get_route_from_msg(&mut routes, handle, attrs, rtnetlink::IpVersion::V6)
.await
.context("get ip v6 route")?;
Ok(routes)
}

View File

@ -70,13 +70,13 @@ impl NetworkModel for RouteModel {
// change sysctl for tap0_kata
// echo 1 > /proc/sys/net/ipv4/conf/tap0_kata/accept_local
let accept_local_path = format!("/proc/sys/net/ipv4/conf/{}/accept_local", &tap_name);
std::fs::write(&accept_local_path, "1".to_string())
std::fs::write(&accept_local_path, "1")
.with_context(|| format!("Failed to echo 1 > {}", &accept_local_path))?;
// echo 1 > /proc/sys/net/ipv4/conf/eth0/proxy_arp
// This enabled ARP reply on peer eth0 to prevent without any reply on VPC
let proxy_arp_path = format!("/proc/sys/net/ipv4/conf/{}/proxy_arp", &virt_name);
std::fs::write(&proxy_arp_path, "1".to_string())
std::fs::write(&proxy_arp_path, "1")
.with_context(|| format!("Failed to echo 1 > {}", &proxy_arp_path))?;
Ok(())

View File

@ -196,6 +196,8 @@ async fn create_endpoint(
.context("network info from link")?,
);
info!(sl!(), "network info {:?}", network_info);
Ok((endpoint, network_info))
}

View File

@ -41,19 +41,13 @@ impl TryFrom<AddressMessage> for Address {
valid_ltf: 0,
};
let mut local = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let mut dst = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
for nla in nlas.into_iter() {
match nla {
Nla::Address(a) => {
dst = parse_ip(a, header.family)?;
}
Nla::Local(a) => {
local = parse_ip(a, header.family)?;
addr.addr = parse_ip(&a, header.family)?;
}
Nla::Broadcast(b) => {
addr.broadcast = parse_ip(b, header.family)?;
addr.broadcast = parse_ip(&b, header.family)?;
}
Nla::Label(l) => {
addr.label = l;
@ -66,27 +60,11 @@ impl TryFrom<AddressMessage> for Address {
}
}
// IPv6 sends the local address as IFA_ADDRESS with no
// IFA_LOCAL, IPv4 sends both IFA_LOCAL and IFA_ADDRESS
// with IFA_ADDRESS being the peer address if they differ
//
// But obviously, as there are IPv6 PtP addresses, too,
// IFA_LOCAL should also be handled for IPv6.
if local.is_unspecified() {
if header.family == AF_INET as u8 && local == dst {
addr.addr = dst;
} else {
addr.addr = local;
addr.peer = dst;
}
} else {
addr.addr = dst;
}
Ok(addr)
}
}
fn parse_ip(ip: Vec<u8>, family: u8) -> Result<IpAddr> {
pub(crate) fn parse_ip(ip: &Vec<u8>, family: u8) -> Result<IpAddr> {
let support_len = if family as u16 == AF_INET { 4 } else { 16 };
if ip.len() != support_len {
return Err(anyhow!(

View File

@ -58,10 +58,7 @@ impl RootFsResource {
// Safe as single_layer_rootfs must have one layer
let layer = &mounts_vec[0];
let rootfs = if let Some(_dev_id) = get_block_device(&layer.source) {
// block rootfs
unimplemented!()
} else if let Some(share_fs) = share_fs {
let rootfs = if let Some(share_fs) = share_fs {
// share fs rootfs
let share_fs_mount = share_fs.get_share_fs_mount();
share_fs_rootfs::ShareFsRootfs::new(&share_fs_mount, cid, bundle_path, layer)
@ -102,6 +99,7 @@ fn is_single_layer_rootfs(rootfs_mounts: &[Mount]) -> bool {
rootfs_mounts.len() == 1
}
#[allow(dead_code)]
fn get_block_device(file_path: &str) -> Option<u64> {
if file_path.is_empty() {
return None;

View File

@ -15,7 +15,7 @@ use super::utils;
pub(crate) const MOUNT_GUEST_TAG: &str = "kataShared";
pub(crate) const PASSTHROUGH_FS_DIR: &str = "passthrough";
pub(crate) const FS_TYPE_VIRTIO_FS: &str = "virtio_fs";
pub(crate) const FS_TYPE_VIRTIO_FS: &str = "virtiofs";
pub(crate) const KATA_VIRTIO_FS_DEV_TYPE: &str = "virtio-fs";
const VIRTIO_FS_SOCKET: &str = "virtiofsd.sock";

View File

@ -22,10 +22,7 @@ use super::{
};
lazy_static! {
pub(crate) static ref SHARED_DIR_VIRTIO_FS_OPTIONS: Vec::<String> = vec![
String::from("default_permissions,allow_other,rootmode=040000,user_id=0,group_id=0"),
String::from("nodev"),
];
pub(crate) static ref SHARED_DIR_VIRTIO_FS_OPTIONS: Vec::<String> = vec![String::from("nodev")];
}
#[derive(Debug, Clone)]
@ -70,16 +67,13 @@ impl ShareFs for ShareVirtioFsInline {
// setup storage
let mut storages: Vec<Storage> = Vec::new();
let mut shared_options = SHARED_DIR_VIRTIO_FS_OPTIONS.clone();
shared_options.push(format!("tag={}", MOUNT_GUEST_TAG));
let shared_volume: Storage = Storage {
driver: String::from(KATA_VIRTIO_FS_DEV_TYPE),
driver_options: Vec::new(),
source: String::from(MOUNT_GUEST_TAG),
fs_type: String::from(FS_TYPE_VIRTIO_FS),
fs_group: None,
options: shared_options,
options: SHARED_DIR_VIRTIO_FS_OPTIONS.clone(),
mount_point: String::from(KATA_GUEST_SHARE_DIR),
};

View File

@ -133,6 +133,7 @@ impl ShareVirtioFsStandalone {
}
}
}
inner.pid = None;
Ok(())
}

View File

@ -53,9 +53,9 @@ pub(crate) fn get_host_rw_shared_path(id: &str) -> PathBuf {
fn do_get_guest_any_path(target: &str, cid: &str, is_volume: bool, is_virtiofs: bool) -> String {
let dir = PASSTHROUGH_FS_DIR;
let guest_share_dir = if is_virtiofs {
Path::new("/")
Path::new("/").to_path_buf()
} else {
Path::new(KATA_GUEST_SHARE_DIR)
Path::new(KATA_GUEST_SHARE_DIR).to_path_buf()
};
let path = if is_volume && !is_virtiofs {

View File

@ -19,7 +19,7 @@ slog-scope = "4.4.0"
strum = { version = "0.24.0", features = ["derive"] }
thiserror = "^1.0"
tokio = { version = "1.8.0", features = ["rt-multi-thread", "process", "fs"] }
ttrpc = { version = "0.6.0" }
ttrpc = { version = "0.6.1" }
agent = { path = "../../agent" }
kata-sys-util = { path = "../../../../libs/kata-sys-util" }

View File

@ -7,6 +7,7 @@ use std::sync::Arc;
use anyhow::Result;
use async_trait::async_trait;
use kata_types::config::TomlConfig;
use tokio::sync::mpsc::Sender;
use crate::{message::Message, ContainerManager, Sandbox};
@ -31,8 +32,12 @@ pub trait RuntimeHandler: Send + Sync {
where
Self: Sized;
async fn new_instance(&self, sid: &str, msg_sender: Sender<Message>)
-> Result<RuntimeInstance>;
async fn new_instance(
&self,
sid: &str,
msg_sender: Sender<Message>,
config: &TomlConfig,
) -> Result<RuntimeInstance>;
fn cleanup(&self, id: &str) -> Result<()>;
}

View File

@ -10,3 +10,4 @@ async-trait = "0.1.48"
tokio = { version = "1.8.0" }
common = { path = "../common" }
kata-types = { path = "../../../../libs/kata-types" }

View File

@ -8,6 +8,7 @@ use std::sync::Arc;
use anyhow::Result;
use async_trait::async_trait;
use common::{message::Message, RuntimeHandler, RuntimeInstance};
use kata_types::config::TomlConfig;
use tokio::sync::mpsc::Sender;
unsafe impl Send for LinuxContainer {}
@ -32,6 +33,7 @@ impl RuntimeHandler for LinuxContainer {
&self,
_sid: &str,
_msg_sender: Sender<Message>,
_config: &TomlConfig,
) -> Result<RuntimeInstance> {
todo!()
}

View File

@ -55,7 +55,7 @@ impl RuntimeHandlerManagerInner {
_ => return Err(anyhow!("Unsupported runtime: {}", &config.runtime.name)),
};
let runtime_instance = runtime_handler
.new_instance(&self.id, self.msg_sender.clone())
.new_instance(&self.id, self.msg_sender.clone(), config)
.await
.context("new runtime instance")?;
@ -276,6 +276,9 @@ fn load_config(spec: &oci::Spec) -> Result<TomlConfig> {
String::from("")
};
info!(sl!(), "get config path {:?}", &config_path);
let (toml_config, _) = TomlConfig::load_from_file(&config_path).context("load toml config")?;
let (mut toml_config, _) =
TomlConfig::load_from_file(&config_path).context("load toml config")?;
annotation.update_config_by_annotation(&mut toml_config)?;
info!(sl!(), "get config content {:?}", &toml_config);
Ok(toml_config)
}

View File

@ -130,8 +130,7 @@ impl Container {
&config.container_id,
spec.linux
.as_ref()
.map(|linux| linux.resources.as_ref())
.flatten(),
.and_then(|linux| linux.resources.as_ref()),
)
.await?;
@ -299,7 +298,7 @@ impl Container {
pub async fn pause(&self) -> Result<()> {
let inner = self.inner.read().await;
if inner.init_process.status == ProcessStatus::Paused {
if inner.init_process.get_status().await == ProcessStatus::Paused {
warn!(self.logger, "container is paused no need to pause");
return Ok(());
}
@ -312,7 +311,7 @@ impl Container {
pub async fn resume(&self) -> Result<()> {
let inner = self.inner.read().await;
if inner.init_process.status == ProcessStatus::Running {
if inner.init_process.get_status().await == ProcessStatus::Running {
warn!(self.logger, "container is running no need to resume");
return Ok(());
}
@ -331,8 +330,8 @@ impl Container {
) -> Result<()> {
let logger = logger_with_process(process);
let inner = self.inner.read().await;
if inner.init_process.status != ProcessStatus::Running {
warn!(logger, "container is running no need to resume");
if inner.init_process.get_status().await != ProcessStatus::Running {
warn!(logger, "container is not running");
return Ok(());
}
self.agent

View File

@ -49,8 +49,8 @@ impl ContainerInner {
self.init_process.process.container_id()
}
pub(crate) fn check_state(&self, states: Vec<ProcessStatus>) -> Result<()> {
let state = self.init_process.status;
pub(crate) async fn check_state(&self, states: Vec<ProcessStatus>) -> Result<()> {
let state = self.init_process.get_status().await;
if states.contains(&state) {
return Ok(());
}
@ -62,8 +62,9 @@ impl ContainerInner {
))
}
pub(crate) fn set_state(&mut self, state: ProcessStatus) {
self.init_process.status = state;
pub(crate) async fn set_state(&mut self, state: ProcessStatus) {
let mut status = self.init_process.status.write().await;
*status = state;
}
pub(crate) async fn start_exec_process(&mut self, process: &ContainerProcess) -> Result<()> {
@ -79,9 +80,9 @@ impl ContainerInner {
process: Some(exec.oci_process.clone()),
})
.await
.map(|_| {
exec.process.status = ProcessStatus::Running;
})
.context("exec process")?;
exec.process.set_status(ProcessStatus::Running).await;
Ok(())
}
pub(crate) async fn win_resize_process(
@ -91,6 +92,7 @@ impl ContainerInner {
width: u32,
) -> Result<()> {
self.check_state(vec![ProcessStatus::Created, ProcessStatus::Running])
.await
.context("check state")?;
self.agent
@ -118,6 +120,7 @@ impl ContainerInner {
pub(crate) async fn start_container(&mut self, cid: &ContainerID) -> Result<()> {
self.check_state(vec![ProcessStatus::Created, ProcessStatus::Stopped])
.await
.context("check state")?;
self.agent
@ -127,7 +130,7 @@ impl ContainerInner {
.await
.context("start container")?;
self.set_state(ProcessStatus::Running);
self.set_state(ProcessStatus::Running).await;
Ok(())
}
@ -179,7 +182,7 @@ impl ContainerInner {
// close the exit channel to wakeup wait service
// send to notify watchers who are waiting for the process exit
self.init_process.stop();
self.init_process.stop().await;
Ok(())
}
@ -192,6 +195,7 @@ impl ContainerInner {
info!(logger, "begin to stop process");
// do not stop again when state stopped, may cause multi cleanup resource
self.check_state(vec![ProcessStatus::Running])
.await
.context("check state")?;
// if use force mode to stop container, stop always successful
@ -215,7 +219,7 @@ impl ContainerInner {
.exec_processes
.get_mut(&process.exec_id)
.ok_or_else(|| anyhow!("failed to find exec"))?;
exec.process.stop();
exec.process.stop().await;
}
}

View File

@ -96,10 +96,11 @@ impl ContainerManager for VirtContainerManager {
let c = containers
.get(container_id)
.ok_or_else(|| Error::ContainerNotFound(container_id.to_string()))?;
let state = c.state_process(process).await.context("state process");
c.delete_exec_process(process)
.await
.context("delete process")?;
c.state_process(process).await.context("state process")
return state;
}
}
}

View File

@ -39,7 +39,7 @@ pub struct Process {
pub height: u32,
pub width: u32,
pub status: ProcessStatus,
pub status: Arc<RwLock<ProcessStatus>>,
pub exit_status: Arc<RwLock<ProcessExitStatus>>,
pub exit_watcher_rx: Option<watch::Receiver<bool>>,
@ -73,7 +73,7 @@ impl Process {
terminal,
height: 0,
width: 0,
status: ProcessStatus::Created,
status: Arc::new(RwLock::new(ProcessStatus::Created)),
exit_status: Arc::new(RwLock::new(ProcessExitStatus::new())),
exit_watcher_rx: Some(receiver),
exit_watcher_tx: Some(sender),
@ -133,8 +133,8 @@ impl Process {
let logger = self.logger.new(o!("io name" => io_name));
let _ = tokio::spawn(async move {
match tokio::io::copy(&mut reader, &mut writer).await {
Err(e) => warn!(logger, "io: failed to copy stdin stream {}", e),
Ok(length) => warn!(logger, "io: stop to copy stdin stream length {}", length),
Err(e) => warn!(logger, "io: failed to copy stream {}", e),
Ok(length) => warn!(logger, "io: stop to copy stream length {}", length),
};
wgw.done();
@ -147,8 +147,9 @@ impl Process {
let logger = self.logger.clone();
info!(logger, "start run io wait");
let process = self.process.clone();
let status = self.exit_status.clone();
let exit_status = self.exit_status.clone();
let exit_notifier = self.exit_watcher_tx.take();
let status = self.status.clone();
let _ = tokio::spawn(async move {
//wait on all of the container's io stream terminated
@ -171,8 +172,13 @@ impl Process {
info!(logger, "end wait process exit code {}", resp.status);
let mut locked_status = status.write().await;
locked_status.update_exit_code(resp.status);
let mut exit_status = exit_status.write().await;
exit_status.update_exit_code(resp.status);
drop(exit_status);
let mut status = status.write().await;
*status = ProcessStatus::Stopped;
drop(status);
drop(exit_notifier);
info!(logger, "end io wait thread");
@ -195,17 +201,28 @@ impl Process {
stdout: self.stdout.clone(),
stderr: self.stderr.clone(),
terminal: self.terminal,
status: self.status,
status: self.get_status().await,
exit_status: exit_status.exit_code,
exited_at: exit_status.exit_time,
})
}
pub fn stop(&mut self) {
self.status = ProcessStatus::Stopped;
pub async fn stop(&mut self) {
let mut status = self.status.write().await;
*status = ProcessStatus::Stopped;
}
pub async fn close_io(&mut self) {
self.wg_stdin.wait().await;
}
pub async fn get_status(&self) -> ProcessStatus {
let status = self.status.read().await;
*status
}
pub async fn set_status(&self, new_status: ProcessStatus) {
let mut status = self.status.write().await;
*status = new_status;
}
}

View File

@ -16,14 +16,16 @@ pub mod sandbox;
use std::sync::Arc;
use agent::kata::KataAgent;
use anyhow::{Context, Result};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use common::{message::Message, RuntimeHandler, RuntimeInstance};
use hypervisor::Hypervisor;
use hypervisor::{dragonball::Dragonball, Hypervisor};
use kata_types::config::{hypervisor::register_hypervisor_plugin, DragonballConfig, TomlConfig};
use resource::ResourceManager;
use tokio::sync::mpsc::Sender;
const HYPERVISOR_DRAGONBALL: &str = "dragonball";
unsafe impl Send for VirtContainer {}
unsafe impl Sync for VirtContainer {}
pub struct VirtContainer {}
@ -49,14 +51,9 @@ impl RuntimeHandler for VirtContainer {
&self,
sid: &str,
msg_sender: Sender<Message>,
config: &TomlConfig,
) -> Result<RuntimeInstance> {
let (toml_config, _) = TomlConfig::load_from_file("").context("load config")?;
// TODO: new sandbox and container manager
// TODO: get from hypervisor
let hypervisor = new_hypervisor(&toml_config)
.await
.context("new hypervisor")?;
let hypervisor = new_hypervisor(config).await.context("new hypervisor")?;
// get uds from hypervisor and get config from toml_config
let agent = Arc::new(KataAgent::new(kata_types::config::Agent {
@ -77,7 +74,7 @@ impl RuntimeHandler for VirtContainer {
sid,
agent.clone(),
hypervisor.clone(),
&toml_config,
config,
)?);
let pid = std::process::id();
@ -104,7 +101,24 @@ impl RuntimeHandler for VirtContainer {
}
}
async fn new_hypervisor(_toml_config: &TomlConfig) -> Result<Arc<dyn Hypervisor>> {
// TODO: implement ready hypervisor
todo!()
async fn new_hypervisor(toml_config: &TomlConfig) -> Result<Arc<dyn Hypervisor>> {
let hypervisor_name = &toml_config.runtime.hypervisor_name;
let hypervisor_config = toml_config
.hypervisor
.get(hypervisor_name)
.ok_or_else(|| anyhow!("failed to get hypervisor for {}", &hypervisor_name))
.context("get hypervisor")?;
// TODO: support other hypervisor
// issue: https://github.com/kata-containers/kata-containers/issues/4634
match hypervisor_name.as_str() {
HYPERVISOR_DRAGONBALL => {
let mut hypervisor = Dragonball::new();
hypervisor
.set_hypervisor_config(hypervisor_config.clone())
.await;
Ok(Arc::new(hypervisor))
}
_ => Err(anyhow!("Unsupported hypervisor {}", &hypervisor_name)),
}
}

View File

@ -16,7 +16,10 @@ use common::{
use containerd_shim_protos::events::task::TaskOOM;
use hypervisor::Hypervisor;
use kata_types::config::TomlConfig;
use resource::{ResourceConfig, ResourceManager};
use resource::{
network::{NetworkConfig, NetworkWithNetNsConfig},
ResourceConfig, ResourceManager,
};
use tokio::sync::{mpsc::Sender, Mutex, RwLock};
use crate::health_check::HealthCheck;
@ -68,19 +71,32 @@ impl VirtSandbox {
agent,
hypervisor,
resource_manager,
monitor: Arc::new(HealthCheck::new(true, true)),
monitor: Arc::new(HealthCheck::new(true, false)),
})
}
async fn prepare_for_start_sandbox(
&self,
_id: &str,
netns: Option<String>,
_config: &TomlConfig,
config: &TomlConfig,
) -> Result<Vec<ResourceConfig>> {
let mut resource_configs = vec![];
if let Some(_netns_path) = netns {
// TODO: support network
if let Some(netns_path) = netns {
let network_config = ResourceConfig::Network(NetworkConfig::NetworkResourceWithNetNs(
NetworkWithNetNsConfig {
network_model: config.runtime.internetworking_model.clone(),
netns_path,
queues: self
.hypervisor
.hypervisor_config()
.await
.network_info
.network_queues as usize,
},
));
resource_configs.push(network_config);
}
let hypervisor_config = self.hypervisor.hypervisor_config().await;
@ -111,7 +127,7 @@ impl Sandbox for VirtSandbox {
// generate device and setup before start vm
// should after hypervisor.prepare_vm
let resources = self.prepare_for_start_sandbox(netns, config).await?;
let resources = self.prepare_for_start_sandbox(id, netns, config).await?;
self.resource_manager
.prepare_before_start_vm(resources)
.await

View File

@ -10,3 +10,4 @@ async-trait = "0.1.48"
tokio = { version = "1.8.0" }
common = { path = "../common" }
kata-types = { path = "../../../../libs/kata-types" }

View File

@ -8,8 +8,8 @@ use std::sync::Arc;
use anyhow::Result;
use async_trait::async_trait;
use common::{message::Message, RuntimeHandler, RuntimeInstance};
use kata_types::config::TomlConfig;
use tokio::sync::mpsc::Sender;
unsafe impl Send for WasmContainer {}
unsafe impl Sync for WasmContainer {}
pub struct WasmContainer {}
@ -32,6 +32,7 @@ impl RuntimeHandler for WasmContainer {
&self,
_sid: &str,
_msg_sender: Sender<Message>,
_config: &TomlConfig,
) -> Result<RuntimeInstance> {
todo!()
}

View File

@ -10,7 +10,7 @@ async-trait = "0.1.48"
slog = "2.5.2"
slog-scope = "4.4.0"
tokio = { version = "1.8.0", features = ["rt-multi-thread"] }
ttrpc = { version = "0.6.0" }
ttrpc = { version = "0.6.1" }
common = { path = "../runtimes/common" }
containerd-shim-protos = { version = "0.2.0", features = ["async"]}

View File

@ -47,9 +47,8 @@ where
.await
.map_err(|err| ttrpc::Error::Others(format!("failed to handler message {:?}", err)))?;
debug!(logger, "<==== task service {:?}", &resp);
Ok(resp
.try_into()
.map_err(|err| ttrpc::Error::Others(format!("failed to translate to shim {:?}", err)))?)
resp.try_into()
.map_err(|err| ttrpc::Error::Others(format!("failed to translate to shim {:?}", err)))
}
macro_rules! impl_service {

View File

@ -22,8 +22,8 @@ log = "0.4.14"
nix = "0.23.1"
protobuf = "2.27.0"
sha2 = "=0.9.3"
slog = {version = "2.7.0", features = ["std", "release_max_level_trace", "max_level_trace"]}
slog-async = "2.7.0"
slog = {version = "2.5.2", features = ["std", "release_max_level_trace", "max_level_trace"]}
slog-async = "2.5.2"
slog-scope = "4.4.0"
slog-stdlog = "4.1.0"
thiserror = "1.0.30"

View File

@ -4,10 +4,12 @@
// SPDX-License-Identifier: Apache-2.0
//
use std::{boxed::Box, ops::Deref};
use std::{boxed::Box, fs::OpenOptions, io::Write, ops::Deref};
use backtrace::Backtrace;
const KMESG_DEVICE: &str = "/dev/kmsg";
// TODO: the Kata 1.x runtime had a SIGUSR1 handler that would log a formatted backtrace on
// receiving that signal. It could be useful to re-add that feature.
pub(crate) fn set_panic_hook() {
@ -36,6 +38,20 @@ pub(crate) fn set_panic_hook() {
"A panic occurred at {}:{}: {}\r\n{:?}", filename, line, cause, bt_data
);
// print panic log to dmesg
// The panic log size is too large to /dev/kmsg, so write by line.
if let Ok(mut file) = OpenOptions::new().write(true).open(KMESG_DEVICE) {
file.write_all(
format!("A panic occurred at {}:{}: {}", filename, line, cause).as_bytes(),
)
.ok();
let lines: Vec<&str> = bt_data.split('\n').collect();
for line in lines {
file.write_all(line.as_bytes()).ok();
}
file.flush().ok();
}
std::process::abort();
}));
}

View File

@ -509,19 +509,6 @@ version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
[[package]]
name = "nix"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f5e06129fb611568ef4e868c14b326274959aa70ff7776e9d55323531c374945"
dependencies = [
"bitflags",
"cc",
"cfg-if 1.0.0",
"libc",
"memoffset",
]
[[package]]
name = "nix"
version = "0.23.1"

View File

@ -63,16 +63,6 @@ version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "bytes"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c"
dependencies = [
"byteorder",
"iovec",
]
[[package]]
name = "bytes"
version = "1.1.0"
@ -127,7 +117,7 @@ checksum = "cdae996d9638ba03253ffa1c93345a585974a97abbdeab9176c77922f3efc1e8"
dependencies = [
"libc",
"log",
"nix 0.23.1",
"nix",
"regex",
]
@ -495,15 +485,6 @@ dependencies = [
"cfg-if 1.0.0",
]
[[package]]
name = "iovec"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e"
dependencies = [
"libc",
]
[[package]]
name = "itertools"
version = "0.10.3"
@ -540,7 +521,7 @@ dependencies = [
"derive_builder",
"libc",
"logging",
"nix 0.23.1",
"nix",
"oci",
"rustjail",
"serde",
@ -632,19 +613,6 @@ version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
[[package]]
name = "nix"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd0eaf8df8bab402257e0a5c17a254e4cc1f72a93588a1ddfb5d356c801aa7cb"
dependencies = [
"bitflags",
"cc",
"cfg-if 0.1.10",
"libc",
"void",
]
[[package]]
name = "nix"
version = "0.23.1"
@ -830,7 +798,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020"
dependencies = [
"bytes 1.1.0",
"bytes",
"prost-derive",
]
@ -840,7 +808,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603"
dependencies = [
"bytes 1.1.0",
"bytes",
"heck 0.3.3",
"itertools",
"log",
@ -871,15 +839,15 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b"
dependencies = [
"bytes 1.1.0",
"bytes",
"prost",
]
[[package]]
name = "protobuf"
version = "2.14.0"
version = "2.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485"
checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96"
dependencies = [
"serde",
"serde_derive",
@ -887,18 +855,18 @@ dependencies = [
[[package]]
name = "protobuf-codegen"
version = "2.14.0"
version = "2.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de113bba758ccf2c1ef816b127c958001b7831136c9bc3f8e9ec695ac4e82b0c"
checksum = "aec1632b7c8f2e620343439a7dfd1f3c47b18906c4be58982079911482b5d707"
dependencies = [
"protobuf",
]
[[package]]
name = "protobuf-codegen-pure"
version = "2.14.0"
version = "2.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d1a4febc73bf0cada1d77c459a0c8e5973179f1cfd5b0f1ab789d45b17b6440"
checksum = "9f8122fdb18e55190c796b088a16bdb70cd7acdcd48f7a8b796b58c62e532cc6"
dependencies = [
"protobuf",
"protobuf-codegen",
@ -908,7 +876,7 @@ dependencies = [
name = "protocols"
version = "0.1.0"
dependencies = [
"async-trait",
"oci",
"protobuf",
"ttrpc",
"ttrpc-codegen",
@ -978,7 +946,7 @@ dependencies = [
"libcontainer",
"liboci-cli",
"logging",
"nix 0.23.1",
"nix",
"oci",
"rustjail",
"serde",
@ -1004,7 +972,7 @@ dependencies = [
"inotify",
"lazy_static",
"libc",
"nix 0.23.1",
"nix",
"oci",
"path-absolutize",
"protobuf",
@ -1269,7 +1237,7 @@ version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"
dependencies = [
"bytes 1.1.0",
"bytes",
"libc",
"memchr",
"mio",
@ -1294,36 +1262,19 @@ dependencies = [
"syn",
]
[[package]]
name = "tokio-vsock"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e0723fc001950a3b018947b05eeb45014fd2b7c6e8f292502193ab74486bdb6"
dependencies = [
"bytes 0.4.12",
"futures",
"libc",
"tokio",
"vsock",
]
[[package]]
name = "ttrpc"
version = "0.5.1"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "004604e91de38bc16cb9c7898187343075388ea414ad24896a21fc4e91a7c861"
checksum = "2ecfff459a859c6ba6668ff72b34c2f1d94d9d58f7088414c2674ad0f31cc7d8"
dependencies = [
"async-trait",
"byteorder",
"futures",
"libc",
"log",
"nix 0.16.1",
"nix",
"protobuf",
"protobuf-codegen-pure",
"thiserror",
"tokio",
"tokio-vsock",
]
[[package]]
@ -1387,22 +1338,6 @@ version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "void"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
[[package]]
name = "vsock"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e32675ee2b3ce5df274c0ab52d19b28789632406277ca26bffee79a8e27dc133"
dependencies = [
"libc",
"nix 0.23.1",
]
[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"

View File

@ -528,9 +528,9 @@ dependencies = [
[[package]]
name = "protobuf"
version = "2.14.0"
version = "2.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e86d370532557ae7573551a1ec8235a0f8d6cb276c7c9e6aa490b511c447485"
checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96"
[[package]]
name = "quote"