Merge pull request #1251 from bergwolf/backport-2.0.0

Backport to stable-2.0 branch
This commit is contained in:
Eric Ernst 2021-01-13 12:25:15 -08:00 committed by GitHub
commit 4f7f25d1a1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
31 changed files with 625 additions and 196 deletions

3
src/agent/Cargo.lock generated
View File

@ -1174,7 +1174,8 @@ dependencies = [
[[package]] [[package]]
name = "ttrpc" name = "ttrpc"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/containerd/ttrpc-rust.git?branch=0.3.0#ba1efe3bbb8f8af4895b7623ed1d11561e70e566" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa9da24c351f0feef5e66c0b28c18373a7ef3e1bfdfd5852170de494f9bf870"
dependencies = [ dependencies = [
"byteorder", "byteorder",
"libc", "libc",

View File

@ -11,7 +11,7 @@ rustjail = { path = "rustjail" }
protocols = { path = "protocols" } protocols = { path = "protocols" }
netlink = { path = "netlink", features = ["with-log", "with-agent-handler"] } netlink = { path = "netlink", features = ["with-log", "with-agent-handler"] }
lazy_static = "1.3.0" lazy_static = "1.3.0"
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" } ttrpc = "0.3.0"
protobuf = "=2.14.0" protobuf = "=2.14.0"
libc = "0.2.58" libc = "0.2.58"
nix = "0.17.0" nix = "0.17.0"

View File

@ -142,7 +142,7 @@ pub struct User {
pub gid: u32, pub gid: u32,
#[serde( #[serde(
default, default,
rename = "addtionalGids", rename = "additionalGids",
skip_serializing_if = "Vec::is_empty" skip_serializing_if = "Vec::is_empty"
)] )]
pub additional_gids: Vec<u32>, pub additional_gids: Vec<u32>,
@ -302,6 +302,7 @@ pub struct LinuxBlockIODevice {
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] #[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
pub struct LinuxWeightDevice { pub struct LinuxWeightDevice {
#[serde(flatten)]
pub blk: LinuxBlockIODevice, pub blk: LinuxBlockIODevice,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub weight: Option<u16>, pub weight: Option<u16>,
@ -315,6 +316,7 @@ pub struct LinuxWeightDevice {
#[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] #[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)]
pub struct LinuxThrottleDevice { pub struct LinuxThrottleDevice {
#[serde(flatten)]
pub blk: LinuxBlockIODevice, pub blk: LinuxBlockIODevice,
#[serde(default)] #[serde(default)]
pub rate: u64, pub rate: u64,
@ -375,7 +377,7 @@ pub struct LinuxMemory {
#[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelTCP")] #[serde(default, skip_serializing_if = "Option::is_none", rename = "kernelTCP")]
pub kernel_tcp: Option<i64>, pub kernel_tcp: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub swapiness: Option<i64>, pub swappiness: Option<i64>,
#[serde( #[serde(
default, default,
skip_serializing_if = "Option::is_none", skip_serializing_if = "Option::is_none",
@ -833,7 +835,7 @@ mod tests {
} }
#[test] #[test]
fn test_deserialize_sepc() { fn test_deserialize_spec() {
let data = r#"{ let data = r#"{
"ociVersion": "1.0.1", "ociVersion": "1.0.1",
"process": { "process": {
@ -1118,36 +1120,28 @@ mod tests {
"leafWeight": 10, "leafWeight": 10,
"weightDevice": [ "weightDevice": [
{ {
"blk": {
"major": 8, "major": 8,
"minor": 0 "minor": 0,
},
"weight": 500, "weight": 500,
"leafWeight": 300 "leafWeight": 300
}, },
{ {
"blk":{
"major": 8, "major": 8,
"minor": 16 "minor": 16,
},
"weight": 500 "weight": 500
} }
], ],
"throttleReadBpsDevice": [ "throttleReadBpsDevice": [
{ {
"blk":{
"major": 8, "major": 8,
"minor": 0 "minor": 0,
},
"rate": 600 "rate": 600
} }
], ],
"throttleWriteIOPSDevice": [ "throttleWriteIOPSDevice": [
{ {
"blk":{
"major": 8, "major": 8,
"minor": 16 "minor": 16,
},
"rate": 300 "rate": 300
} }
] ]
@ -1223,8 +1217,7 @@ mod tests {
uid: 1, uid: 1,
gid: 1, gid: 1,
// incompatible with oci // incompatible with oci
// additional_gids: vec![5, 6], additional_gids: vec![5, 6],
additional_gids: vec![],
username: "".to_string(), username: "".to_string(),
}, },
args: vec!["sh".to_string()], args: vec!["sh".to_string()],
@ -1437,8 +1430,7 @@ mod tests {
swap: Some(536870912), swap: Some(536870912),
kernel: Some(-1), kernel: Some(-1),
kernel_tcp: Some(-1), kernel_tcp: Some(-1),
// incompatible with oci swappiness: Some(0),
swapiness: None,
disable_oom_killer: Some(false), disable_oom_killer: Some(false),
}), }),
cpu: Some(crate::LinuxCPU { cpu: Some(crate::LinuxCPU {
@ -1591,25 +1583,6 @@ mod tests {
vm: None, vm: None,
}; };
// warning : incompatible with oci : https://github.com/opencontainers/runtime-spec/blob/master/config.md
// 1. User use addtionalGids while oci use additionalGids
// 2. LinuxMemory use swapiness while oci use swappiness
// 3. LinuxWeightDevice with blk
// {
// "blk": {
// "major": 8,
// "minor": 0
// },
// "weight": 500,
// "leafWeight": 300
// }
// oci without blk
// {
// "major": 8,
// "minor": 0,
// "weight": 500,
// "leafWeight": 300
// }
let current: crate::Spec = serde_json::from_str(data).unwrap(); let current: crate::Spec = serde_json::from_str(data).unwrap();
assert_eq!(expected, current); assert_eq!(expected, current);
} }

View File

@ -5,7 +5,7 @@ authors = ["The Kata Containers community <kata-dev@lists.katacontainers.io>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
ttrpc = { git = "https://github.com/containerd/ttrpc-rust.git", branch="0.3.0" } ttrpc = "0.3.0"
protobuf = "=2.14.0" protobuf = "=2.14.0"
futures = "0.1.27" futures = "0.1.27"

View File

@ -67,6 +67,15 @@ pub fn load<'a>(h: Box<&'a dyn cgroups::Hierarchy>, path: &str) -> Option<Cgroup
} }
} }
macro_rules! get_controller_or_return_singular_none {
($cg:ident) => {
match $cg.controller_of() {
Some(c) => c,
None => return SingularPtrField::none(),
}
};
}
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Manager { pub struct Manager {
pub paths: HashMap<String, String>, pub paths: HashMap<String, String>,
@ -421,13 +430,13 @@ fn set_memory_resources(cg: &cgroups::Cgroup, memory: &LinuxMemory, update: bool
} }
} }
if let Some(swapiness) = memory.swapiness { if let Some(swappiness) = memory.swappiness {
if swapiness >= 0 && swapiness <= 100 { if swappiness >= 0 && swappiness <= 100 {
mem_controller.set_swappiness(swapiness as u64)?; mem_controller.set_swappiness(swappiness as u64)?;
} else { } else {
return Err(anyhow!( return Err(anyhow!(
"invalid value:{}. valid memory swappiness range is 0-100", "invalid value:{}. valid memory swappiness range is 0-100",
swapiness swappiness
)); ));
} }
} }
@ -605,10 +614,8 @@ lazy_static! {
} }
fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> { fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> {
let cpu_controller: &CpuController = cg.controller_of().unwrap(); let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
let stat = cpu_controller.cpu().stat; let stat = cpu_controller.cpu().stat;
let h = lines_to_map(&stat); let h = lines_to_map(&stat);
SingularPtrField::some(ThrottlingData { SingularPtrField::some(ThrottlingData {
@ -621,39 +628,7 @@ fn get_cpu_stats(cg: &cgroups::Cgroup) -> SingularPtrField<ThrottlingData> {
} }
fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> { fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
let cpuacct_controller: Option<&CpuAcctController> = cg.controller_of(); if let Some(cpuacct_controller) = cg.controller_of::<CpuAcctController>() {
if cpuacct_controller.is_none() {
if cg.v2() {
return SingularPtrField::some(CpuUsage {
total_usage: 0,
percpu_usage: vec![],
usage_in_kernelmode: 0,
usage_in_usermode: 0,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
});
}
// try to get from cpu controller
let cpu_controller: &CpuController = cg.controller_of().unwrap();
let stat = cpu_controller.cpu().stat;
let h = lines_to_map(&stat);
let usage_in_usermode = *h.get("user_usec").unwrap();
let usage_in_kernelmode = *h.get("system_usec").unwrap();
let total_usage = *h.get("usage_usec").unwrap();
let percpu_usage = vec![];
return SingularPtrField::some(CpuUsage {
total_usage,
percpu_usage,
usage_in_kernelmode,
usage_in_usermode,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
});
}
let cpuacct_controller = cpuacct_controller.unwrap();
let cpuacct = cpuacct_controller.cpuacct(); let cpuacct = cpuacct_controller.cpuacct();
let h = lines_to_map(&cpuacct.stat); let h = lines_to_map(&cpuacct.stat);
@ -666,6 +641,36 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
let percpu_usage = line_to_vec(&cpuacct.usage_percpu); let percpu_usage = line_to_vec(&cpuacct.usage_percpu);
return SingularPtrField::some(CpuUsage {
total_usage,
percpu_usage,
usage_in_kernelmode,
usage_in_usermode,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
});
}
if cg.v2() {
return SingularPtrField::some(CpuUsage {
total_usage: 0,
percpu_usage: vec![],
usage_in_kernelmode: 0,
usage_in_usermode: 0,
unknown_fields: UnknownFields::default(),
cached_size: CachedSize::default(),
});
}
// try to get from cpu controller
let cpu_controller: &CpuController = get_controller_or_return_singular_none!(cg);
let stat = cpu_controller.cpu().stat;
let h = lines_to_map(&stat);
let usage_in_usermode = *h.get("user_usec").unwrap();
let usage_in_kernelmode = *h.get("system_usec").unwrap();
let total_usage = *h.get("usage_usec").unwrap();
let percpu_usage = vec![];
SingularPtrField::some(CpuUsage { SingularPtrField::some(CpuUsage {
total_usage, total_usage,
percpu_usage, percpu_usage,
@ -677,7 +682,7 @@ fn get_cpuacct_stats(cg: &cgroups::Cgroup) -> SingularPtrField<CpuUsage> {
} }
fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> { fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
let memory_controller: &MemController = cg.controller_of().unwrap(); let memory_controller: &MemController = get_controller_or_return_singular_none!(cg);
// cache from memory stat // cache from memory stat
let memory = memory_controller.memory_stat(); let memory = memory_controller.memory_stat();
@ -734,7 +739,7 @@ fn get_memory_stats(cg: &cgroups::Cgroup) -> SingularPtrField<MemoryStats> {
} }
fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> { fn get_pids_stats(cg: &cgroups::Cgroup) -> SingularPtrField<PidsStats> {
let pid_controller: &PidController = cg.controller_of().unwrap(); let pid_controller: &PidController = get_controller_or_return_singular_none!(cg);
let current = pid_controller.get_pid_current().unwrap_or(0); let current = pid_controller.get_pid_current().unwrap_or(0);
let max = pid_controller.get_pid_max(); let max = pid_controller.get_pid_max();
@ -841,7 +846,7 @@ fn build_blkio_stats_entry(major: i16, minor: i16, op: &str, value: u64) -> Blki
} }
fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> { fn get_blkio_stats_v2(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
let blkio_controller: &BlkIoController = cg.controller_of().unwrap(); let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
let blkio = blkio_controller.blkio(); let blkio = blkio_controller.blkio();
let mut resp = BlkioStats::new(); let mut resp = BlkioStats::new();
@ -869,7 +874,7 @@ fn get_blkio_stats(cg: &cgroups::Cgroup) -> SingularPtrField<BlkioStats> {
return get_blkio_stats_v2(&cg); return get_blkio_stats_v2(&cg);
} }
let blkio_controller: &BlkIoController = cg.controller_of().unwrap(); let blkio_controller: &BlkIoController = get_controller_or_return_singular_none!(cg);
let blkio = blkio_controller.blkio(); let blkio = blkio_controller.blkio();
let mut m = BlkioStats::new(); let mut m = BlkioStats::new();

View File

@ -309,7 +309,7 @@ pub fn resources_grpc_to_oci(res: &grpcLinuxResources) -> ociLinuxResources {
swap: Some(mem.Swap), swap: Some(mem.Swap),
kernel: Some(mem.Kernel), kernel: Some(mem.Kernel),
kernel_tcp: Some(mem.KernelTCP), kernel_tcp: Some(mem.KernelTCP),
swapiness: Some(mem.Swappiness as i64), swappiness: Some(mem.Swappiness as i64),
disable_oom_killer: Some(mem.DisableOOMKiller), disable_oom_killer: Some(mem.DisableOOMKiller),
}) })
} else { } else {

View File

@ -185,6 +185,7 @@ pub fn init_rootfs(
None::<&str>, None::<&str>,
)?; )?;
let mut bind_mount_dev = false;
for m in &spec.mounts { for m in &spec.mounts {
let (mut flags, data) = parse_mount(&m); let (mut flags, data) = parse_mount(&m);
if !m.destination.starts_with("/") || m.destination.contains("..") { if !m.destination.starts_with("/") || m.destination.contains("..") {
@ -198,6 +199,9 @@ pub fn init_rootfs(
mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?; mount_cgroups(cfd_log, &m, rootfs, flags, &data, cpath, mounts)?;
} else { } else {
if m.destination == "/dev" { if m.destination == "/dev" {
if m.r#type == "bind" {
bind_mount_dev = true;
}
flags &= !MsFlags::MS_RDONLY; flags &= !MsFlags::MS_RDONLY;
} }
@ -239,9 +243,14 @@ pub fn init_rootfs(
let olddir = unistd::getcwd()?; let olddir = unistd::getcwd()?;
unistd::chdir(rootfs)?; unistd::chdir(rootfs)?;
// in case the /dev directory was binded mount from guest,
// then there's no need to create devices nodes and symlinks
// in /dev.
if !bind_mount_dev {
default_symlinks()?; default_symlinks()?;
create_devices(&linux.devices, bind_device)?; create_devices(&linux.devices, bind_device)?;
ensure_ptmx()?; ensure_ptmx()?;
}
unistd::chdir(&olddir)?; unistd::chdir(&olddir)?;

View File

@ -72,8 +72,16 @@ fn read_count(fd: RawFd, count: usize) -> Result<Vec<u8>> {
} }
} }
if len != count {
Err(anyhow::anyhow!(
"invalid read count expect {} get {}",
count,
len
))
} else {
Ok(v[0..len].to_vec()) Ok(v[0..len].to_vec())
} }
}
pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> { pub fn read_sync(fd: RawFd) -> Result<Vec<u8>> {
let buf = read_count(fd, MSG_SIZE)?; let buf = read_count(fd, MSG_SIZE)?;

View File

@ -4,7 +4,7 @@
// //
use crate::container::Config; use crate::container::Config;
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Context, Result};
use lazy_static; use lazy_static;
use nix::errno::Errno; use nix::errno::Errno;
use oci::{LinuxIDMapping, LinuxNamespace, Spec}; use oci::{LinuxIDMapping, LinuxNamespace, Spec};
@ -52,7 +52,11 @@ fn rootfs(root: &str) -> Result<()> {
continue; continue;
} }
stack.push(c.as_os_str().to_str().unwrap().to_string()); if let Some(v) = c.as_os_str().to_str() {
stack.push(v.to_string());
} else {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
} }
let mut cleaned = PathBuf::from("/"); let mut cleaned = PathBuf::from("/");
@ -60,7 +64,7 @@ fn rootfs(root: &str) -> Result<()> {
cleaned.push(e); cleaned.push(e);
} }
let canon = path.canonicalize()?; let canon = path.canonicalize().context("canonicalize")?;
if cleaned != canon { if cleaned != canon {
// There is symbolic in path // There is symbolic in path
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
@ -78,10 +82,10 @@ fn hostname(oci: &Spec) -> Result<()> {
return Ok(()); return Ok(());
} }
if oci.linux.is_none() { let linux = oci
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); .linux
} .as_ref()
let linux = oci.linux.as_ref().unwrap(); .ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
if !contain_namespace(&linux.namespaces, "uts") { if !contain_namespace(&linux.namespaces, "uts") {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
@ -90,8 +94,11 @@ fn hostname(oci: &Spec) -> Result<()> {
} }
fn security(oci: &Spec) -> Result<()> { fn security(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap(); let linux = oci
if linux.masked_paths.len() == 0 && linux.readonly_paths.len() == 0 { .linux
.as_ref()
.ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
if linux.masked_paths.is_empty() && linux.readonly_paths.is_empty() {
return Ok(()); return Ok(());
} }
@ -115,7 +122,10 @@ fn idmapping(maps: &Vec<LinuxIDMapping>) -> Result<()> {
} }
fn usernamespace(oci: &Spec) -> Result<()> { fn usernamespace(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap(); let linux = oci
.linux
.as_ref()
.ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
if contain_namespace(&linux.namespaces, "user") { if contain_namespace(&linux.namespaces, "user") {
let user_ns = PathBuf::from("/proc/self/ns/user"); let user_ns = PathBuf::from("/proc/self/ns/user");
if !user_ns.exists() { if !user_ns.exists() {
@ -123,8 +133,8 @@ fn usernamespace(oci: &Spec) -> Result<()> {
} }
// check if idmappings is correct, at least I saw idmaps // check if idmappings is correct, at least I saw idmaps
// with zero size was passed to agent // with zero size was passed to agent
idmapping(&linux.uid_mappings)?; idmapping(&linux.uid_mappings).context("idmapping uid")?;
idmapping(&linux.gid_mappings)?; idmapping(&linux.gid_mappings).context("idmapping gid")?;
} else { } else {
// no user namespace but idmap // no user namespace but idmap
if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 { if linux.uid_mappings.len() != 0 || linux.gid_mappings.len() != 0 {
@ -136,7 +146,10 @@ fn usernamespace(oci: &Spec) -> Result<()> {
} }
fn cgroupnamespace(oci: &Spec) -> Result<()> { fn cgroupnamespace(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap(); let linux = oci
.linux
.as_ref()
.ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
if contain_namespace(&linux.namespaces, "cgroup") { if contain_namespace(&linux.namespaces, "cgroup") {
let path = PathBuf::from("/proc/self/ns/cgroup"); let path = PathBuf::from("/proc/self/ns/cgroup");
if !path.exists() { if !path.exists() {
@ -165,14 +178,20 @@ fn check_host_ns(path: &str) -> Result<()> {
let cpath = PathBuf::from(path); let cpath = PathBuf::from(path);
let hpath = PathBuf::from("/proc/self/ns/net"); let hpath = PathBuf::from("/proc/self/ns/net");
let real_hpath = hpath.read_link()?; let real_hpath = hpath
let meta = cpath.symlink_metadata()?; .read_link()
.context(format!("read link {:?}", hpath))?;
let meta = cpath
.symlink_metadata()
.context(format!("symlink metadata {:?}", cpath))?;
let file_type = meta.file_type(); let file_type = meta.file_type();
if !file_type.is_symlink() { if !file_type.is_symlink() {
return Ok(()); return Ok(());
} }
let real_cpath = cpath.read_link()?; let real_cpath = cpath
.read_link()
.context(format!("read link {:?}", cpath))?;
if real_cpath == real_hpath { if real_cpath == real_hpath {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
@ -181,7 +200,10 @@ fn check_host_ns(path: &str) -> Result<()> {
} }
fn sysctl(oci: &Spec) -> Result<()> { fn sysctl(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap(); let linux = oci
.linux
.as_ref()
.ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
for (key, _) in linux.sysctl.iter() { for (key, _) in linux.sysctl.iter() {
if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") { if SYSCTLS.contains_key(key.as_str()) || key.starts_with("fs.mqueue.") {
if contain_namespace(&linux.namespaces, "ipc") { if contain_namespace(&linux.namespaces, "ipc") {
@ -192,18 +214,10 @@ fn sysctl(oci: &Spec) -> Result<()> {
} }
if key.starts_with("net.") { if key.starts_with("net.") {
if !contain_namespace(&linux.namespaces, "network") { // the network ns is shared with the guest, don't expect to find it in spec
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
}
let net = get_namespace_path(&linux.namespaces, "network")?;
if net.is_empty() || net == "".to_string() {
continue; continue;
} }
check_host_ns(net.as_str())?;
}
if contain_namespace(&linux.namespaces, "uts") { if contain_namespace(&linux.namespaces, "uts") {
if key == "kernel.domainname" { if key == "kernel.domainname" {
continue; continue;
@ -220,7 +234,10 @@ fn sysctl(oci: &Spec) -> Result<()> {
} }
fn rootless_euid_mapping(oci: &Spec) -> Result<()> { fn rootless_euid_mapping(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap(); let linux = oci
.linux
.as_ref()
.ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
if !contain_namespace(&linux.namespaces, "user") { if !contain_namespace(&linux.namespaces, "user") {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
@ -243,7 +260,10 @@ fn has_idmapping(maps: &Vec<LinuxIDMapping>, id: u32) -> bool {
} }
fn rootless_euid_mount(oci: &Spec) -> Result<()> { fn rootless_euid_mount(oci: &Spec) -> Result<()> {
let linux = oci.linux.as_ref().unwrap(); let linux = oci
.linux
.as_ref()
.ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
for mnt in oci.mounts.iter() { for mnt in oci.mounts.iter() {
for opt in mnt.options.iter() { for opt in mnt.options.iter() {
@ -254,7 +274,10 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
let id = fields[1].trim().parse::<u32>()?; let id = fields[1]
.trim()
.parse::<u32>()
.context(format!("parse field {}", &fields[1]))?;
if opt.starts_with("uid=") { if opt.starts_with("uid=") {
if !has_idmapping(&linux.uid_mappings, id) { if !has_idmapping(&linux.uid_mappings, id) {
@ -274,34 +297,37 @@ fn rootless_euid_mount(oci: &Spec) -> Result<()> {
} }
fn rootless_euid(oci: &Spec) -> Result<()> { fn rootless_euid(oci: &Spec) -> Result<()> {
rootless_euid_mapping(oci)?; rootless_euid_mapping(oci).context("rootless euid mapping")?;
rootless_euid_mount(oci)?; rootless_euid_mount(oci).context("rotless euid mount")?;
Ok(()) Ok(())
} }
pub fn validate(conf: &Config) -> Result<()> { pub fn validate(conf: &Config) -> Result<()> {
lazy_static::initialize(&SYSCTLS); lazy_static::initialize(&SYSCTLS);
let oci = conf.spec.as_ref().unwrap(); let oci = conf
.spec
.as_ref()
.ok_or(anyhow!(nix::Error::from_errno(Errno::EINVAL)))?;
if oci.linux.is_none() { if oci.linux.is_none() {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL)));
} }
if oci.root.is_none() { let root = match oci.root.as_ref() {
return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))); Some(v) => v.path.as_str(),
} None => return Err(anyhow!(nix::Error::from_errno(Errno::EINVAL))),
let root = oci.root.as_ref().unwrap().path.as_str(); };
rootfs(root)?; rootfs(root).context("rootfs")?;
network(oci)?; network(oci).context("network")?;
hostname(oci)?; hostname(oci).context("hostname")?;
security(oci)?; security(oci).context("security")?;
usernamespace(oci)?; usernamespace(oci).context("usernamespace")?;
cgroupnamespace(oci)?; cgroupnamespace(oci).context("cgroupnamespace")?;
sysctl(&oci)?; sysctl(&oci).context("sysctl")?;
if conf.rootless_euid { if conf.rootless_euid {
rootless_euid(oci)?; rootless_euid(oci).context("rootless euid")?;
} }
Ok(()) Ok(())

View File

@ -16,7 +16,6 @@ use std::thread::{self};
use crate::mount::{BareMount, FLAGS}; use crate::mount::{BareMount, FLAGS};
use slog::Logger; use slog::Logger;
//use container::Process;
const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns"; const PERSISTENT_NS_DIR: &str = "/var/run/sandbox-ns";
pub const NSTYPEIPC: &str = "ipc"; pub const NSTYPEIPC: &str = "ipc";
pub const NSTYPEUTS: &str = "uts"; pub const NSTYPEUTS: &str = "uts";
@ -81,7 +80,10 @@ impl Namespace {
fs::create_dir_all(&self.persistent_ns_dir)?; fs::create_dir_all(&self.persistent_ns_dir)?;
let ns_path = PathBuf::from(&self.persistent_ns_dir); let ns_path = PathBuf::from(&self.persistent_ns_dir);
let ns_type = self.ns_type.clone(); let ns_type = self.ns_type;
if ns_type == NamespaceType::PID {
return Err(anyhow!("Cannot persist namespace of PID type"));
}
let logger = self.logger.clone(); let logger = self.logger.clone();
let new_ns_path = ns_path.join(&ns_type.get()); let new_ns_path = ns_path.join(&ns_type.get());
@ -202,7 +204,7 @@ mod tests {
assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok()); assert!(remove_mounts(&vec![ns_ipc.unwrap().path]).is_ok());
let logger = slog::Logger::root(slog::Discard, o!()); let logger = slog::Logger::root(slog::Discard, o!());
let tmpdir = Builder::new().prefix("ipc").tempdir().unwrap(); let tmpdir = Builder::new().prefix("uts").tempdir().unwrap();
let ns_uts = Namespace::new(&logger) let ns_uts = Namespace::new(&logger)
.as_uts("test_hostname") .as_uts("test_hostname")
@ -211,6 +213,17 @@ mod tests {
assert!(ns_uts.is_ok()); assert!(ns_uts.is_ok());
assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok()); assert!(remove_mounts(&vec![ns_uts.unwrap().path]).is_ok());
// Check it cannot persist pid namespaces.
let logger = slog::Logger::root(slog::Discard, o!());
let tmpdir = Builder::new().prefix("pid").tempdir().unwrap();
let ns_pid = Namespace::new(&logger)
.as_pid()
.set_root_dir(tmpdir.path().to_str().unwrap())
.setup();
assert!(ns_pid.is_err());
} }
#[test] #[test]

View File

@ -3,7 +3,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
//use crate::container::Container;
use crate::linux_abi::*; use crate::linux_abi::*;
use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS}; use crate::mount::{get_mount_fs_type, remove_mounts, TYPEROOTFS};
use crate::namespace::Namespace; use crate::namespace::Namespace;
@ -233,6 +232,10 @@ impl Sandbox {
online_memory(&self.logger)?; online_memory(&self.logger)?;
} }
if req.nb_cpus == 0 {
return Ok(());
}
let cpuset = rustjail_cgroups::fs::get_guest_cpuset()?; let cpuset = rustjail_cgroups::fs::get_guest_cpuset()?;
for (_, ctr) in self.containers.iter() { for (_, ctr) in self.containers.iter() {
@ -393,7 +396,6 @@ fn online_memory(logger: &Logger) -> Result<()> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
//use rustjail::Error;
use super::Sandbox; use super::Sandbox;
use crate::{mount::BareMount, skip_if_not_root}; use crate::{mount::BareMount, skip_if_not_root};
use anyhow::Error; use anyhow::Error;

View File

@ -68,7 +68,7 @@ NETMON_TARGET = $(PROJECT_TYPE)-netmon
NETMON_TARGET_OUTPUT = $(CURDIR)/$(NETMON_TARGET) NETMON_TARGET_OUTPUT = $(CURDIR)/$(NETMON_TARGET)
BINLIBEXECLIST += $(NETMON_TARGET) BINLIBEXECLIST += $(NETMON_TARGET)
DESTDIR := / DESTDIR ?= /
ifeq ($(PREFIX),) ifeq ($(PREFIX),)
PREFIX := /usr PREFIX := /usr

View File

@ -17,13 +17,12 @@ import (
func deleteContainer(ctx context.Context, s *service, c *container) error { func deleteContainer(ctx context.Context, s *service, c *container) error {
if !c.cType.IsSandbox() { if !c.cType.IsSandbox() {
if c.status != task.StatusStopped { if c.status != task.StatusStopped {
_, err := s.sandbox.StopContainer(c.id, false) if _, err := s.sandbox.StopContainer(c.id, false); err != nil && !isNotFound(err) {
if err != nil {
return err return err
} }
} }
if _, err := s.sandbox.DeleteContainer(c.id); err != nil { if _, err := s.sandbox.DeleteContainer(c.id); err != nil && !isNotFound(err) {
return err return err
} }
} }

View File

@ -635,10 +635,10 @@ func (k *kataAgent) listInterfaces() ([]*pbTypes.Interface, error) {
return nil, err return nil, err
} }
resultInterfaces, ok := resultingInterfaces.(*grpc.Interfaces) resultInterfaces, ok := resultingInterfaces.(*grpc.Interfaces)
if ok { if !ok {
return resultInterfaces.Interfaces, err return nil, fmt.Errorf("Unexpected type %T for interfaces", resultingInterfaces)
} }
return nil, err return resultInterfaces.Interfaces, nil
} }
func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) { func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) {
@ -648,10 +648,10 @@ func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) {
return nil, err return nil, err
} }
resultRoutes, ok := resultingRoutes.(*grpc.Routes) resultRoutes, ok := resultingRoutes.(*grpc.Routes)
if ok { if !ok {
return resultRoutes.Routes, err return nil, fmt.Errorf("Unexpected type %T for routes", resultingRoutes)
} }
return nil, err return resultRoutes.Routes, nil
} }
func (k *kataAgent) getAgentURL() (string, error) { func (k *kataAgent) getAgentURL() (string, error) {

View File

@ -2203,6 +2203,12 @@ func (q *qemu) toGrpc() ([]byte, error) {
} }
func (q *qemu) save() (s persistapi.HypervisorState) { func (q *qemu) save() (s persistapi.HypervisorState) {
// If QEMU isn't even running, there isn't any state to save
if q.stopped {
return
}
pids := q.getPids() pids := q.getPids()
if len(pids) != 0 { if len(pids) != 0 {
s.Pid = pids[0] s.Pid = pids[0]

View File

@ -13,10 +13,13 @@ clean:
test: test:
install:
check: check:
.PHONY: \ .PHONY: \
build \ build \
test \ test \
check \ check \
install \
clean clean

View File

@ -13,10 +13,13 @@ clean:
test: test:
install:
check: check:
.PHONY: \ .PHONY: \
build \ build \
test \ test \
check \ check \
install \
clean clean

View File

@ -0,0 +1,13 @@
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
from docker.io/gentoo/stage3-amd64:latest
# This dockerfile needs to provide all the componets need to build a rootfs
# Install any package need to create a rootfs (package manager, extra tools)
# This will install the proper golang to build Kata components
@INSTALL_GO@
@INSTALL_RUST@

View File

@ -0,0 +1,22 @@
# This is a configuration file add extra variables to
#
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# be used by build_rootfs() from rootfs_lib.sh the variables will be
# loaded just before call the function. For more information see the
# rootfs-builder/README.md file.
OS_VERSION=${OS_VERSION:-latest}
OS_NAME=${OS_NAME:-"gentoo"}
# packages to be installed by default
PACKAGES="sys-apps/systemd net-firewall/iptables net-misc/chrony"
# Init process must be one of {systemd,kata-agent}
INIT_PROCESS=systemd
# List of zero or more architectures to exclude from build,
# as reported by `uname -m`
ARCH_EXCLUDE_LIST=( aarch64 ppc64le s390x )
[ "$SECCOMP" = "yes" ] && PACKAGES+=" sys-libs/libseccomp" || true

View File

@ -0,0 +1,210 @@
# Copyright (c) 2020 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# - Arguments
# rootfs_dir=$1
#
# - Optional environment variables
#
# EXTRA_PKGS: Variable to add extra PKGS provided by the user
#
# BIN_AGENT: Name of the Kata-Agent binary
#
# REPO_URL: URL to distribution repository ( should be configured in
# config.sh file)
#
# Any other configuration variable for a specific distro must be added
# and documented on its own config.sh
#
# - Expected result
#
# rootfs_dir populated with rootfs pkgs
# It must provide a binary in /sbin/init
#
# Name of the helper container that exposes the gentoo portage tree as a volume.
gentoo_portage_container=gentoo_portage
# Host directory mounted into the build container at /usr/portage/packages
# (binary package cache); timestamped so repeated builds do not collide.
gentoo_local_portage_dir="${HOME}/gentoo-$(date +%s)"
build_rootfs() {
# Build a gentoo-based rootfs into the directory given as $1.
# Runs inside the gentoo build container: tunes portage (USE flags,
# CFLAGS, profile), removes conflicting init systems, installs $PACKAGES
# with systemd as init, then prunes files not needed at runtime.
# Mandatory: target directory that will receive the rootfs.
local ROOTFS_DIR=$1
# In case of support EXTRA packages, use it to allow
# users to add more packages to the base rootfs
local EXTRA_PKGS=${EXTRA_PKGS:-}
# Populate ROOTFS_DIR
# Must provide /sbin/init and /bin/${BIN_AGENT}
check_root
mkdir -p "${ROOTFS_DIR}"
# trim whitespace
PACKAGES=$(echo $PACKAGES |xargs )
EXTRA_PKGS=$(echo $EXTRA_PKGS |xargs)
# extra packages are added to PACKAGES and finally passed to emerge below
if [ "${EXTRA_PKGS}" = "" ]; then
echo "no extra packages"
else
PACKAGES="${PACKAGES} ${EXTRA_PKGS}"
fi
# Portage config files (inside the build container, not the rootfs).
local packageuseconf="/etc/portage/package.use/user"
local makeconf="/etc/portage/make.conf"
# Per-package USE flags for systemd: keep only what the guest needs.
local systemd_optimizations=(
acl
-apparmor
-audit
cgroup-hybrid
-cryptsetup
-curl
-dns-over-tls
-gcrypt
-gnuefi
-homed
-http
-hwdb
-idn
-importd
kmod
-lz4
-lzma
-nat
-pkcs11
-policykit
-pwquality
-qrcode
-repart
-resolvconf
sysv-utils
-test
-xkb
-zstd
)
# Global USE flags applied to every package built for the rootfs.
local packages_optimizations=(
-abi_x86_32
-abi_x86_x32
-debug
-doc
-examples
multicall
-ncurses
-nls
-selinux
systemd
-udev
-unicode
-X
)
# Compiler/linker flags written into make.conf for all builds.
local compiler_optimizations=(
-O3
-fassociative-math
-fasynchronous-unwind-tables
-feliminate-unused-debug-types
-fexceptions
-ffat-lto-objects
-fno-semantic-interposition
-fno-signed-zeros
-fno-trapping-math
-fstack-protector
-ftree-loop-distribute-patterns
-m64
-mtune=skylake
--param=ssp-buffer-size=32
-pipe
-Wl,--copy-dt-needed-entries
-Wp,-D_REENTRANT
-Wl,--enable-new-dtags
-Wl,-sort-common
-Wl,-z -Wl,now
-Wl,-z -Wl,relro
)
# Tools needed only during the build, not shipped in the rootfs.
local build_dependencies=(
dev-vcs/git
)
# Alternative init/udev providers that conflict with systemd.
local conflicting_packages=(
net-misc/netifrc sys-apps/sysvinit
sys-fs/eudev sys-apps/openrc
virtual/service-manager
)
# systemd optimizations
echo "sys-apps/systemd ${systemd_optimizations[*]}" >> ${packageuseconf}
echo "MAKEOPTS=\"-j$(nproc)\"" >> ${makeconf}
# Packages optimizations
echo "USE=\"${packages_optimizations[*]}\"" >> ${makeconf}
# compiler optimizations
echo "CFLAGS=\"${compiler_optimizations[*]}\"" >> ${makeconf}
# single-quoted on purpose: portage expands ${CFLAGS} itself at build time
echo 'CXXFLAGS="${CFLAGS}"' >> ${makeconf}
# remove conflicting packages
emerge -Cv $(echo "${conflicting_packages[*]}")
# Get the latest stable systemd portage profile and set it
systemd_profile=$(profile-config list | grep stable | grep -E "[[:digit:]]/systemd" | xargs | cut -d' ' -f2)
profile-config set "${systemd_profile}"
# Install build dependencies
emerge --newuse $(echo "${build_dependencies[*]}")
# Snapshot currently-installed packages as binary packages so the
# ROOT= install below can reuse them (-k).
quickpkg --include-unmodified-config=y "*/*"
# Install needed packages into ${ROOTFS_DIR} excluding conflicting packages
ROOT=${ROOTFS_DIR} emerge --exclude "$(echo "${conflicting_packages[*]}")" --newuse -k ${PACKAGES}
pushd ${ROOTFS_DIR}
# systemd will need this library
cp /usr/lib/gcc/x86_64-pc-linux-gnu/*/libgcc_s.so* lib64/
# Clean up the rootfs. there are things that we don't need
rm -rf etc/{udev,X11,kernel,runlevels,terminfo,init.d}
rm -rf var/lib/{gentoo,portage}
rm -rf var/{db,cache}
rm -rf usr/share/*
rm -rf usr/lib/{udev,gconv,kernel}
rm -rf usr/{include,local}
rm -rf usr/lib64/gconv
rm -rf lib/{udev,gentoo}
# Make sure important directories exist in the rootfs
ln -s ../run var/run
mkdir -p proc opt sys dev home root
popd
}
before_starting_container() {
	# Prepare the gentoo helpers used while building the rootfs:
	# pull the portage image and create a data container exposing
	# /usr/portage, which the build container mounts via --volumes-from.
	# When OS_VERSION is "latest", it is rewritten to the image creation
	# date (YYYYMMDD) so it matches the portage image tag scheme.
	gentoo_portage_image="gentoo/portage"

	if [ "${OS_VERSION}" = "latest" ];then
		${container_engine} pull "${gentoo_portage_image}:latest"
		# Use ${container_engine} consistently (was hardcoded to "docker",
		# which breaks podman-based hosts); quote the Go template too.
		OS_VERSION=$(${container_engine} image inspect -f "{{.Created}}" "${gentoo_portage_image}" | cut -dT -f1 | sed 's|-||g')
	else
		${container_engine} pull "${gentoo_portage_image}:${OS_VERSION}"
	fi

	# create portage volume and container
	${container_engine} create -v /usr/portage --name "${gentoo_portage_container}" "${gentoo_portage_image}" /bin/true
}
after_stopping_container() {
	# Tear down the gentoo build helpers: record which volumes the portage
	# container uses, remove the container and the local package directory,
	# then remove the recorded volumes.
	mount_count=$(${container_engine} inspect -f "{{len .Mounts}}" "${gentoo_portage_container}")
	volumes=""
	idx=0
	while [ "${idx}" -lt "${mount_count}" ]; do
		volumes+="$(${container_engine} inspect -f "{{(index .Mounts $((idx))).Name}}" "${gentoo_portage_container}") "
		idx=$((idx + 1))
	done

	# Drop the portage container itself.
	${container_engine} rm -f "${gentoo_portage_container}"
	sudo rm -rf "${gentoo_local_portage_dir}"

	# Finally remove its volumes (unquoted: word-split the space-separated list).
	${container_engine} volume rm -f ${volumes}
}

View File

@ -181,20 +181,39 @@ docker_extra_args()
{ {
local args="" local args=""
case "$1" in # Required to mount inside a container
ubuntu | debian) args+=" --cap-add SYS_ADMIN"
# Requred to chroot # Requred to chroot
args+=" --cap-add SYS_CHROOT" args+=" --cap-add SYS_CHROOT"
# debootstrap needs to create device nodes to properly function # debootstrap needs to create device nodes to properly function
args+=" --cap-add MKNOD" args+=" --cap-add MKNOD"
;&
suse) case "$1" in
# Required to mount inside a container gentoo)
args+=" --cap-add SYS_ADMIN" # Required to build glibc
args+=" --cap-add SYS_PTRACE"
# mount portage volume
args+=" -v ${gentoo_local_portage_dir}:/usr/portage/packages"
args+=" --volumes-from ${gentoo_portage_container}"
;;
debian | ubuntu | suse)
source /etc/os-release
case "$ID" in
fedora | centos | rhel)
# Depending on the podman version, we'll face issues when passing
# `--security-opt apparmor=unconfined` on a system where not apparmor is not installed.
# Because of this, let's just avoid adding this option when the host OS comes from Red Hat.
# An explicit check for podman, at least for now, can be avoided.
;;
*)
# When AppArmor is enabled, mounting inside a container is blocked with docker-default profile. # When AppArmor is enabled, mounting inside a container is blocked with docker-default profile.
# See https://github.com/moby/moby/issues/16429 # See https://github.com/moby/moby/issues/16429
args+=" --security-opt apparmor=unconfined" args+=" --security-opt apparmor=unconfined"
;; ;;
esac
;;
*) *)
;; ;;
esac esac
@ -400,6 +419,9 @@ build_rootfs_distro()
done done
fi fi
before_starting_container
trap after_stopping_container EXIT
#Make sure we use a compatible runtime to build rootfs #Make sure we use a compatible runtime to build rootfs
# In case Clear Containers Runtime is installed we dont want to hit issue: # In case Clear Containers Runtime is installed we dont want to hit issue:
#https://github.com/clearcontainers/runtime/issues/828 #https://github.com/clearcontainers/runtime/issues/828
@ -503,6 +525,10 @@ EOT
mkdir -p "${ROOTFS_DIR}/etc" mkdir -p "${ROOTFS_DIR}/etc"
case "${distro}" in case "${distro}" in
"gentoo")
chrony_conf_file="${ROOTFS_DIR}/etc/chrony/chrony.conf"
chrony_systemd_service="${ROOTFS_DIR}/lib/systemd/system/chronyd.service"
;;
"ubuntu" | "debian") "ubuntu" | "debian")
echo "I am ubuntu or debian" echo "I am ubuntu or debian"
chrony_conf_file="${ROOTFS_DIR}/etc/chrony/chrony.conf" chrony_conf_file="${ROOTFS_DIR}/etc/chrony/chrony.conf"
@ -527,7 +553,9 @@ EOT
sed -i 's/^\(server \|pool \|peer \)/# &/g' ${chrony_conf_file} sed -i 's/^\(server \|pool \|peer \)/# &/g' ${chrony_conf_file}
if [ -f "$chrony_systemd_service" ]; then if [ -f "$chrony_systemd_service" ]; then
sed -i '/^\[Unit\]/a ConditionPathExists=\/dev\/ptp0' ${chrony_systemd_service} # Remove user option, user could not exist in the rootfs
sed -i -e 's/^\(ExecStart=.*\)-u [[:alnum:]]*/\1/g' \
-e '/^\[Unit\]/a ConditionPathExists=\/dev\/ptp0' ${chrony_systemd_service}
fi fi
# The CC on s390x for fedora needs to be manually set to gcc when the golang is downloaded from the main page. # The CC on s390x for fedora needs to be manually set to gcc when the golang is downloaded from the main page.

View File

@ -23,7 +23,8 @@
# rootfs_dir populated with rootfs pkgs # rootfs_dir populated with rootfs pkgs
# It must provide a binary in /sbin/init # It must provide a binary in /sbin/init
# #
# Note: For some distros, the build_rootfs() function provided in scripts/lib.sh # Note: For some distros, the build_rootfs(), before_starting_container()
# and after_starting_container() functions provided in scripts/lib.sh
# will suffice. If a new distro is introduced with a special requirement, # will suffice. If a new distro is introduced with a special requirement,
# then, a rootfs_builder/<distro>/rootfs_lib.sh file should be created # then, a rootfs_builder/<distro>/rootfs_lib.sh file should be created
# using this template. # using this template.
@ -52,3 +53,19 @@ build_rootfs() {
# Populate ROOTFS_DIR # Populate ROOTFS_DIR
# Must provide /sbin/init and /bin/${BIN_AGENT} # Must provide /sbin/init and /bin/${BIN_AGENT}
} }
before_starting_container() {
# Run the following tasks before starting the container that builds the rootfs.
# For example:
# * Create a container
# * Create a volume
return 0
}

after_stopping_container() {
# Run the following tasks after stopping the container that builds the rootfs.
# For example:
# * Delete a container
# * Delete a volume
return 0
}

View File

@ -80,5 +80,8 @@ build_rootfs() {
${ROOTFS_DIR} ${ROOTFS_DIR}
chroot $ROOTFS_DIR ln -s /lib/systemd/systemd /usr/lib/systemd/systemd chroot $ROOTFS_DIR ln -s /lib/systemd/systemd /usr/lib/systemd/systemd
}
# Reduce image size and memory footprint
# removing not needed files and directories.
chroot $ROOTFS_DIR rm -rf /usr/share/{bash-completion,bug,doc,info,lintian,locale,man,menu,misc,pixmaps,terminfo,zoneinfo,zsh}
}

View File

@ -421,3 +421,11 @@ detect_musl_version()
[ "$?" == "0" ] && [ "$MUSL_VERSION" != "null" ] [ "$?" == "0" ] && [ "$MUSL_VERSION" != "null" ]
} }
# Default hook: runs before the rootfs build container is started.
# No-op here; a distro's rootfs_lib.sh may override it to do setup
# (e.g. create helper containers/volumes).
before_starting_container() {
return 0
}

# Default hook: runs after the rootfs build container has stopped.
# No-op here; a distro's rootfs_lib.sh may override it to clean up.
after_stopping_container() {
return 0
}

View File

@ -0,0 +1,48 @@
#!/bin/bash
#
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script applies git patches from a directory to the sources in the
# current working directory. Patch order follows the numeric prefix of the
# file name (git-format-patch style). Exits non-zero on the first patch
# that fails to apply; a missing patches directory is not an error.
#
set -e

script_dir="$(realpath $(dirname $0))"
# Directory containing the *.patch files, passed as the first argument.
patches_dir="$1"

# No argument: print usage (heredoc below) and exit with failure.
if [ -z "$patches_dir" ]; then
cat <<-EOT
	Apply patches to the sources at the current directory.

	Patches are expected to be named in the standard git-format-patch(1) format where
	the first part of the filename represents the patch ordering (lowest numbers
	apply first):

	    'NUMBER-DASHED_DESCRIPTION.patch'

	For example,

	  0001-fix-the-bad-thing.patch
	  0002-improve-the-fix-the-bad-thing-fix.patch
	  0003-correct-compiler-warnings.patch

	Usage:
	    $0 PATCHES_DIR
	Where:
	    PATCHES_DIR is the directory containing the patches
	EOT
exit 1
fi

echo "INFO: Apply patches from $patches_dir"
if [ -d "$patches_dir" ]; then
# Sort numerically on the leading NUMBER- prefix so patches apply in order.
patches=($(find "$patches_dir" -name '*.patch'|sort -t- -k1,1n))
echo "INFO: Found ${#patches[@]} patches"
for patch in ${patches[@]}; do
echo "INFO: Apply $patch"
git apply "$patch" || \
{ echo >&2 "ERROR: Not applied. Exiting..."; exit 1; }
done
else
echo "INFO: Patches directory does not exist"
fi

View File

@ -4,6 +4,7 @@
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
from ubuntu:20.04 from ubuntu:20.04
ARG QEMU_DESTDIR
ARG QEMU_VIRTIOFS_REPO ARG QEMU_VIRTIOFS_REPO
# commit/tag/branch # commit/tag/branch
ARG QEMU_VIRTIOFS_TAG ARG QEMU_VIRTIOFS_TAG
@ -54,23 +55,24 @@ RUN git checkout "${QEMU_VIRTIOFS_TAG}"
ADD scripts/configure-hypervisor.sh /root/configure-hypervisor.sh ADD scripts/configure-hypervisor.sh /root/configure-hypervisor.sh
ADD qemu /root/kata_qemu ADD qemu /root/kata_qemu
ADD scripts/apply_patches.sh /root/apply_patches.sh
ADD static-build /root/static-build
# Apply experimental specific patches # Apply experimental specific patches
# Patches to quick fix virtiofs fork # Patches to quick fix virtiofs fork
ENV VIRTIOFS_PATCHES_DIR=/root/kata_qemu/patches/${QEMU_VIRTIOFS_TAG}/ ENV VIRTIOFS_PATCHES_DIR=/root/kata_qemu/patches/${QEMU_VIRTIOFS_TAG}/
RUN if [ -d ${VIRTIOFS_PATCHES_DIR} ]; then \ RUN /root/apply_patches.sh ${VIRTIOFS_PATCHES_DIR}
echo "Patches to apply for virtiofs fixes:"; \ # Apply the stable branch patches
for patch in $(find "${VIRTIOFS_PATCHES_DIR}" -name '*.patch' -type f |sort -t- -k1,1n); do \ RUN stable_branch=$(cat VERSION | awk 'BEGIN{FS=OFS="."}{print $1 "." $2 ".x"}') && \
git apply $patch; \ /root/apply_patches.sh "/root/kata_qemu/patches/${stable_branch}"
done;fi
RUN /root/kata_qemu/apply_patches.sh
RUN PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s kata-qemu | sed -e 's|--disable-seccomp||g' | xargs ./configure \ RUN PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s kata-qemu | sed -e 's|--disable-seccomp||g' | xargs ./configure \
--with-pkgversion=kata-static --with-pkgversion=kata-static
RUN make -j$(nproc) RUN make -j$(nproc)
RUN make -j$(nproc) virtiofsd RUN make -j$(nproc) virtiofsd
RUN make install DESTDIR=/tmp/qemu-virtiofs-static RUN make install DESTDIR="${QEMU_DESTDIR}"
RUN mv /tmp/qemu-virtiofs-static/"${PREFIX}"/bin/qemu-system-x86_64 /tmp/qemu-virtiofs-static/"${PREFIX}"/bin/qemu-virtiofs-system-x86_64 RUN cd "${QEMU_DESTDIR}/${PREFIX}" && \
RUN mv /tmp/qemu-virtiofs-static/"${PREFIX}"/libexec/kata-qemu/virtiofsd /tmp/qemu-virtiofs-static/opt/kata/bin/virtiofsd-dax mv bin/qemu-system-x86_64 bin/qemu-virtiofs-system-x86_64 && \
RUN cd /tmp/qemu-virtiofs-static && tar -czvf "${QEMU_TARBALL}" * mv libexec/kata-qemu/virtiofsd bin/virtiofsd-dax
RUN /root/static-build/scripts/qemu-build-post.sh

View File

@ -26,6 +26,7 @@ qemu_virtiofs_repo=$(get_from_kata_deps "assets.hypervisor.qemu-experimental.url
qemu_virtiofs_tag=$(get_from_kata_deps "assets.hypervisor.qemu-experimental.tag" "${kata_version}") qemu_virtiofs_tag=$(get_from_kata_deps "assets.hypervisor.qemu-experimental.tag" "${kata_version}")
qemu_virtiofs_tar="kata-static-qemu-virtiofsd.tar.gz" qemu_virtiofs_tar="kata-static-qemu-virtiofsd.tar.gz"
qemu_tmp_tar="kata-static-qemu-virtiofsd-tmp.tar.gz" qemu_tmp_tar="kata-static-qemu-virtiofsd-tmp.tar.gz"
qemu_destdir="/tmp/qemu-virtiofs-static"
info "Build ${qemu_virtiofs_repo} tag: ${qemu_virtiofs_tag}" info "Build ${qemu_virtiofs_repo} tag: ${qemu_virtiofs_tag}"
@ -37,6 +38,7 @@ sudo "${DOCKER_CLI}" build \
--no-cache \ --no-cache \
--build-arg http_proxy="${http_proxy}" \ --build-arg http_proxy="${http_proxy}" \
--build-arg https_proxy="${https_proxy}" \ --build-arg https_proxy="${https_proxy}" \
--build-arg QEMU_DESTDIR="${qemu_destdir}" \
--build-arg QEMU_VIRTIOFS_REPO="${qemu_virtiofs_repo}" \ --build-arg QEMU_VIRTIOFS_REPO="${qemu_virtiofs_repo}" \
--build-arg QEMU_VIRTIOFS_TAG="${qemu_virtiofs_tag}" \ --build-arg QEMU_VIRTIOFS_TAG="${qemu_virtiofs_tag}" \
--build-arg QEMU_TARBALL="${qemu_virtiofs_tar}" \ --build-arg QEMU_TARBALL="${qemu_virtiofs_tar}" \
@ -46,12 +48,9 @@ sudo "${DOCKER_CLI}" build \
-t qemu-virtiofs-static -t qemu-virtiofs-static
sudo "${DOCKER_CLI}" run \ sudo "${DOCKER_CLI}" run \
--rm \
-i \ -i \
-v "${PWD}":/share qemu-virtiofs-static \ -v "${PWD}":/share qemu-virtiofs-static \
mv "/tmp/qemu-virtiofs-static/${qemu_virtiofs_tar}" /share/ mv "${qemu_destdir}/${qemu_virtiofs_tar}" /share/
sudo chown ${USER}:${USER} "${PWD}/${qemu_virtiofs_tar}" sudo chown ${USER}:${USER} "${PWD}/${qemu_virtiofs_tar}"
# Remove blacklisted binaries
gzip -d < "${qemu_virtiofs_tar}" | tar --delete --wildcards -f - ${qemu_black_list[*]} | gzip > "${qemu_tmp_tar}"
mv -f "${qemu_tmp_tar}" "${qemu_virtiofs_tar}"

View File

@ -6,7 +6,7 @@ qemu_black_list=(
*/bin/qemu-pr-helper */bin/qemu-pr-helper
*/bin/virtfs-proxy-helper */bin/virtfs-proxy-helper
*/libexec/kata-qemu/qemu* */libexec/kata-qemu/qemu*
*/share/*/applications/ */share/*/applications
*/share/*/*.dtb */share/*/*.dtb
*/share/*/efi-e1000e.rom */share/*/efi-e1000e.rom
*/share/*/efi-e1000.rom */share/*/efi-e1000.rom
@ -15,9 +15,9 @@ qemu_black_list=(
*/share/*/efi-pcnet.rom */share/*/efi-pcnet.rom
*/share/*/efi-rtl8139.rom */share/*/efi-rtl8139.rom
*/share/*/efi-vmxnet3.rom */share/*/efi-vmxnet3.rom
*/share/*/icons/ */share/*/icons
*/share/*/*.img */share/*/*.img
*/share/*/keymaps/ */share/*/keymaps
*/share/*/multiboot.bin */share/*/multiboot.bin
*/share/*/openbios-ppc */share/*/openbios-ppc
*/share/*/openbios-sparc32 */share/*/openbios-sparc32

View File

@ -4,6 +4,7 @@
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
from ubuntu:20.04 from ubuntu:20.04
ARG QEMU_DESTDIR
ARG QEMU_REPO ARG QEMU_REPO
# commit/tag/branch # commit/tag/branch
ARG QEMU_VERSION ARG QEMU_VERSION
@ -54,13 +55,16 @@ RUN git clone https://github.com/qemu/keycodemapdb.git ui/keycodemapdb
ADD scripts/configure-hypervisor.sh /root/configure-hypervisor.sh ADD scripts/configure-hypervisor.sh /root/configure-hypervisor.sh
ADD qemu /root/kata_qemu ADD qemu /root/kata_qemu
ADD scripts/apply_patches.sh /root/apply_patches.sh
ADD static-build /root/static-build
RUN /root/kata_qemu/apply_patches.sh RUN stable_branch=$(cat VERSION | awk 'BEGIN{FS=OFS="."}{print $1 "." $2 ".x"}') && \
/root/apply_patches.sh "/root/kata_qemu/patches/${stable_branch}"
RUN PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s kata-qemu | xargs ./configure \ RUN PREFIX="${PREFIX}" /root/configure-hypervisor.sh -s kata-qemu | xargs ./configure \
--with-pkgversion=kata-static --with-pkgversion=kata-static
RUN make -j$(nproc) RUN make -j$(nproc)
RUN make -j$(nproc) virtiofsd RUN make -j$(nproc) virtiofsd
RUN make install DESTDIR=/tmp/qemu-static RUN make install DESTDIR="${QEMU_DESTDIR}"
RUN cd /tmp/qemu-static && tar -czvf "${QEMU_TARBALL}" * RUN /root/static-build/scripts/qemu-build-post.sh

View File

@ -16,6 +16,7 @@ source "${script_dir}/../qemu.blacklist"
packaging_dir="${script_dir}/../.." packaging_dir="${script_dir}/../.."
qemu_tar="kata-static-qemu.tar.gz" qemu_tar="kata-static-qemu.tar.gz"
qemu_tmp_tar="kata-static-qemu-tmp.tar.gz" qemu_tmp_tar="kata-static-qemu-tmp.tar.gz"
qemu_destdir="/tmp/qemu-static/"
qemu_repo="${qemu_repo:-}" qemu_repo="${qemu_repo:-}"
qemu_version="${qemu_version:-}" qemu_version="${qemu_version:-}"
@ -45,6 +46,7 @@ sudo docker build \
--no-cache \ --no-cache \
--build-arg http_proxy="${http_proxy}" \ --build-arg http_proxy="${http_proxy}" \
--build-arg https_proxy="${https_proxy}" \ --build-arg https_proxy="${https_proxy}" \
--build-arg QEMU_DESTDIR="${qemu_destdir}" \
--build-arg QEMU_REPO="${qemu_repo}" \ --build-arg QEMU_REPO="${qemu_repo}" \
--build-arg QEMU_VERSION="${qemu_version}" \ --build-arg QEMU_VERSION="${qemu_version}" \
--build-arg QEMU_TARBALL="${qemu_tar}" \ --build-arg QEMU_TARBALL="${qemu_tar}" \
@ -54,12 +56,9 @@ sudo docker build \
-t qemu-static -t qemu-static
sudo docker run \ sudo docker run \
--rm \
-i \ -i \
-v "${PWD}":/share qemu-static \ -v "${PWD}":/share qemu-static \
mv "/tmp/qemu-static/${qemu_tar}" /share/ mv "${qemu_destdir}/${qemu_tar}" /share/
sudo chown ${USER}:${USER} "${PWD}/${qemu_tar}" sudo chown ${USER}:${USER} "${PWD}/${qemu_tar}"
# Remove blacklisted binaries
gzip -d < "${qemu_tar}" | tar --delete --wildcards -f - ${qemu_black_list[*]} | gzip > "${qemu_tmp_tar}"
mv -f "${qemu_tmp_tar}" "${qemu_tar}"

View File

@ -0,0 +1,28 @@
#!/bin/bash
#
# Copyright (c) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script processes the QEMU post-build step: it prunes blacklisted
# files from the "make install" tree and packs the result into a tarball.
#
# Required (exported) environment variables:
#   QEMU_TARBALL - name of the tarball to create
#   QEMU_DESTDIR - directory holding the QEMU "make install" output
#
set -e

script_dir="$(realpath $(dirname $0))"

# Provides the qemu_black_list array of path patterns to delete.
source "${script_dir}/../qemu.blacklist"

if [[ -z "${QEMU_TARBALL}" || -z "${QEMU_DESTDIR}" ]]; then
	echo "$0: needs QEMU_TARBALL and QEMU_DESTDIR exported"
	exit 1
fi

pushd "${QEMU_DESTDIR}"

# Remove files to reduce the surface.
echo "INFO: remove unneeded files"
# Quote the expansion so patterns like "*/bin/qemu*" reach find verbatim
# instead of being glob-expanded by the shell against the current directory.
for pattern in "${qemu_black_list[@]}"; do
	# -prune stops find descending into a directory it is about to delete
	# (avoids traversal errors under `set -e`); -exec handles paths with
	# whitespace and runs nothing when the pattern matches nothing.
	find . -path "$pattern" -prune -exec rm -rfv {} +
done

echo "INFO: create the tarball"
tar -czvf "${QEMU_TARBALL}" *

popd