Merge pull request #8484 from ChengyuZhu6/guest-pull

Merge basic guest pull image code to main
This commit is contained in:
Fabiano Fidêncio 2024-03-19 23:15:39 +01:00 committed by GitHub
commit 19eb45a27d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
32 changed files with 3389 additions and 225 deletions

2437
src/agent/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -21,13 +21,16 @@ scopeguard = "1.0.0"
thiserror = "1.0.26"
regex = "1.5.6"
serial_test = "0.5.1"
oci-distribution = "0.10.0"
url = "2.5.0"
kata-sys-util = { path = "../libs/kata-sys-util" }
kata-types = { path = "../libs/kata-types" }
safe-path = { path = "../libs/safe-path" }
# Async helpers
async-trait = "0.1.42"
async-recursion = "0.3.2"
futures = "0.3.17"
futures = "0.3.30"
# Async runtime
tokio = { version = "1.28.1", features = ["full"] }
@ -73,10 +76,15 @@ reqwest = { version = "0.11.14", optional = true }
# The "vendored" feature for openssl is required for musl build
openssl = { version = "0.10.54", features = ["vendored"], optional = true }
# Image pull/decrypt
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "ca6b438", default-features = true, optional = true }
[dev-dependencies]
tempfile = "3.1.0"
test-utils = { path = "../libs/test-utils" }
which = "4.3.0"
rstest = "0.18.0"
async-std = { version = "1.12.0", features = ["attributes"] }
[workspace]
members = [
@ -87,9 +95,12 @@ members = [
lto = true
[features]
# The default-pull feature would support all pull types, including sharing images by virtio-fs and pulling images in the guest
default-pull = [ "guest-pull" ]
seccomp = ["rustjail/seccomp"]
standard-oci-runtime = ["rustjail/standard-oci-runtime"]
agent-policy = ["http", "openssl", "reqwest"]
guest-pull = ["image-rs", "openssl"]
[[bin]]
name = "kata-agent"

View File

@ -41,6 +41,16 @@ ifeq ($(AGENT_POLICY),yes)
override EXTRA_RUSTFEATURES += agent-policy
endif
##VAR PULL_TYPE=default|guest-pull defines whether the agent enables the guest pull image feature
PULL_TYPE ?= default
ifeq ($(PULL_TYPE),default)
override EXTRA_RUSTFEATURES += default-pull
# Enable guest pull image feature of rust build
else ifeq ($(PULL_TYPE),guest-pull)
override EXTRA_RUSTFEATURES += guest-pull
endif
include ../../utils.mk
ifeq ($(ARCH), ppc64le)

View File

@ -126,8 +126,10 @@ The kata agent has the ability to configure agent options in guest kernel comman
| `agent.debug_console_vport` | Debug console port | Allow to specify the `vsock` port to connect the debugging console | integer | `0` |
| `agent.devmode` | Developer mode | Allow the agent process to coredump | boolean | `false` |
| `agent.hotplug_timeout` | Hotplug timeout | Allow to configure hotplug timeout(seconds) of block devices | integer | `3` |
| `agent.https_proxy` | HTTPS proxy | Allow to configure `https_proxy` in the guest | string | `""` |
| `agent.log` | Log level | Allow the agent log level to be changed (produces more or less output) | string | `"info"` |
| `agent.log_vport` | Log port | Allow to specify the `vsock` port to read logs | integer | `0` |
| `agent.no_proxy` | NO proxy | Allow to configure `no_proxy` in the guest | string | `""` |
| `agent.passfd_listener_port` | File descriptor passthrough IO listener port | Allow to set the file descriptor passthrough IO listener port | integer | `0` |
| `agent.server_addr` | Server address | Allow the ttRPC server address to be specified | string | `"vsock://-1:1024"` |
| `agent.trace` | Trace mode | Allow to static tracing | boolean | `false` |

View File

@ -10,6 +10,7 @@ use std::fs;
use std::str::FromStr;
use std::time;
use tracing::instrument;
use url::Url;
use kata_types::config::default::DEFAULT_AGENT_VSOCK_PORT;
@ -26,6 +27,11 @@ const CONTAINER_PIPE_SIZE_OPTION: &str = "agent.container_pipe_size";
const UNIFIED_CGROUP_HIERARCHY_OPTION: &str = "agent.unified_cgroup_hierarchy";
const CONFIG_FILE: &str = "agent.config_file";
// Configure the proxy settings for HTTPS requests in the guest, used when
// the guest cannot reach the specified image registry directly.
const HTTPS_PROXY: &str = "agent.https_proxy";
const NO_PROXY: &str = "agent.no_proxy";
const DEFAULT_LOG_LEVEL: slog::Level = slog::Level::Info;
const DEFAULT_HOTPLUG_TIMEOUT: time::Duration = time::Duration::from_secs(3);
const DEFAULT_CONTAINER_PIPE_SIZE: i32 = 0;
@ -66,6 +72,8 @@ pub struct AgentConfig {
pub unified_cgroup_hierarchy: bool,
pub tracing: bool,
pub supports_seccomp: bool,
pub https_proxy: String,
pub no_proxy: String,
}
#[derive(Debug, Deserialize)]
@ -81,6 +89,8 @@ pub struct AgentConfigBuilder {
pub passfd_listener_port: Option<i32>,
pub unified_cgroup_hierarchy: Option<bool>,
pub tracing: Option<bool>,
pub https_proxy: Option<String>,
pub no_proxy: Option<String>,
}
macro_rules! config_override {
@ -142,6 +152,8 @@ impl Default for AgentConfig {
unified_cgroup_hierarchy: false,
tracing: false,
supports_seccomp: rpc::have_seccomp(),
https_proxy: String::from(""),
no_proxy: String::from(""),
}
}
}
@ -171,6 +183,8 @@ impl FromStr for AgentConfig {
config_override!(agent_config_builder, agent_config, passfd_listener_port);
config_override!(agent_config_builder, agent_config, unified_cgroup_hierarchy);
config_override!(agent_config_builder, agent_config, tracing);
config_override!(agent_config_builder, agent_config, https_proxy);
config_override!(agent_config_builder, agent_config, no_proxy);
Ok(agent_config)
}
@ -270,6 +284,8 @@ impl AgentConfig {
config.unified_cgroup_hierarchy,
get_bool_value
);
parse_cmdline_param!(param, HTTPS_PROXY, config.https_proxy, get_url_value);
parse_cmdline_param!(param, NO_PROXY, config.no_proxy, get_string_value);
}
if let Ok(addr) = env::var(SERVER_ADDR_ENV_VAR) {
@ -417,6 +433,12 @@ fn get_container_pipe_size(param: &str) -> Result<i32> {
Ok(value)
}
#[instrument]
fn get_url_value(param: &str) -> Result<String> {
    // Extract the raw "key=value" payload, then round-trip it through `Url`
    // so malformed URLs are rejected and the value is normalized (e.g. a
    // trailing slash is appended to a bare authority).
    let raw = get_string_value(param)?;
    let url = Url::parse(&raw)?;
    Ok(url.to_string())
}
#[cfg(test)]
mod tests {
use test_utils::assert_result;
@ -453,6 +475,8 @@ mod tests {
server_addr: &'a str,
unified_cgroup_hierarchy: bool,
tracing: bool,
https_proxy: &'a str,
no_proxy: &'a str,
}
impl Default for TestData<'_> {
@ -468,6 +492,8 @@ mod tests {
server_addr: TEST_SERVER_ADDR,
unified_cgroup_hierarchy: false,
tracing: false,
https_proxy: "",
no_proxy: "",
}
}
}
@ -837,6 +863,26 @@ mod tests {
tracing: true,
..Default::default()
},
TestData {
contents: "agent.https_proxy=http://proxy.url.com:81/",
https_proxy: "http://proxy.url.com:81/",
..Default::default()
},
TestData {
contents: "agent.https_proxy=http://192.168.1.100:81/",
https_proxy: "http://192.168.1.100:81/",
..Default::default()
},
TestData {
contents: "agent.no_proxy=*.internal.url.com",
no_proxy: "*.internal.url.com",
..Default::default()
},
TestData {
contents: "agent.no_proxy=192.168.1.0/24,172.16.0.0/12",
no_proxy: "192.168.1.0/24,172.16.0.0/12",
..Default::default()
},
];
let dir = tempdir().expect("failed to create tmpdir");
@ -884,6 +930,8 @@ mod tests {
assert_eq!(d.container_pipe_size, config.container_pipe_size, "{}", msg);
assert_eq!(d.server_addr, config.server_addr, "{}", msg);
assert_eq!(d.tracing, config.tracing, "{}", msg);
assert_eq!(d.https_proxy, config.https_proxy, "{}", msg);
assert_eq!(d.no_proxy, config.no_proxy, "{}", msg);
for v in vars_to_unset {
env::remove_var(v);

336
src/agent/src/image.rs Normal file
View File

@ -0,0 +1,336 @@
// Copyright (c) 2021 Alibaba Cloud
// Copyright (c) 2021, 2023 IBM Corporation
// Copyright (c) 2022 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
use safe_path::scoped_join;
use std::collections::HashMap;
use std::env;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use anyhow::{anyhow, bail, Context, Result};
use image_rs::image::ImageClient;
use kata_sys_util::validate::verify_id;
use tokio::sync::Mutex;
use crate::rpc::CONTAINER_BASE;
use crate::AGENT_CONFIG;
// A marker to merge container spec for images pulled inside guest.
const ANNO_K8S_IMAGE_NAME: &str = "io.kubernetes.cri.image-name";
const KATA_IMAGE_WORK_DIR: &str = "/run/kata-containers/image/";
const CONFIG_JSON: &str = "config.json";
const KATA_PAUSE_BUNDLE: &str = "/pause_bundle";
const K8S_CONTAINER_TYPE_KEYS: [&str; 2] = [
"io.kubernetes.cri.container-type",
"io.kubernetes.cri-o.ContainerType",
];
#[rustfmt::skip]
lazy_static! {
pub static ref IMAGE_SERVICE: Mutex<Option<ImageService>> = Mutex::new(None);
}
/// Convenience helper returning a logger scoped to the "image" subsystem.
fn sl() -> slog::Logger {
    let base = slog_scope::logger();
    base.new(o!("subsystem" => "image"))
}
/// Service responsible for pulling and tracking container images inside
/// the guest. Cloning is cheap: both fields are `Arc`s shared between clones.
#[derive(Clone)]
pub struct ImageService {
    // Shared image-rs client used to pull and unpack container images.
    image_client: Arc<Mutex<ImageClient>>,
    // Map of image reference -> container id for images pulled in the guest.
    images: Arc<Mutex<HashMap<String, String>>>,
}
impl ImageService {
    /// Create a fresh image service whose image-rs client stores layer data
    /// under `KATA_IMAGE_WORK_DIR`, with an empty image -> container id map.
    pub fn new() -> Self {
        Self {
            image_client: Arc::new(Mutex::new(ImageClient::new(PathBuf::from(
                KATA_IMAGE_WORK_DIR,
            )))),
            images: Arc::new(Mutex::new(HashMap::new())),
        }
    }
    /// Get the singleton instance of image service.
    ///
    /// Returns an error until the singleton has been installed into
    /// `IMAGE_SERVICE` (done at ttRPC server startup).
    pub async fn singleton() -> Result<ImageService> {
        IMAGE_SERVICE
            .lock()
            .await
            .clone()
            .ok_or_else(|| anyhow!("image service is uninitialized"))
    }
    /// Record that `image` has been pulled for container `cid`.
    async fn add_image(&self, image: String, cid: String) {
        self.images.lock().await.insert(image, cid);
    }
    /// pause image is packaged in rootfs
    ///
    /// Copies the pause bundle shipped in the guest rootfs
    /// (`KATA_PAUSE_BUNDLE`) into the container's bundle directory and
    /// returns the resulting rootfs path.
    fn unpack_pause_image(cid: &str, target_subpath: &str) -> Result<String> {
        // Reject container ids with path-unsafe characters before using
        // `cid` to build filesystem paths.
        verify_id(cid).context("The guest pause image cid contains invalid characters.")?;
        let guest_pause_bundle = Path::new(KATA_PAUSE_BUNDLE);
        if !guest_pause_bundle.exists() {
            bail!("Pause image not present in rootfs");
        }
        info!(sl(), "use guest pause image cid {:?}", cid);
        let pause_bundle = Path::new(CONTAINER_BASE).join(cid).join(target_subpath);
        let pause_rootfs = pause_bundle.join("rootfs");
        fs::create_dir_all(&pause_rootfs)?;
        // Idempotent copy: skip files already present from a previous run.
        let copy_if_not_exists = |src: &Path, dst: &Path| -> Result<()> {
            if !dst.exists() {
                info!(sl(), "copying file {src:?} to {dst:?}");
                fs::copy(src, dst)?;
            }
            Ok(())
        };
        copy_if_not_exists(
            &guest_pause_bundle.join(CONFIG_JSON),
            &pause_bundle.join(CONFIG_JSON),
        )?;
        copy_if_not_exists(
            &guest_pause_bundle.join("rootfs/pause"),
            &pause_rootfs.join("pause"),
        )?;
        Ok(pause_rootfs.display().to_string())
    }
    /// pull_image is used for call image-rs to pull image in the guest.
    /// # Parameters
    /// - `image`: Image name (exp: quay.io/prometheus/busybox:latest)
    /// - `cid`: Container id
    /// - `image_metadata`: Annotations about the image (exp: "containerd.io/snapshot/cri.layer-digest": "sha256:24fb2886d6f6c5d16481dd7608b47e78a8e92a13d6e64d87d57cb16d5f766d63")
    /// # Returns
    /// - The image rootfs bundle path. (exp. /run/kata-containers/cb0b47276ea66ee9f44cc53afa94d7980b57a52c3f306f68cb034e58d9fbd3c6/images/rootfs)
    pub async fn pull_image(
        &self,
        image: &str,
        cid: &str,
        image_metadata: &HashMap<String, String>,
    ) -> Result<String> {
        info!(sl(), "image metadata: {image_metadata:?}");
        // Check whether the image is for sandbox or for container: sandbox
        // (pause) containers are served from the bundled pause image instead
        // of being pulled from a registry.
        let mut is_sandbox = false;
        for key in K8S_CONTAINER_TYPE_KEYS.iter() {
            if let Some(value) = image_metadata.get(key as &str) {
                if value == "sandbox" {
                    is_sandbox = true;
                    break;
                }
            }
        }
        if is_sandbox {
            let mount_path = Self::unpack_pause_image(cid, "pause")?;
            self.add_image(String::from(image), String::from(cid)).await;
            return Ok(mount_path);
        }
        // Image layers will store at KATA_IMAGE_WORK_DIR, generated bundles
        // with rootfs and config.json will store under CONTAINER_BASE/cid/images.
        // scoped_join keeps the result inside CONTAINER_BASE even if `cid`
        // contains traversal components.
        let bundle_base_dir = scoped_join(CONTAINER_BASE, cid)?;
        fs::create_dir_all(&bundle_base_dir)?;
        let bundle_path = scoped_join(&bundle_base_dir, "images")?;
        fs::create_dir_all(&bundle_path)?;
        info!(sl(), "pull image {image:?}, bundle path {bundle_path:?}");
        let res = self
            .image_client
            .lock()
            .await
            .pull_image(image, &bundle_path, &None, &None)
            .await;
        match res {
            Ok(image) => {
                info!(
                    sl(),
                    "pull and unpack image {image:?}, cid: {cid:?} succeeded."
                );
            }
            Err(e) => {
                error!(
                    sl(),
                    "pull and unpack image {image:?}, cid: {cid:?} failed with {:?}.",
                    e.to_string()
                );
                return Err(e);
            }
        };
        self.add_image(String::from(image), String::from(cid)).await;
        let image_bundle_path = scoped_join(&bundle_path, "rootfs")?;
        Ok(image_bundle_path.as_path().display().to_string())
    }
    /// When being passed an image name through a container annotation, merge its
    /// corresponding bundle OCI specification into the passed container creation one.
    pub async fn merge_bundle_oci(&self, container_oci: &mut oci::Spec) -> Result<()> {
        if let Some(image_name) = container_oci.annotations.get(ANNO_K8S_IMAGE_NAME) {
            let images = self.images.lock().await;
            if let Some(container_id) = images.get(image_name) {
                // The bundle's config.json was written next to the pulled
                // image under CONTAINER_BASE/<cid>/.
                let image_oci_config_path = Path::new(CONTAINER_BASE)
                    .join(container_id)
                    .join(CONFIG_JSON);
                debug!(
                    sl(),
                    "Image bundle config path: {:?}", image_oci_config_path
                );
                let image_oci =
                    oci::Spec::load(image_oci_config_path.to_str().ok_or_else(|| {
                        anyhow!(
                            "Invalid container image OCI config path {:?}",
                            image_oci_config_path
                        )
                    })?)
                    .context("load image bundle")?;
                // Rebase the image's root path under this container's
                // directory so the runtime mounts the pulled rootfs.
                if let (Some(container_root), Some(image_root)) =
                    (container_oci.root.as_mut(), image_oci.root.as_ref())
                {
                    let root_path = Path::new(CONTAINER_BASE)
                        .join(container_id)
                        .join(image_root.path.clone());
                    container_root.path = String::from(root_path.to_str().ok_or_else(|| {
                        anyhow!("Invalid container image root path {:?}", root_path)
                    })?);
                }
                if let (Some(container_process), Some(image_process)) =
                    (container_oci.process.as_mut(), image_oci.process.as_ref())
                {
                    self.merge_oci_process(container_process, image_process);
                }
            }
        }
        Ok(())
    }
    /// Partially merge an OCI process specification into another one.
    ///
    /// The container spec (target) always wins; image values are only used
    /// to fill in args/cwd/env the container spec left unset.
    fn merge_oci_process(&self, target: &mut oci::Process, source: &oci::Process) {
        // Override the target args only when the target args is empty and source.args is not empty
        if target.args.is_empty() && !source.args.is_empty() {
            target.args.append(&mut source.args.clone());
        }
        // Override the target cwd only when the target cwd is blank and source.cwd is not blank
        if target.cwd == "/" && source.cwd != "/" {
            target.cwd = String::from(&source.cwd);
        }
        for source_env in &source.env {
            if let Some((variable_name, variable_value)) = source_env.split_once('=') {
                debug!(
                    sl(),
                    "source spec environment variable: {variable_name:?} : {variable_value:?}"
                );
                // NOTE(review): `contains` is a substring match, so an image
                // var named FOO is also suppressed by a container var BAR_FOO
                // — presumably acceptable here, but worth confirming.
                if !target.env.iter().any(|i| i.contains(variable_name)) {
                    target.env.push(source_env.to_string());
                }
            }
        }
    }
}
/// Set proxy environment from AGENT_CONFIG.
///
/// Copies `https_proxy` / `no_proxy` from the agent configuration into the
/// process environment (`HTTPS_PROXY` / `NO_PROXY`) so libraries that honor
/// these variables (such as the image pull client) pick them up. A value
/// already present in the environment always takes precedence over the
/// configuration.
pub async fn set_proxy_env_vars() {
    // The two variables follow the exact same rules; keep the logic in one
    // helper instead of duplicating it per variable.
    sync_proxy_env_var("HTTPS_PROXY", &AGENT_CONFIG.https_proxy);
    sync_proxy_env_var("NO_PROXY", &AGENT_CONFIG.no_proxy);
}

/// Set `var` from `config_value` when it is absent from the environment and
/// the configured value is non-empty, then log the variable's final state.
fn sync_proxy_env_var(var: &str, config_value: &str) {
    if env::var(var).is_err() && !config_value.is_empty() {
        env::set_var(var, config_value);
    }
    // Log the effective value (lower-cased name matches the historical
    // "https_proxy is set to: ..." message format).
    match env::var(var) {
        Ok(val) => info!(sl(), "{} is set to: {}", var.to_lowercase(), val),
        Err(e) => info!(sl(), "{} is not set ({})", var.to_lowercase(), e),
    }
}
#[cfg(test)]
mod tests {
    use super::ImageService;
    use rstest::rstest;

    // NOTE(review): these tests are `async fn` annotated only with
    // `#[rstest]`; the async executor attribute is presumably provided by
    // the crate's dev-dependency setup (async-std "attributes") — confirm.

    // Verify cwd merging: the image cwd only overrides a blank ("/")
    // container cwd; an explicit container cwd always wins.
    #[rstest]
    // TODO - how can we tell the user didn't specifically set it to `/` vs not setting at all? Is that scenario valid?
    #[case::image_cwd_should_override_blank_container_cwd("/", "/imageDir", "/imageDir")]
    #[case::container_cwd_should_override_image_cwd("/containerDir", "/imageDir", "/containerDir")]
    #[case::container_cwd_should_override_blank_image_cwd("/containerDir", "/", "/containerDir")]
    async fn test_merge_cwd(
        #[case] container_process_cwd: &str,
        #[case] image_process_cwd: &str,
        #[case] expected: &str,
    ) {
        let image_service = ImageService::new();
        let mut container_process = oci::Process {
            cwd: container_process_cwd.to_string(),
            ..Default::default()
        };
        let image_process = oci::Process {
            cwd: image_process_cwd.to_string(),
            ..Default::default()
        };
        image_service.merge_oci_process(&mut container_process, &image_process);
        assert_eq!(expected, container_process.cwd);
    }

    // Verify env merging: container (pod) variables override image
    // variables with the same name; image-only variables are appended.
    #[rstest]
    #[case::pods_environment_overrides_images(
        vec!["ISPRODUCTION=true".to_string()],
        vec!["ISPRODUCTION=false".to_string()],
        vec!["ISPRODUCTION=true".to_string()]
    )]
    #[case::multiple_environment_variables_can_be_overrided(
        vec!["ISPRODUCTION=true".to_string(), "ISDEVELOPMENT=false".to_string()],
        vec!["ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()],
        vec!["ISPRODUCTION=true".to_string(), "ISDEVELOPMENT=false".to_string()]
    )]
    #[case::not_override_them_when_none_of_variables_match(
        vec!["ANOTHERENV=TEST".to_string()],
        vec!["ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()],
        vec!["ANOTHERENV=TEST".to_string(), "ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()]
    )]
    #[case::a_mix_of_both_overriding_and_not(
        vec!["ANOTHERENV=TEST".to_string(), "ISPRODUCTION=true".to_string()],
        vec!["ISPRODUCTION=false".to_string(), "ISDEVELOPMENT=true".to_string()],
        vec!["ANOTHERENV=TEST".to_string(), "ISPRODUCTION=true".to_string(), "ISDEVELOPMENT=true".to_string()]
    )]
    async fn test_merge_env(
        #[case] container_process_env: Vec<String>,
        #[case] image_process_env: Vec<String>,
        #[case] expected: Vec<String>,
    ) {
        let image_service = ImageService::new();
        let mut container_process = oci::Process {
            env: container_process_env,
            ..Default::default()
        };
        let image_process = oci::Process {
            env: image_process_env,
            ..Default::default()
        };
        image_service.merge_oci_process(&mut container_process, &image_process);
        assert_eq!(expected, container_process.env);
    }
}

View File

@ -73,6 +73,9 @@ use tokio::{
task::JoinHandle,
};
#[cfg(feature = "guest-pull")]
mod image;
mod rpc;
mod tracer;
@ -348,6 +351,9 @@ async fn start_sandbox(
s.rtnl.handle_localhost().await?;
}
#[cfg(feature = "guest-pull")]
image::set_proxy_env_vars().await;
// - When init_mode is true, enabling the localhost link during the
// handle_localhost call above is required before starting OPA with the
// initialize_policy call below.
@ -379,7 +385,7 @@ async fn start_sandbox(
sandbox.lock().await.sender = Some(tx);
// vsock:///dev/vsock, port
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode)?;
let mut server = rpc::start(sandbox.clone(), config.server_addr.as_str(), init_mode).await?;
server.start().await?;
rx.await?;

View File

@ -73,6 +73,9 @@ use crate::tracer::extract_carrier_from_ttrpc;
#[cfg(feature = "agent-policy")]
use crate::policy::{do_set_policy, is_allowed};
#[cfg(feature = "guest-pull")]
use crate::image;
use opentelemetry::global;
use tracing::span;
use tracing_opentelemetry::OpenTelemetrySpanExt;
@ -199,6 +202,14 @@ impl AgentService {
"receive createcontainer, storages: {:?}", &req.storages
);
// In case of pulling image inside guest, we need to merge the image bundle OCI spec
// into the container creation request OCI spec.
#[cfg(feature = "guest-pull")]
{
let image_service = image::ImageService::singleton().await?;
image_service.merge_bundle_oci(&mut oci).await?;
}
// Some devices need some extra processing (the ones invoked with
// --device for instance), and that's what this call is doing. It
// updates the devices listed in the OCI spec, so that they actually
@ -1583,7 +1594,11 @@ async fn read_stream(reader: &Mutex<ReadHalf<PipeStream>>, l: usize) -> Result<V
Ok(content)
}
pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str, init_mode: bool) -> Result<TtrpcServer> {
pub async fn start(
s: Arc<Mutex<Sandbox>>,
server_address: &str,
init_mode: bool,
) -> Result<TtrpcServer> {
let agent_service = Box::new(AgentService {
sandbox: s,
init_mode,
@ -1593,6 +1608,11 @@ pub fn start(s: Arc<Mutex<Sandbox>>, server_address: &str, init_mode: bool) -> R
let health_service = Box::new(HealthService {}) as Box<dyn health_ttrpc::Health + Send + Sync>;
let hservice = health_ttrpc::create_health(Arc::new(health_service));
#[cfg(feature = "guest-pull")]
{
let image_service = image::ImageService::new();
*image::IMAGE_SERVICE.lock().await = Some(image_service.clone());
}
let server = TtrpcServer::new()
.bind(server_address)?
.register_service(aservice)

View File

@ -0,0 +1,101 @@
// Copyright (c) 2023 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
use crate::image;
use crate::storage::{StorageContext, StorageHandler};
use anyhow::{anyhow, Result};
use kata_types::mount::KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL;
use kata_types::mount::{ImagePullVolume, StorageDevice};
use protocols::agent::Storage;
use std::sync::Arc;
use tracing::instrument;
use super::{common_storage_handler, new_device};
/// Storage handler that materializes a container volume by pulling the
/// container image inside the guest ("guest pull" flow).
#[derive(Debug)]
pub struct ImagePullHandler {}
impl ImagePullHandler {
    /// Locate the `image_guest_pull=<json>` entry among the storage driver
    /// options and deserialize its JSON payload into an `ImagePullVolume`.
    /// Fails when no such option is present or the JSON is malformed.
    fn get_image_info(storage: &Storage) -> Result<ImagePullVolume> {
        let payload = storage
            .driver_options
            .iter()
            .filter_map(|option| option.split_once('='))
            .find(|(key, _)| *key == KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL)
            .map(|(_, value)| value)
            .ok_or_else(|| anyhow!("missing Image information for ImagePull volume"))?;
        Ok(serde_json::from_str(payload)?)
    }
}
#[async_trait::async_trait]
impl StorageHandler for ImagePullHandler {
    /// Pull the image named in `storage.source` inside the guest and
    /// bind-mount the resulting rootfs bundle at the requested mount point.
    #[instrument]
    async fn create_device(
        &self,
        mut storage: Storage,
        ctx: &mut StorageContext,
    ) -> Result<Arc<dyn StorageDevice>> {
        // Currently the image metadata is not used when pulling the image in the guest.
        let image_pull_volume = Self::get_image_info(&storage)?;
        debug!(ctx.logger, "image_pull_volume = {:?}", image_pull_volume);
        // The image reference travels in the storage `source` field.
        let image_name = storage.source();
        debug!(ctx.logger, "image_name = {:?}", image_name);
        let cid = ctx
            .cid
            .clone()
            .ok_or_else(|| anyhow!("failed to get container id"))?;
        // Delegate the pull/unpack to the shared image service; it returns
        // the rootfs bundle path inside the guest.
        let image_service = image::ImageService::singleton().await?;
        let bundle_path = image_service
            .pull_image(image_name, &cid, &image_pull_volume.metadata)
            .await?;
        // Rewrite the storage entry so the common handler bind-mounts the
        // pulled rootfs read-only at the requested mount point.
        storage.source = bundle_path;
        storage.options = vec!["bind".to_string(), "ro".to_string()];
        common_storage_handler(ctx.logger, &storage)?;
        new_device(storage.mount_point)
    }
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use kata_types::mount::{ImagePullVolume, KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL};
    use protocols::agent::Storage;
    use crate::storage::image_pull_handler::ImagePullHandler;

    // get_image_info must round-trip the metadata map embedded in the
    // `image_guest_pull=<json>` driver option.
    #[test]
    fn test_get_image_info() {
        let metadata: HashMap<String, String> = [
            ("key1".to_string(), "value1".to_string()),
            ("key2".to_string(), "value2".to_string()),
        ]
        .into_iter()
        .collect();
        let volume = ImagePullVolume {
            metadata: metadata.clone(),
        };
        let serialized =
            serde_json::to_string(&volume).expect("serialize ImagePullVolume");
        let storage = Storage {
            driver: KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL.to_string(),
            driver_options: vec![format!("image_guest_pull={}", serialized)],
            ..Default::default()
        };
        let parsed = ImagePullHandler::get_image_info(&storage)
            .expect("get_image_info should succeed");
        assert_eq!(parsed.metadata, metadata);
    }
}

View File

@ -12,6 +12,8 @@ use std::sync::Arc;
use anyhow::{anyhow, Context, Result};
use kata_sys_util::mount::{create_mount_destination, parse_mount_options};
#[cfg(feature = "guest-pull")]
use kata_types::mount::KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL;
use kata_types::mount::{StorageDevice, StorageHandlerManager, KATA_SHAREDFS_GUEST_PREMOUNT_TAG};
use nix::unistd::{Gid, Uid};
use protocols::agent::Storage;
@ -24,6 +26,8 @@ use self::bind_watcher_handler::BindWatcherHandler;
use self::block_handler::{PmemHandler, ScsiHandler, VirtioBlkMmioHandler, VirtioBlkPciHandler};
use self::ephemeral_handler::EphemeralHandler;
use self::fs_handler::{OverlayfsHandler, Virtio9pHandler, VirtioFsHandler};
#[cfg(feature = "guest-pull")]
use self::image_pull_handler::ImagePullHandler;
use self::local_handler::LocalHandler;
use crate::device::{
DRIVER_9P_TYPE, DRIVER_BLK_MMIO_TYPE, DRIVER_BLK_PCI_TYPE, DRIVER_EPHEMERAL_TYPE,
@ -39,6 +43,8 @@ mod bind_watcher_handler;
mod block_handler;
mod ephemeral_handler;
mod fs_handler;
#[cfg(feature = "guest-pull")]
mod image_pull_handler;
mod local_handler;
const RW_MASK: u32 = 0o660;
@ -145,6 +151,8 @@ lazy_static! {
manager.add_handler(DRIVER_SCSI_TYPE, Arc::new(ScsiHandler{})).unwrap();
manager.add_handler(DRIVER_VIRTIOFS_TYPE, Arc::new(VirtioFsHandler{})).unwrap();
manager.add_handler(DRIVER_WATCHABLE_BIND_TYPE, Arc::new(BindWatcherHandler{})).unwrap();
#[cfg(feature = "guest-pull")]
manager.add_handler(KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL, Arc::new(ImagePullHandler{})).unwrap();
manager
};
}

View File

@ -486,9 +486,11 @@ func handleVirtualVolume(c *Container) ([]*grpc.Storage, string, error) {
volumeType = virtVolume.VolumeType
var vol *grpc.Storage
vol, err = handleVirtualVolumeStorageObject(c, "", virtVolume)
if err != nil {
return nil, "", err
if volumeType == types.KataVirtualVolumeImageGuestPullType {
vol, err = handleVirtualVolumeStorageObject(c, "", virtVolume)
if err != nil {
return nil, "", err
}
}
if vol != nil {

View File

@ -36,6 +36,8 @@ import (
"context"
ctrAnnotations "github.com/containerd/containerd/pkg/cri/annotations"
podmanAnnotations "github.com/containers/podman/v4/pkg/annotations"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
@ -1580,9 +1582,73 @@ func handleBlockVolume(c *Container, device api.Device) (*grpc.Storage, error) {
return vol, nil
}
// getContainerTypeforCRI returns the Kata container type together with the
// CRI annotation key (containerd or podman flavor) that marks the container
// type in the OCI spec annotations. Both return values are empty strings
// when no known CRI key is present.
func getContainerTypeforCRI(c *Container) (string, string) {
	containerType := c.config.Annotations[vcAnnotations.ContainerTypeKey]

	// All the CRI keys that could define the container type from
	// annotations in the config.json.
	for _, key := range []string{ctrAnnotations.ContainerType, podmanAnnotations.ContainerType} {
		if _, ok := c.config.CustomSpec.Annotations[key]; ok {
			return containerType, key
		}
	}
	return "", ""
}
// handleImageGuestPullBlockVolume fills in the storage object for a volume
// whose container image will be pulled inside the guest: it resolves the
// image reference from CRI annotations (or uses the bundled "pause" image
// for sandbox containers), merges the container annotations into the
// image-pull metadata, and serializes everything into the storage driver
// options for the guest agent.
func handleImageGuestPullBlockVolume(c *Container, virtualVolumeInfo *types.KataVirtualVolume, vol *grpc.Storage) (*grpc.Storage, error) {
	containerAnnotations := c.GetAnnotations()
	containerType, criContainerType := getContainerTypeforCRI(c)

	var imageRef string
	if containerType == string(PodSandbox) {
		// The sandbox (pause) container image ships in the guest rootfs,
		// so no registry reference is needed.
		imageRef = "pause"
	} else {
		switch criContainerType {
		case ctrAnnotations.ContainerType:
			imageRef = containerAnnotations["io.kubernetes.cri.image-name"]
		case podmanAnnotations.ContainerType:
			imageRef = containerAnnotations["io.kubernetes.cri-o.ImageName"]
		default:
			imageRef = ""
		}
		if imageRef == "" {
			return nil, fmt.Errorf("Failed to get image name from annotations")
		}
	}
	virtualVolumeInfo.Source = imageRef

	// Merge the container annotations into virtualVolumeInfo.ImagePull.Metadata
	// so the guest agent can inspect them. Initialize the map first if needed:
	// writing to a nil map panics in Go.
	if virtualVolumeInfo.ImagePull.Metadata == nil {
		virtualVolumeInfo.ImagePull.Metadata = make(map[string]string, len(containerAnnotations))
	}
	for k, v := range containerAnnotations {
		virtualVolumeInfo.ImagePull.Metadata[k] = v
	}

	opts, err := json.Marshal(virtualVolumeInfo.ImagePull)
	if err != nil {
		return nil, err
	}

	vol.Driver = types.KataVirtualVolumeImageGuestPullType
	vol.DriverOptions = append(vol.DriverOptions, types.KataVirtualVolumeImageGuestPullType+"="+string(opts))
	vol.Source = virtualVolumeInfo.Source
	vol.Fstype = typeOverlayFS
	return vol, nil
}
// handleVirtualVolumeStorageObject builds the grpc.Storage object for a
// KataVirtualVolume. Only the image-guest-pull volume type is handled here;
// any other type yields a nil storage object and a nil error, which callers
// must check for before dereferencing.
//
// Note: blockDeviceId is currently unused; it is kept for interface
// compatibility with other storage-object handlers.
func handleVirtualVolumeStorageObject(c *Container, blockDeviceId string, virtVolume *types.KataVirtualVolume) (*grpc.Storage, error) {
	// The visible text declared `vol` twice (a stale diff artifact) and
	// allocated a Storage that was immediately reassigned; allocate once
	// and only for the handled volume type.
	if virtVolume.VolumeType != types.KataVirtualVolumeImageGuestPullType {
		return nil, nil
	}

	vol, err := handleImageGuestPullBlockVolume(c, virtVolume, &grpc.Storage{})
	if err != nil {
		return nil, err
	}
	// The rootfs of a guest-pulled image lives under the per-container
	// runtime directory inside the guest.
	vol.MountPoint = filepath.Join("/run/kata-containers/", c.id, c.rootfsSuffix)
	return vol, nil
}

View File

@ -27,19 +27,40 @@ setup() {
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable debug for Kata Containers
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[1].value' --tag '!!str' "true"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[1].value' \
--tag '!!str' "true"
# Create the runtime class only for the shim that's being tested
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[2].value' "${KATA_HYPERVISOR}"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
# Set the tested hypervisor as the default `kata` shim
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[3].value' "${KATA_HYPERVISOR}"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[5].value' --tag '!!str' "true"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[+].name' "HOST_OS"
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[-1].value' "${KATA_HOST_OS}"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[+].name' \
"HOST_OS"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[-1].value' \
"${KATA_HOST_OS}"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
@ -112,13 +133,25 @@ teardown() {
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
# Create the runtime class only for the shim that's being tested
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[2].value' "${KATA_HYPERVISOR}"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
# Set the tested hypervisor as the default `kata` shim
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[3].value' "${KATA_HYPERVISOR}"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i "${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[5].value' --tag '!!str' "true"
yq write -i \
"${repo_root_dir}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${repo_root_dir}/tools/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"

View File

@ -6,6 +6,10 @@
#
source "${BATS_TEST_DIRNAME}/tests_common.sh"
source "${BATS_TEST_DIRNAME}/../../common.bash"
SUPPORTED_TEE_HYPERVISORS=("qemu-sev" "qemu-snp" "qemu-tdx" "qemu-se")
SUPPORTED_NON_TEE_HYPERVISORS=("qemu")
function setup_unencrypted_confidential_pod() {
get_pod_config_dir
@ -33,3 +37,27 @@ function get_remote_command_per_hypervisor() {
echo "${REMOTE_COMMAND_PER_HYPERVISOR[${KATA_HYPERVISOR}]}"
}
# This function verifies whether the hypervisor passed as its first
# argument is supported for running confidential tests.
function check_hypervisor_for_confidential_tests() {
	local hypervisor="${1}"
	local candidate

	# Compare against every supported hypervisor by exact name. Whole-word
	# comparison avoids substring matches, e.g. "qemu" matching "qemu-tdx".
	for candidate in "${SUPPORTED_TEE_HYPERVISORS[@]}" "${SUPPORTED_NON_TEE_HYPERVISORS[@]}"; do
		if [ "${candidate}" = "${hypervisor}" ]; then
			return 0
		fi
	done

	return 1
}
# Common setup for confidential tests.
function confidential_setup() {
	ensure_yq

	# Bail out early on hypervisors that cannot run confidential tests.
	check_hypervisor_for_confidential_tests "${KATA_HYPERVISOR}" || return 1

	# Non-TEE hypervisors exercise the confidential flow via image annotations.
	if [[ " ${SUPPORTED_NON_TEE_HYPERVISORS[*]} " =~ " ${KATA_HYPERVISOR} " ]]; then
		info "Need to apply image annotations"
	fi
}

View File

@ -12,7 +12,9 @@ test:
- cri-containerd
kubernetes:
- k8s-confidential
- k8s-cpu-ns
- k8s-guest-pull-image
- k8s-limit-range
- k8s-number-cpus
- k8s-expose-ip

View File

@ -5,6 +5,8 @@
kubernetes:
- k8s-block-volume
- k8s-confidential
- k8s-guest-pull-image
- k8s-limit-range
- k8s-number-cpus
- k8s-oom

View File

@ -29,6 +29,8 @@ KBS=${KBS:-false}
KBS_INGRESS=${KBS_INGRESS:-}
KUBERNETES="${KUBERNETES:-}"
SNAPSHOTTER="${SNAPSHOTTER:-}"
HTTPS_PROXY="${HTTPS_PROXY:-${https_proxy:-}}"
NO_PROXY="${NO_PROXY:-${no_proxy:-}}"
export AUTO_GENERATE_POLICY="${AUTO_GENERATE_POLICY:-no}"
export TEST_CLUSTER_NAMESPACE="${TEST_CLUSTER_NAMESPACE:-kata-containers-k8s-tests}"
@ -135,26 +137,75 @@ function deploy_kata() {
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
# Enable debug for Kata Containers
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[1].value' --tag '!!str' "true"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[1].value' \
--tag '!!str' "true"
# Create the runtime class only for the shim that's being tested
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[2].value' "${KATA_HYPERVISOR}"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
# Set the tested hypervisor as the default `kata` shim
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[3].value' "${KATA_HYPERVISOR}"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[5].value' --tag '!!str' "true"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
# Enable 'default_vcpus' hypervisor annotation
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[6].value' "default_vcpus"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"default_vcpus"
if [ -n "${SNAPSHOTTER}" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[7].value' \
"${KATA_HYPERVISOR}:${SNAPSHOTTER}"
fi
if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[6].value' "initrd kernel default_vcpus"
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[+].name' "HOST_OS"
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[-1].value' "${KATA_HOST_OS}"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"initrd kernel default_vcpus"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[+].name' \
"HOST_OS"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[-1].value' \
"${KATA_HOST_OS}"
fi
if [ "${KATA_HYPERVISOR}" = "qemu" ]; then
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[6].value' "image initrd kernel default_vcpus"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[6].value' \
"image initrd kernel default_vcpus"
fi
if [ "${KATA_HYPERVISOR}" = "qemu-tdx" ]; then
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[8].value' \
"${HTTPS_PROXY}"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[9].value' \
"${NO_PROXY}"
fi
echo "::group::Final kata-deploy.yaml that is used in the test"
@ -308,13 +359,25 @@ function cleanup_kata_deploy() {
kubectl -n kube-system wait --timeout=10m --for=delete -l name=kata-deploy pod
# Let the `kata-deploy` script take care of the runtime class creation / removal
yq write -i "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[4].value' \
--tag '!!str' "true"
# Create the runtime class only for the shim that's being tested
yq write -i "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[2].value' "${KATA_HYPERVISOR}"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[2].value' \
"${KATA_HYPERVISOR}"
# Set the tested hypervisor as the default `kata` shim
yq write -i "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" 'spec.template.spec.containers[0].env[3].value' "${KATA_HYPERVISOR}"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml" \
'spec.template.spec.containers[0].env[3].value' \
"${KATA_HYPERVISOR}"
# Let the `kata-deploy` create the default `kata` runtime class
yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[5].value' --tag '!!str' "true"
yq write -i \
"${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" \
'spec.template.spec.containers[0].env[5].value' \
--tag '!!str' "true"
sed -i -e "s|quay.io/kata-containers/kata-deploy:latest|${DOCKER_REGISTRY}/${DOCKER_REPO}:${DOCKER_TAG}|g" "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
cat "${tools_dir}/packaging/kata-deploy/kata-cleanup/base/kata-cleanup.yaml"
@ -385,17 +448,29 @@ function deploy_nydus_snapshotter() {
cleanup_nydus_snapshotter || true
if [ "${PULL_TYPE}" == "guest-pull" ]; then
# Enable guest pull feature in nydus snapshotter
yq write -i misc/snapshotter/base/nydus-snapshotter.yaml 'data.FS_DRIVER' "proxy" --style=double
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.FS_DRIVER' \
"proxy" --style=double
else
>&2 echo "Invalid pull type"; exit 2
fi
# Disable to read snapshotter config from configmap
yq write -i misc/snapshotter/base/nydus-snapshotter.yaml 'data.ENABLE_CONFIG_FROM_VOLUME' "false" --style=double
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_CONFIG_FROM_VOLUME' \
"false" --style=double
# Enable to run snapshotter as a systemd service
yq write -i misc/snapshotter/base/nydus-snapshotter.yaml 'data.ENABLE_SYSTEMD_SERVICE' "true" --style=double
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_SYSTEMD_SERVICE' \
"true" --style=double
# Enable "runtime specific snapshotter" feature in containerd when configuring containerd for snapshotter
yq write -i misc/snapshotter/base/nydus-snapshotter.yaml 'data.ENABLE_RUNTIME_SPECIFIC_SNAPSHOTTER' "true" --style=double
yq write -i \
misc/snapshotter/base/nydus-snapshotter.yaml \
'data.ENABLE_RUNTIME_SPECIFIC_SNAPSHOTTER' \
"true" --style=double
# Deploy nydus snapshotter as a daemonset
kubectl create -f "misc/snapshotter/nydus-snapshotter-rbac.yaml"

View File

@ -10,21 +10,8 @@ load "${BATS_TEST_DIRNAME}/confidential_common.sh"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
setup() {
SUPPORTED_TEE_HYPERVISORS=("qemu-sev" "qemu-snp" "qemu-tdx" "qemu-se")
SUPPORTED_NON_TEE_HYPERVISORS=("qemu")
# This check must be done with "<SPACE>${KATA_HYPERVISOR}<SPACE>" to avoid
# having substrings, like qemu, being matched with qemu-$something.
if ! [[ " ${SUPPORTED_TEE_HYPERVISORS[@]} " =~ " ${KATA_HYPERVISOR} " ]] && ! [[ " ${SUPPORTED_NON_TEE_HYPERVISORS} " =~ " ${KATA_HYPERVISOR} " ]]; then
skip "Test not supported for ${KATA_HYPERVISOR}."
fi
if [[ " ${SUPPORTED_NON_TEE_HYPERVISORS} " =~ " ${KATA_HYPERVISOR} " ]]; then
info "Need to apply image annotations"
else
get_pod_config_dir
setup_unencrypted_confidential_pod
fi
confidential_setup || skip "Test not supported for ${KATA_HYPERVISOR}."
setup_unencrypted_confidential_pod
}
@test "Test unencrypted confidential container launch success and verify that we are running in a secure enclave." {
@ -54,10 +41,8 @@ setup() {
}
teardown() {
if ! [[ " ${SUPPORTED_TEE_HYPERVISORS[@]} " =~ " ${KATA_HYPERVISOR} " ]] && ! [[ " ${SUPPORTED_NON_TEE_HYPERVISORS} " =~ " ${KATA_HYPERVISOR} " ]]; then
skip "Test not supported for ${KATA_HYPERVISOR}."
fi
check_hypervisor_for_confidential_tests ${KATA_HYPERVISOR} || skip "Test not supported for ${KATA_HYPERVISOR}."
kubectl describe "pod/${pod_name}" || true
kubectl delete -f "${pod_config_dir}/pod-confidential-unencrypted.yaml" || true
}

View File

@ -0,0 +1,175 @@
#!/usr/bin/env bats
# Copyright (c) 2023 Intel Corporation
# Copyright (c) 2023 IBM Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
load "${BATS_TEST_DIRNAME}/lib.sh"
load "${BATS_TEST_DIRNAME}/confidential_common.sh"
setup() {
	# Skip every test in this file when the current hypervisor cannot run
	# confidential tests (see confidential_common.sh).
	confidential_setup || skip "Test not supported for ${KATA_HYPERVISOR}."
	setup_common
	# Public, unencrypted images exercised by the guest-pull test cases below.
	unencrypted_image_1="quay.io/sjenning/nginx:1.15-alpine"
	unencrypted_image_2="quay.io/prometheus/busybox:latest"
}
@test "Test we can pull an unencrypted image outside the guest with runc and then inside the guest successfully" {
	# Use [*] so every array element is considered, not just element 0 as a
	# bare ${SUPPORTED_NON_TEE_HYPERVISORS} expansion would.
	[[ " ${SUPPORTED_NON_TEE_HYPERVISORS[*]} " =~ " ${KATA_HYPERVISOR} " ]] && skip "Test not supported for ${KATA_HYPERVISOR}."

	# 1. Create one runc pod with the $unencrypted_image_1 image
	# We want to have one runc pod, so we pass a fake runtimeclass "runc" and then delete the runtimeClassName,
	# because the runtimeclass is not optional in new_pod_config function.
	runc_pod_config="$(new_pod_config "$unencrypted_image_1" "runc")"
	sed -i '/runtimeClassName:/d' "$runc_pod_config"
	set_node "$runc_pod_config" "$node"
	set_container_command "$runc_pod_config" "0" "sleep" "30"

	# For debug sake
	echo "Pod $runc_pod_config file:"
	cat "$runc_pod_config"

	k8s_create_pod "$runc_pod_config"
	echo "Runc pod test-e2e is running"
	kubectl delete -f "$runc_pod_config"

	# 2. Create one kata pod with the $unencrypted_image_1 image and nydus annotation
	kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_with_nydus_config" "$node"
	set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

	# Set annotation to pull image in guest
	set_metadata_annotation "$kata_pod_with_nydus_config" \
		"io.containerd.cri.runtime-handler" \
		"kata-${KATA_HYPERVISOR}"

	# For debug sake
	echo "Pod $kata_pod_with_nydus_config file:"
	cat "$kata_pod_with_nydus_config"

	k8s_create_pod "$kata_pod_with_nydus_config"
	echo "Kata pod test-e2e with nydus annotation is running"

	echo "Checking the image was pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# With annotation for nydus, only rootfs for pause container can be found on host
	assert_rootfs_count "$node" "$sandbox_id" "1"
}
@test "Test we can pull an unencrypted image inside the guest twice in a row and then outside the guest successfully" {
	# Use [*] so every array element is considered, not just element 0 as a
	# bare ${SUPPORTED_NON_TEE_HYPERVISORS} expansion would.
	[[ " ${SUPPORTED_NON_TEE_HYPERVISORS[*]} " =~ " ${KATA_HYPERVISOR} " ]] && skip "Test not supported for ${KATA_HYPERVISOR}."
	skip "Skip this test until we use containerd 2.0 with 'image pull per runtime class' feature: https://github.com/containerd/containerd/issues/9377"

	# 1. Create one kata pod with the $unencrypted_image_1 image and nydus annotation twice
	kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_with_nydus_config" "$node"
	set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

	# Set annotation to pull image in guest
	set_metadata_annotation "$kata_pod_with_nydus_config" \
		"io.containerd.cri.runtime-handler" \
		"kata-${KATA_HYPERVISOR}"

	# For debug sake
	echo "Pod $kata_pod_with_nydus_config file:"
	cat "$kata_pod_with_nydus_config"

	k8s_create_pod "$kata_pod_with_nydus_config"
	echo "Kata pod test-e2e with nydus annotation is running"

	echo "Checking the image was pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# With annotation for nydus, only rootfs for pause container can be found on host
	assert_rootfs_count "$node" "$sandbox_id" "1"

	kubectl delete -f "$kata_pod_with_nydus_config"

	# 2. Create one kata pod with the $unencrypted_image_1 image and without nydus annotation
	kata_pod_without_nydus_config="$(new_pod_config "$unencrypted_image_1" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_without_nydus_config" "$node"
	set_container_command "$kata_pod_without_nydus_config" "0" "sleep" "30"

	# For debug sake
	echo "Pod $kata_pod_without_nydus_config file:"
	cat "$kata_pod_without_nydus_config"

	k8s_create_pod "$kata_pod_without_nydus_config"
	echo "Kata pod test-e2e without nydus annotation is running"

	echo "Check the image was not pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# The assert_rootfs_count will be FAIL.
	# The expect count of rootfs in host is "2" but the found count of rootfs in host is "1"
	# As the first time we pull the $unencrypted_image_1 image via nydus-snapshotter in the guest
	# for all subsequent pulls still use nydus-snapshotter in the guest
	# More details: https://github.com/kata-containers/kata-containers/issues/8337
	# The test case will be PASS after we use containerd 2.0 with 'image pull per runtime class' feature:
	# https://github.com/containerd/containerd/issues/9377
	assert_rootfs_count "$node" "$sandbox_id" "2"
}
@test "Test we can pull an other unencrypted image outside the guest and then inside the guest successfully" {
	# Use [*] so every array element is considered, not just element 0 as a
	# bare ${SUPPORTED_NON_TEE_HYPERVISORS} expansion would.
	[[ " ${SUPPORTED_NON_TEE_HYPERVISORS[*]} " =~ " ${KATA_HYPERVISOR} " ]] && skip "Test not supported for ${KATA_HYPERVISOR}."
	skip "Skip this test until we use containerd 2.0 with 'image pull per runtime class' feature: https://github.com/containerd/containerd/issues/9377"

	# 1. Create one kata pod with the $unencrypted_image_2 image and without nydus annotation
	kata_pod_without_nydus_config="$(new_pod_config "$unencrypted_image_2" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_without_nydus_config" "$node"
	set_container_command "$kata_pod_without_nydus_config" "0" "sleep" "30"

	# For debug sake
	echo "Pod $kata_pod_without_nydus_config file:"
	cat "$kata_pod_without_nydus_config"

	k8s_create_pod "$kata_pod_without_nydus_config"
	echo "Kata pod test-e2e without nydus annotation is running"

	echo "Checking the image was pulled in the host"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# Without annotation for nydus, both rootfs for pause and the test container can be found on host
	assert_rootfs_count "$node" "$sandbox_id" "2"

	kubectl delete -f "$kata_pod_without_nydus_config"

	# 2. Create one kata pod with the $unencrypted_image_2 image and with nydus annotation
	kata_pod_with_nydus_config="$(new_pod_config "$unencrypted_image_2" "kata-${KATA_HYPERVISOR}")"
	set_node "$kata_pod_with_nydus_config" "$node"
	set_container_command "$kata_pod_with_nydus_config" "0" "sleep" "30"

	# Set annotation to pull image in guest
	set_metadata_annotation "$kata_pod_with_nydus_config" \
		"io.containerd.cri.runtime-handler" \
		"kata-${KATA_HYPERVISOR}"

	# For debug sake
	echo "Pod $kata_pod_with_nydus_config file:"
	cat "$kata_pod_with_nydus_config"

	k8s_create_pod "$kata_pod_with_nydus_config"
	echo "Kata pod test-e2e with nydus annotation is running"

	echo "Checking the image was pulled in the guest"
	sandbox_id=$(get_node_kata_sandbox_id "$node")
	echo "sandbox_id is: $sandbox_id"
	# The assert_rootfs_count will be FAIL.
	# The expect count of rootfs in host is "1" but the found count of rootfs in host is "2"
	# As the first time we pull the $unencrypted_image_2 image via overlayfs-snapshotter in host
	# for all subsequent pulls still use overlayfs-snapshotter in host.
	# More details: https://github.com/kata-containers/kata-containers/issues/8337
	# The test case will be PASS after we use containerd 2.0 with 'image pull per runtime class' feature:
	# https://github.com/containerd/containerd/issues/9377
	assert_rootfs_count "$node" "$sandbox_id" "1"
}
teardown() {
	# Nothing to clean up when the hypervisor is unsupported — the tests were skipped.
	check_hypervisor_for_confidential_tests "${KATA_HYPERVISOR}" || skip "Test not supported for ${KATA_HYPERVISOR}."

	# Best-effort: a failed describe (e.g. pod already gone) must not fail teardown.
	kubectl describe pod "$pod_name" || true
	k8s_delete_all_pods_if_any_exists || true
}

View File

@ -8,6 +8,9 @@
#
set -e
wait_time=60
sleep_time=3
# Delete all pods if any exist, otherwise just return
#
k8s_delete_all_pods_if_any_exists() {
@ -94,11 +97,49 @@ assert_pod_fail() {
! k8s_create_pod "$container_config" || /bin/false
}
# Check the pulled rootfs on host for given node and sandbox_id
#
# Parameters:
# $1 - the k8s worker node name
# $2 - the sandbox id for kata container
# $3 - the expected count of pulled rootfs
#
assert_rootfs_count() {
	local node="$1"
	local sandbox_id="$2"
	local expect_count="$3"
	local allrootfs=""
	# Declare locally so the counter does not leak into the caller's scope.
	local count

	# verify that the sandbox_id is not empty;
	# otherwise, the find command below would search the whole sandboxes
	# directory and may yield an unexpected count of rootfs.
	if [ -z "$sandbox_id" ]; then
		return 1
	fi

	# Max loop 3 times to get all pulled rootfs for given sandbox_id
	for _ in {1..3}
	do
		allrootfs=$(exec_host "$node" "find /run/kata-containers/shared/sandboxes/${sandbox_id} -name rootfs -type d")
		if [ -n "$allrootfs" ]; then
			break
		else
			sleep 1
		fi
	done
	echo "allrootfs is: $allrootfs"
	count=$(echo "$allrootfs" | grep -o "rootfs" | wc -l)
	echo "count of container rootfs in host is: $count, expect count is: $expect_count"
	# Quote both operands so an empty value fails the test instead of
	# producing a [ syntax error.
	[ "$expect_count" -eq "$count" ]
}
# Create a pod configuration out of a template file.
#
# Parameters:
# $1 - the container image.
# $2 - the runtimeclass
# $2 - the runtimeclass, is not optional.
# $3 - the specific node name, optional.
#
# Return:
# the path to the configuration file. The caller should not care about
@ -116,6 +157,7 @@ new_pod_config() {
new_config=$(mktemp "${BATS_FILE_TMPDIR}/$(basename "${base_config}").XXX")
IMAGE="$image" RUNTIMECLASS="$runtimeclass" envsubst < "$base_config" > "$new_config"
echo "$new_config"
}
@ -147,7 +189,27 @@ set_metadata_annotation() {
echo "$annotation_key"
# yq set annotations in yaml. Quoting the key because it can have
# dots.
yq w -i --style=double "${yaml}" "${annotation_key}" "${value}"
yq write -i --style=double "${yaml}" "${annotation_key}" "${value}"
}
# Set the command for container spec.
#
# Parameters:
# $1 - the yaml file
# $2 - the index of the container
# $N - the command values
#
set_container_command() {
	local yaml="${1}"
	local container_idx="${2}"
	shift 2

	# Append each remaining argument, in order, as a string element of the
	# selected container's command array.
	while [ "$#" -gt 0 ]; do
		yq write -i \
			"${yaml}" \
			"spec.containers[${container_idx}].command[+]" \
			--tag '!!str' "${1}"
		shift
	done
}
# Set the node name on configuration spec.
@ -161,7 +223,10 @@ set_node() {
local node="$2"
[ -n "$node" ] || return 1
yq w -i "${yaml}" "spec.nodeName" "$node"
yq write -i \
"${yaml}" \
"spec.nodeName" \
"$node"
}
# Get the systemd's journal from a worker node
@ -183,3 +248,30 @@ print_node_journal() {
kubectl get pods -o name | grep "node-debugger-${node}" | \
xargs kubectl delete > /dev/null
}
# Get the sandbox id for kata container from a worker node
#
# Parameters:
# $1 - the k8s worker node name
#
# Get the sandbox id for kata container from a worker node.
#
# Polls the node's process list until a containerd-shim-kata-v2 process with
# an "-id <sandbox_id>" argument shows up, or the ${wait_time} budget
# (decremented by ${sleep_time} per attempt) is exhausted. Echoes the last
# matching sandbox id, or nothing on timeout.
get_node_kata_sandbox_id() {
	local node="$1"
	local kata_sandbox_id=""
	local local_wait_time="${wait_time}"

	# Retry until the wait-time budget runs out (not a fixed 3 iterations).
	while [ "$local_wait_time" -gt 0 ];
	do
		kata_sandbox_id=$(exec_host "$node" "ps -ef |\
			grep containerd-shim-kata-v2" |\
			grep -oP '(?<=-id\s)[a-f0-9]+' |\
			tail -1)
		if [ -n "$kata_sandbox_id" ]; then
			break
		else
			sleep "${sleep_time}"
			local_wait_time=$((local_wait_time-sleep_time))
		fi
	done
	# Quote to preserve the id exactly (no word splitting / globbing).
	echo "$kata_sandbox_id"
}

View File

@ -20,7 +20,12 @@ ALLOW_ALL_POLICY="${ALLOW_ALL_POLICY:-$(base64 -w 0 runtimeclass_workloads_work/
if [ -n "${K8S_TEST_UNION:-}" ]; then
K8S_TEST_UNION=($K8S_TEST_UNION)
else
# Before we use containerd 2.0 with 'image pull per runtime class' feature
# we need run k8s-guest-pull-image.bats test first, otherwise the test result will be affected
# by other cases which are using 'alpine' and 'quay.io/prometheus/busybox:latest' image.
# more details https://github.com/kata-containers/kata-containers/issues/8337
K8S_TEST_SMALL_HOST_UNION=( \
"k8s-guest-pull-image.bats" \
"k8s-confidential.bats" \
"k8s-attach-handlers.bats" \
"k8s-caps.bats" \

View File

@ -54,12 +54,18 @@ add_annotations_to_yaml() {
Pod)
echo "Adding kernel and initrd annotations to ${resource_kind} from ${yaml_file}"
yq write -i "${K8S_TEST_YAML}" "metadata.annotations[${annotation_name}]" "${annotation_value}"
yq write -i \
"${K8S_TEST_YAML}" \
"metadata.annotations[${annotation_name}]" \
"${annotation_value}"
;;
Deployment|Job|ReplicationController)
echo "Adding kernel and initrd annotations to ${resource_kind} from ${yaml_file}"
yq write -i "${K8S_TEST_YAML}" "spec.template.metadata.annotations[${annotation_name}]" "${annotation_value}"
yq write -i \
"${K8S_TEST_YAML}" \
"spec.template.metadata.annotations[${annotation_name}]" \
"${annotation_value}"
;;
List)

View File

@ -17,6 +17,8 @@ RUST_VERSION="null"
AGENT_BIN=${AGENT_BIN:-kata-agent}
AGENT_INIT=${AGENT_INIT:-no}
MEASURED_ROOTFS=${MEASURED_ROOTFS:-no}
# PULL_TYPE=guest-pull builds the kata agent with the guest image pull feature enabled.
PULL_TYPE=${PULL_TYPE:-default}
KERNEL_MODULES_DIR=${KERNEL_MODULES_DIR:-""}
OSBUILDER_VERSION="unknown"
DOCKER_RUNTIME=${DOCKER_RUNTIME:-runc}
@ -706,7 +708,7 @@ EOF
git checkout "${AGENT_VERSION}" && OK "git checkout successful" || die "checkout agent ${AGENT_VERSION} failed!"
fi
make clean
make LIBC=${LIBC} INIT=${AGENT_INIT} SECCOMP=${SECCOMP} AGENT_POLICY=${AGENT_POLICY}
make LIBC=${LIBC} INIT=${AGENT_INIT} SECCOMP=${SECCOMP} AGENT_POLICY=${AGENT_POLICY} PULL_TYPE=${PULL_TYPE}
make install DESTDIR="${ROOTFS_DIR}" LIBC=${LIBC} INIT=${AGENT_INIT}
if [ "${SECCOMP}" == "yes" ]; then
rm -rf "${libseccomp_install_dir}" "${gperf_install_dir}"

View File

@ -47,6 +47,10 @@ EOF
ln -s /run "$rootfs_dir/var/run"
cp --remove-destination /etc/resolv.conf "$rootfs_dir/etc"
local dir="$rootfs_dir/etc/ssl/certs"
mkdir -p "$dir"
cp --remove-destination /etc/ssl/certs/ca-certificates.crt "$dir"
# Reduce image size and memory footprint by removing unnecessary files and directories.
rm -rf $rootfs_dir/usr/share/{bash-completion,bug,doc,info,lintian,locale,man,menu,misc,pixmaps,terminfo,zsh}

View File

@ -45,6 +45,7 @@ build_initrd() {
AGENT_TARBALL="${AGENT_TARBALL}" \
AGENT_INIT="yes" \
AGENT_POLICY="${AGENT_POLICY:-}" \
PULL_TYPE="${PULL_TYPE:-default}" \
COCO_GUEST_COMPONENTS_TARBALL="${COCO_GUEST_COMPONENTS_TARBALL:-}" \
PAUSE_IMAGE_TARBALL="${PAUSE_IMAGE_TARBALL:-}"
mv "kata-containers-initrd.img" "${install_dir}/${artifact_name}"
@ -66,6 +67,7 @@ build_image() {
ROOTFS_BUILD_DEST="${builddir}/rootfs-image" \
AGENT_TARBALL="${AGENT_TARBALL}" \
AGENT_POLICY="${AGENT_POLICY:-}" \
PULL_TYPE="${PULL_TYPE:-default}" \
COCO_GUEST_COMPONENTS_TARBALL="${COCO_GUEST_COMPONENTS_TARBALL:-}" \
PAUSE_IMAGE_TARBALL="${PAUSE_IMAGE_TARBALL:-}"
mv -f "kata-containers.img" "${install_dir}/${artifact_name}"

View File

@ -43,6 +43,10 @@ spec:
value: ""
- name: SNAPSHOTTER_HANDLER_MAPPING
value: ""
- name: AGENT_HTTPS_PROXY
value: ""
- name: AGENT_NO_PROXY
value: ""
securityContext:
privileged: true
volumeMounts:

View File

@ -97,6 +97,7 @@ TDSHIM_CONTAINER_BUILDER="${TDSHIM_CONTAINER_BUILDER:-}"
TOOLS_CONTAINER_BUILDER="${TOOLS_CONTAINER_BUILDER:-}"
VIRTIOFSD_CONTAINER_BUILDER="${VIRTIOFSD_CONTAINER_BUILDER:-}"
MEASURED_ROOTFS="${MEASURED_ROOTFS:-}"
PULL_TYPE="${PULL_TYPE:-default}"
USE_CACHE="${USE_CACHE:-}"
docker run \
@ -123,6 +124,7 @@ docker run \
--env TOOLS_CONTAINER_BUILDER="${TOOLS_CONTAINER_BUILDER}" \
--env VIRTIOFSD_CONTAINER_BUILDER="${VIRTIOFSD_CONTAINER_BUILDER}" \
--env MEASURED_ROOTFS="${MEASURED_ROOTFS}" \
--env PULL_TYPE="${PULL_TYPE}" \
--env USE_CACHE="${USE_CACHE}" \
--env AA_KBC="${AA_KBC:-}" \
--env HKD_PATH="$(realpath "${HKD_PATH:-}" 2> /dev/null || true)" \

View File

@ -41,6 +41,7 @@ readonly se_image_builder="${repo_root_dir}/tools/packaging/guest-image/build_se
ARCH=${ARCH:-$(uname -m)}
MEASURED_ROOTFS=${MEASURED_ROOTFS:-no}
PULL_TYPE=${PULL_TYPE:-default}
USE_CACHE="${USE_CACHE:-"yes"}"
ARTEFACT_REGISTRY="${ARTEFACT_REGISTRY:-ghcr.io}"
ARTEFACT_REGISTRY_USERNAME="${ARTEFACT_REGISTRY_USERNAME:-}"
@ -328,6 +329,7 @@ install_image() {
install_image_confidential() {
export AGENT_POLICY=yes
export MEASURED_ROOTFS=yes
export PULL_TYPE=default
install_image "confidential"
}
@ -396,6 +398,7 @@ install_initrd() {
install_initrd_confidential() {
export AGENT_POLICY=yes
export MEASURED_ROOTFS=yes
export PULL_TYPE=default
install_initrd "confidential"
}
@ -782,7 +785,7 @@ install_agent_helper() {
export GPERF_URL="$(get_from_kata_deps "externals.gperf.url")"
info "build static agent"
DESTDIR="${destdir}" AGENT_POLICY=${agent_policy} "${agent_builder}"
DESTDIR="${destdir}" AGENT_POLICY=${agent_policy} PULL_TYPE=${PULL_TYPE} "${agent_builder}"
}
install_agent() {

View File

@ -29,6 +29,9 @@ SNAPSHOTTER_HANDLER_MAPPING="${SNAPSHOTTER_HANDLER_MAPPING:-}"
IFS=',' read -a snapshotters <<< "$SNAPSHOTTER_HANDLER_MAPPING"
snapshotters_delimiter=':'
AGENT_HTTPS_PROXY="${AGENT_HTTPS_PROXY:-}"
AGENT_NO_PROXY="${AGENT_NO_PROXY:-}"
# If we fail for any reason a message will be displayed
die() {
msg="$*"
@ -159,6 +162,15 @@ function install_artifacts() {
mkdir -p "$config_path"
local kata_config_file="${config_path}/configuration-${shim}.toml"
# Properly set https_proxy and no_proxy for Kata Containers
if [ -n "${AGENT_HTTPS_PROXY}" ]; then
sed -i -e 's|^kernel_params = "\(.*\)"|kernel_params = "\1 agent.https_proxy='${AGENT_HTTPS_PROXY}'"|g' "${kata_config_file}"
fi
if [ -n "${AGENT_NO_PROXY}" ]; then
sed -i -e 's|^kernel_params = "\(.*\)"|kernel_params = "\1 agent.no_proxy='${AGENT_NO_PROXY}'"|g' "${kata_config_file}"
fi
# Allow enabling debug for Kata Containers
if [[ "${DEBUG}" == "true" ]]; then
sed -i -e 's/^#\(enable_debug\).*=.*$/\1 = true/g' "${kata_config_file}"
@ -501,6 +513,9 @@ function main() {
echo "* CREATE_RUNTIMECLASSES: ${CREATE_RUNTIMECLASSES}"
echo "* CREATE_DEFAULT_RUNTIMECLASS: ${CREATE_DEFAULT_RUNTIMECLASS}"
echo "* ALLOWED_HYPERVISOR_ANNOTATIONS: ${ALLOWED_HYPERVISOR_ANNOTATIONS}"
echo "* SNAPSHOTTER_HANDLER_MAPPING: ${SNAPSHOTTER_HANDLER_MAPPING}"
echo "* AGENT_HTTPS_PROXY: ${AGENT_HTTPS_PROXY}"
echo "* AGENT_NO_PROXY: ${AGENT_NO_PROXY}"
# script requires that user is root
euid=$(id -u)

View File

@ -21,7 +21,8 @@ RUN apt-get update && \
musl-tools \
openssl \
perl \
protobuf-compiler && \
protobuf-compiler \
clang && \
apt-get clean && rm -rf /var/lib/apt/lists/ && \
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain ${RUST_TOOLCHAIN}

View File

@ -49,8 +49,8 @@ build_agent_from_source() {
/usr/bin/install_libseccomp.sh /usr /usr
cd src/agent
DESTDIR=${DESTDIR} AGENT_POLICY=${AGENT_POLICY} make
DESTDIR=${DESTDIR} AGENT_POLICY=${AGENT_POLICY} make install
DESTDIR=${DESTDIR} AGENT_POLICY=${AGENT_POLICY} PULL_TYPE=${PULL_TYPE} make
DESTDIR=${DESTDIR} AGENT_POLICY=${AGENT_POLICY} PULL_TYPE=${PULL_TYPE} make install
}
build_agent_from_source $@

View File

@ -26,6 +26,7 @@ sudo docker pull ${container_image} || \
sudo docker run --rm -i -v "${repo_root_dir}:${repo_root_dir}" \
--env DESTDIR=${DESTDIR} \
--env AGENT_POLICY=${AGENT_POLICY:-no} \
--env PULL_TYPE=${PULL_TYPE:-default} \
--env LIBSECCOMP_VERSION=${LIBSECCOMP_VERSION} \
--env LIBSECCOMP_URL=${LIBSECCOMP_URL} \
--env GPERF_VERSION=${GPERF_VERSION} \