Merge pull request #12122 from fidencio/topic/configs-do-no-have-commented-out-options
runtimes: config: Do NOT have commented fields
@@ -620,7 +620,7 @@ impl Annotation {
                     hv.boot_info.kernel = value.to_string();
                 }
                 KATA_ANNO_CFG_HYPERVISOR_KERNEL_PARAMS => {
-                    hv.boot_info.kernel_params = value.to_string();
+                    hv.boot_info.replace_kernel_params(value);
                 }
                 KATA_ANNO_CFG_HYPERVISOR_IMAGE_PATH => {
                     hv.boot_info.validate_boot_path(value)?;
@@ -47,7 +47,7 @@ pub const DEFAULT_BLOCK_DEVICE_QUEUE_SIZE: u32 = 128;
 pub const DEFAULT_SHARED_FS_TYPE: &str = "virtio-fs";
 pub const DEFAULT_VIRTIO_FS_CACHE_MODE: &str = "never";
 pub const DEFAULT_VIRTIO_FS_DAX_SIZE_MB: u32 = 1024;
-pub const DEFAULT_SHARED_9PFS_SIZE_MB: u32 = 128 * 1024;
+pub const DEFAULT_SHARED_9PFS_SIZE_MB: u32 = 8 * 1024;
 pub const MIN_SHARED_9PFS_SIZE_MB: u32 = 4 * 1024;
 pub const MAX_SHARED_9PFS_SIZE_MB: u32 = 8 * 1024 * 1024;
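For context, a minimal sketch of how such bounds are typically applied — illustrative only, not the kata-containers implementation; the function name and clamping behavior are assumptions:

// Illustrative sketch (assumed usage): the MIN/MAX constants bound a user-supplied
// 9pfs share size in MiB, with the new 8 * 1024 MiB (8 GiB) value as the fallback
// default when nothing is configured.
fn shared_9pfs_size_mb(requested: u32) -> u32 {
    const DEFAULT_SHARED_9PFS_SIZE_MB: u32 = 8 * 1024;
    const MIN_SHARED_9PFS_SIZE_MB: u32 = 4 * 1024;
    const MAX_SHARED_9PFS_SIZE_MB: u32 = 8 * 1024 * 1024;

    if requested == 0 {
        DEFAULT_SHARED_9PFS_SIZE_MB
    } else {
        requested.clamp(MIN_SHARED_9PFS_SIZE_MB, MAX_SHARED_9PFS_SIZE_MB)
    }
}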
@@ -112,3 +112,6 @@ pub const MAX_REMOTE_VCPUS: u32 = 32;
 pub const MIN_REMOTE_MEMORY_SIZE_MB: u32 = 64;
 pub const DEFAULT_REMOTE_MEMORY_SIZE_MB: u32 = 128;
 pub const DEFAULT_REMOTE_MEMORY_SLOTS: u32 = 128;
+
+// Default configuration for factory/templating
+pub const DEFAULT_TEMPLATE_PATH: &str = "/run/vc/vm/template";
@@ -85,6 +85,11 @@ impl ConfigPlugin for CloudHypervisorConfig {
             if ch.memory_info.memory_slots == 0 {
                 ch.memory_info.memory_slots = default::DEFAULT_CH_MEMORY_SLOTS;
             }
+
+            // Apply factory defaults
+            if ch.factory.template_path.is_empty() {
+                ch.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
+            }
         }

         Ok(())
@@ -79,6 +79,11 @@ impl ConfigPlugin for DragonballConfig {
             if db.memory_info.memory_slots == 0 {
                 db.memory_info.memory_slots = default::DEFAULT_DRAGONBALL_MEMORY_SLOTS;
             }
+
+            // Apply factory defaults
+            if db.factory.template_path.is_empty() {
+                db.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
+            }
         }
         Ok(())
     }
@@ -69,6 +69,11 @@ impl ConfigPlugin for FirecrackerConfig {
                 firecracker.memory_info.default_memory =
                     default::DEFAULT_FIRECRACKER_MEMORY_SIZE_MB;
             }
+
+            // Apply factory defaults
+            if firecracker.factory.template_path.is_empty() {
+                firecracker.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
+            }
         }

         Ok(())
@@ -374,6 +374,71 @@ impl BootInfo {
         self.kernel_params = p.join(KERNEL_PARAM_DELIMITER);
     }
+
+    /// Replace kernel parameters with the same key.
+    ///
+    /// For each parameter in the new_params string, if a parameter with the same key
+    /// already exists in kernel_params, it will be removed before adding the new one.
+    /// This allows selective parameter override from annotations without replacing
+    /// the entire kernel command line.
+    pub fn replace_kernel_params(&mut self, new_params: &str) {
+        if new_params.is_empty() {
+            return;
+        }
+
+        // Parse existing kernel parameters into a map
+        let mut existing_params: Vec<(String, String)> = Vec::new();
+        for param in self.kernel_params.split(KERNEL_PARAM_DELIMITER) {
+            let param = param.trim();
+            if param.is_empty() {
+                continue;
+            }
+            // Split by '=' to get key and value
+            if let Some(eq_pos) = param.find('=') {
+                let key = param[..eq_pos].to_string();
+                let value = param[eq_pos + 1..].to_string();
+                existing_params.push((key, value));
+            } else {
+                // Parameter without value (like "quiet")
+                existing_params.push((param.to_string(), String::new()));
+            }
+        }
+
+        // Parse new parameters and collect keys to replace
+        let mut new_param_keys: Vec<String> = Vec::new();
+        let mut new_param_list: Vec<String> = Vec::new();
+        for param in new_params.split(KERNEL_PARAM_DELIMITER) {
+            let param = param.trim();
+            if param.is_empty() {
+                continue;
+            }
+            if let Some(eq_pos) = param.find('=') {
+                let key = param[..eq_pos].to_string();
+                new_param_keys.push(key);
+            } else {
+                new_param_keys.push(param.to_string());
+            }
+            new_param_list.push(param.to_string());
+        }
+
+        // Remove existing parameters that will be replaced
+        existing_params.retain(|(key, _)| !new_param_keys.contains(key));
+
+        // Reconstruct kernel_params: existing params + new params
+        let mut all_params: Vec<String> = existing_params
+            .iter()
+            .map(|(key, value)| {
+                if value.is_empty() {
+                    key.clone()
+                } else {
+                    format!("{}={}", key, value)
+                }
+            })
+            .collect();
+        all_params.extend(new_param_list);
+
+        self.kernel_params = all_params.join(KERNEL_PARAM_DELIMITER);
+    }
+
     /// Validate guest kernel image annotation.
     pub fn validate_boot_path(&self, path: &str) -> Result<()> {
         validate_path!(path, "path {} is invalid{}")?;
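A short usage sketch of the merge semantics above, assuming BootInfo derives Default and KERNEL_PARAM_DELIMITER is a single space (both are assumptions about the surrounding module, not shown in this diff):

// Existing command line with an agent.log entry and a value-less "quiet" flag.
let mut boot = BootInfo {
    kernel_params: "console=ttyS0 quiet agent.log=debug".to_string(),
    ..Default::default()
};

// The annotation value overrides agent.log and appends the new vsyscall entry;
// untouched parameters keep their original order.
boot.replace_kernel_params("agent.log=info vsyscall=emulate");
assert_eq!(
    boot.kernel_params,
    "console=ttyS0 quiet agent.log=info vsyscall=emulate"
);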
@@ -91,6 +91,11 @@ impl ConfigPlugin for QemuConfig {
             if qemu.memory_info.memory_slots == 0 {
                 qemu.memory_info.memory_slots = default::DEFAULT_QEMU_MEMORY_SLOTS;
             }
+
+            // Apply factory defaults
+            if qemu.factory.template_path.is_empty() {
+                qemu.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
+            }
         }

         Ok(())
@@ -65,6 +65,11 @@ impl ConfigPlugin for RemoteConfig {
             if remote.memory_info.memory_slots == 0 {
                 remote.memory_info.memory_slots = default::DEFAULT_REMOTE_MEMORY_SLOTS
             }
+
+            // Apply factory defaults
+            if remote.factory.template_path.is_empty() {
+                remote.factory.template_path = default::DEFAULT_TEMPLATE_PATH.to_string();
+            }
         }

         Ok(())
@@ -52,7 +52,8 @@ pub struct Config {
     // the next compact_force_times times, a compaction will be forced
    // regardless of the system's memory situation.
    // If compact_force_times is set to 0, will do force compaction each time.
-   // If compact_force_times is set to std::u64::MAX, will never do force compaction.
+   // If compact_force_times is set to std::u64::MAX, u64::MAX - 1, or i64::MAX, will never do force compaction.
+   // Note: Using i64::MAX (9223372036854775807) instead of u64::MAX to avoid TOML parser issues.
    pub compact_force_times: u64,
 }
@@ -67,7 +68,7 @@ impl Default for Config {
             compact_sec_max: 5 * 60,
             compact_order: PAGE_REPORTING_MIN_ORDER,
             compact_threshold: 2 << PAGE_REPORTING_MIN_ORDER,
-            compact_force_times: u64::MAX,
+            compact_force_times: i64::MAX as u64,
         }
     }
 }
@@ -133,7 +134,7 @@ impl CompactCore {
     }

     fn need_force_compact(&self) -> bool {
-        if self.config.compact_force_times == u64::MAX {
+        if self.config.compact_force_times >= i64::MAX as u64 {
            return false;
        }
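The switch from u64::MAX to i64::MAX is driven by TOML's integer range; a hedged, self-contained sketch using the common toml and serde crates (an assumption — the diff itself does not show which parser kata uses) illustrates the failure mode:

// Sketch: TOML integers are signed 64-bit, so u64::MAX cannot round-trip through a
// generated configuration file, while i64::MAX (9223372036854775807) parses fine.
#[derive(serde::Deserialize)]
struct MemAgentCompact {
    compact_force_times: u64,
}

fn main() {
    let overflowing = "compact_force_times = 18446744073709551615"; // u64::MAX
    let largest_valid = "compact_force_times = 9223372036854775807"; // i64::MAX

    assert!(toml::from_str::<MemAgentCompact>(overflowing).is_err());
    assert_eq!(
        toml::from_str::<MemAgentCompact>(largest_valid)
            .unwrap()
            .compact_force_times,
        i64::MAX as u64
    );
}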
@@ -347,8 +347,13 @@ endif
 DEFBLOCKDEVICEAIO_QEMU := io_uring
 DEFNETWORKMODEL_QEMU := tcfilter
 DEFDISABLEGUESTSELINUX := true
-DEFSECCOMPSANDBOXPARAM := on,obsolete=deny,spawn=deny,resourcecontrol=deny
-DEFGUESTSELINUXLABEL := system_u:system_r:container_t
+# Default is empty string "" to match Rust default None (when commented out in config).
+# Most users will want to set this to "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
+# for better security. Note: "elevateprivileges=deny" doesn't work with daemonize option.
+DEFSECCOMPSANDBOXPARAM := ""
+# Default is empty string "" to match Rust default None (when commented out in config).
+# Most users will want to set this to "system_u:system_r:container_t" for SELinux support.
+DEFGUESTSELINUXLABEL := ""
 endif

 ifneq (,$(FCCMD))
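The generated defaults above rely on the runtime treating an empty string like an unset option; a minimal sketch of that normalization, assuming a helper of this shape exists (it is not shown in this diff):

// Illustrative only: normalize the Makefile/TOML default "" to None so an empty
// seccompsandbox or guest SELinux label behaves like a commented-out option.
fn optional_setting(raw: &str) -> Option<String> {
    if raw.trim().is_empty() {
        None
    } else {
        Some(raw.to_string())
    }
}

fn main() {
    assert_eq!(optional_setting(""), None);
    assert_eq!(
        optional_setting("on,obsolete=deny,spawn=deny,resourcecontrol=deny").as_deref(),
        Some("on,obsolete=deny,spawn=deny,resourcecontrol=deny")
    );
}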
@@ -18,41 +18,15 @@ image = "@IMAGEPATH@"
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# Block storage driver to be used when the VM rootfs is backed
|
||||
# by a block device.
|
||||
vm_rootfs_driver = "@VMROOTFSDRIVER_CLH@"
|
||||
|
||||
# Enable confidential guest support.
|
||||
# Toggling that setting may trigger different hardware features, ranging
|
||||
# from memory encryption to both memory and CPU-state encryption and integrity.
|
||||
# The Kata Containers runtime dynamically detects the available feature set and
|
||||
# aims at enabling the largest possible one, returning an error if none is
|
||||
# available, or none is supported by the hypervisor.
|
||||
#
|
||||
# Known limitations:
|
||||
# * Does not work by design:
|
||||
# - CPU Hotplug
|
||||
# - Memory Hotplug
|
||||
# - NVDIMM devices
|
||||
#
|
||||
# Supported TEEs:
|
||||
# * Intel TDX
|
||||
#
|
||||
# Default false
|
||||
# confidential_guest = true
|
||||
|
||||
# Path to the firmware.
|
||||
# If you want Cloud Hypervisor to use a specific firmware, set its path below.
|
||||
# This option is only used when confidential_guest is enabled.
|
||||
#
|
||||
# For more information about firmware that can be used with specific TEEs,
|
||||
# please, refer to:
|
||||
# * Intel TDX:
|
||||
# - td-shim: https://github.com/confidential-containers/td-shim
|
||||
#
|
||||
# firmware = "@FIRMWAREPATH@"
|
||||
firmware = "@FIRMWAREPATH@"
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -68,7 +42,7 @@ valid_hypervisor_paths = @CLHVALIDHYPERVISORPATHS@
|
||||
# List of valid annotations values for ctlpath
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends:
|
||||
# valid_ctlpaths =
|
||||
valid_ctlpaths = []
|
||||
|
||||
# Optional space-separated list of options to pass to the guest kernel.
|
||||
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
|
||||
@@ -166,7 +140,7 @@ default_bridges = @DEFBRIDGES@
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
reclaim_guest_freed_memory = false
|
||||
|
||||
# Block device driver to be used by the hypervisor when a container's storage
|
||||
# is backed by a block device or a file. This driver facilitates attaching the
|
||||
@@ -176,7 +150,7 @@ block_device_driver = "virtio-blk-pci"
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Bandwidth rate limiter options
|
||||
#
|
||||
@@ -184,35 +158,35 @@ block_device_driver = "virtio-blk-pci"
|
||||
# for SB/VM).
|
||||
# The same value is used for inbound and outbound bandwidth.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#disk_rate_limiter_bw_max_rate = 0
|
||||
#
|
||||
disk_rate_limiter_bw_max_rate = 0
|
||||
|
||||
# disk_rate_limiter_bw_one_time_burst increases the initial max rate and this
|
||||
# initial extra credit does *NOT* affect the overall limit and can be used for
|
||||
# an *initial* burst of data.
|
||||
# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
|
||||
# set to a non zero value.
|
||||
#disk_rate_limiter_bw_one_time_burst = 0
|
||||
#
|
||||
disk_rate_limiter_bw_one_time_burst = 0
|
||||
|
||||
# Operation rate limiter options
|
||||
#
|
||||
# disk_rate_limiter_ops_max_rate controls disk I/O bandwidth (size in ops/sec
|
||||
# for SB/VM).
|
||||
# The same value is used for inbound and outbound bandwidth.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#disk_rate_limiter_ops_max_rate = 0
|
||||
#
|
||||
disk_rate_limiter_ops_max_rate = 0
|
||||
|
||||
# disk_rate_limiter_ops_one_time_burst increases the initial max rate and this
|
||||
# initial extra credit does *NOT* affect the overall limit and can be used for
|
||||
# an *initial* burst of data.
|
||||
# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
|
||||
# set to a non zero value.
|
||||
#disk_rate_limiter_ops_one_time_burst = 0
|
||||
disk_rate_limiter_ops_one_time_burst = 0
|
||||
|
||||
# Virtio queue size. Size: byte. default 128
|
||||
#queue_size: u32,
|
||||
queue_size = 128
|
||||
|
||||
# Block device multi-queue, default 1
|
||||
#num_queues: usize,
|
||||
num_queues = 1
|
||||
|
||||
# Enable pre allocation of VM RAM, default false
|
||||
# Enabling this will result in lower container density
|
||||
@@ -221,7 +195,7 @@ block_device_driver = "virtio-blk-pci"
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -229,27 +203,27 @@ block_device_driver = "virtio-blk-pci"
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable running clh VMM as a non-root user.
|
||||
# By default clh VMM run as root. When this is set to true, clh VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
#rootless = true
|
||||
rootless = false
|
||||
|
||||
# Disable the 'seccomp' feature from Cloud Hypervisor, firecracker or dragonball, default false
|
||||
# disable_seccomp = true
|
||||
disable_seccomp = false
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = false
|
||||
|
||||
# Path to OCI hook binaries in the *guest rootfs*.
|
||||
# This does not affect host-side hooks which must instead be added to
|
||||
@@ -266,30 +240,31 @@ block_device_driver = "virtio-blk-pci"
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
# Recommended value when enabling: "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
|
||||
# Enable swap in the guest. Default false.
|
||||
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device.
|
||||
#enable_guest_swap = true
|
||||
enable_guest_swap = false
|
||||
|
||||
# If enable_guest_swap is enabled, the swap device will be created in the guest
|
||||
# at this path. Default "/run/kata-containers/swap".
|
||||
#guest_swap_path = "/run/kata-containers/swap"
|
||||
guest_swap_path = "/run/kata-containers/swap"
|
||||
|
||||
# The percentage of the total memory to be used as swap device.
|
||||
# Default 100.
|
||||
#guest_swap_size_percent = 100
|
||||
guest_swap_size_percent = 100
|
||||
|
||||
# The threshold in seconds to create swap device in the guest.
|
||||
# Kata will wait guest_swap_create_threshold_secs seconds before creating swap device.
|
||||
# Default 60.
|
||||
#guest_swap_create_threshold_secs = 60
|
||||
guest_swap_create_threshold_secs = 60
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
container_pipe_size=@PIPESIZE@
|
||||
container_pipe_size = @PIPESIZE@
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -303,18 +278,18 @@ container_pipe_size=@PIPESIZE@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent dial timeout in millisecond.
|
||||
# (default: 10)
|
||||
#dial_timeout_ms = 10
|
||||
dial_timeout_ms = 10
|
||||
|
||||
# Agent reconnect timeout in millisecond.
|
||||
# Retry times = reconnect_timeout_ms / dial_timeout_ms (default: 300)
|
||||
@@ -323,28 +298,28 @@ container_pipe_size=@PIPESIZE@
|
||||
# You'd better not change the value of dial_timeout_ms, unless you have an
|
||||
# idea of what you are doing.
|
||||
# (default: 3000)
|
||||
#reconnect_timeout_ms = 3000
|
||||
reconnect_timeout_ms = 3000
|
||||
|
||||
[agent.@PROJECT_TYPE@.mem_agent]
|
||||
# Control the mem-agent function enable or disable.
|
||||
# Default to false
|
||||
#mem_agent_enable = true
|
||||
mem_agent_enable = false
|
||||
|
||||
# Control the mem-agent memcg function disable or enable
|
||||
# Default to false
|
||||
#memcg_disable = false
|
||||
memcg_disable = false
|
||||
|
||||
# Control the mem-agent function swap enable or disable.
|
||||
# Default to false
|
||||
#memcg_swap = false
|
||||
memcg_swap = false
|
||||
|
||||
# Control the mem-agent function swappiness max number.
|
||||
# Default to 50
|
||||
#memcg_swappiness_max = 50
|
||||
memcg_swappiness_max = 50
|
||||
|
||||
# Control the mem-agent memcg function wait period seconds
|
||||
# Default to 600
|
||||
#memcg_period_secs = 600
|
||||
memcg_period_secs = 600
|
||||
|
||||
# Control the mem-agent memcg wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
@@ -352,7 +327,7 @@ container_pipe_size=@PIPESIZE@
|
||||
# then the aging and eviction for this cgroup will not be
|
||||
# executed after this waiting period.
|
||||
# Default to 1
|
||||
#memcg_period_psi_percent_limit = 1
|
||||
memcg_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time for a cgroup
|
||||
@@ -360,44 +335,44 @@ container_pipe_size=@PIPESIZE@
|
||||
# this cgroup will immediately stop and will not resume until
|
||||
# the next memcg waiting period.
|
||||
# Default to 1
|
||||
#memcg_eviction_psi_percent_limit = 1
|
||||
memcg_eviction_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction run aging count min.
|
||||
# A cgroup will only perform eviction when the number of aging cycles
|
||||
# in memcg is greater than or equal to memcg_eviction_run_aging_count_min.
|
||||
# Default to 3
|
||||
#memcg_eviction_run_aging_count_min = 3
|
||||
memcg_eviction_run_aging_count_min = 3
|
||||
|
||||
# Control the mem-agent compact function disable or enable
|
||||
# Default to false
|
||||
#compact_disable = false
|
||||
compact_disable = false
|
||||
|
||||
# Control the mem-agent compaction function wait period seconds
|
||||
# Default to 600
|
||||
#compact_period_secs = 600
|
||||
compact_period_secs = 600
|
||||
|
||||
# Control the mem-agent compaction function wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
# the compaction waiting period exceeds this value,
|
||||
# then the compaction will not be executed after this waiting period.
|
||||
# Default to 1
|
||||
#compact_period_psi_percent_limit = 1
|
||||
compact_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent compaction function compact PSI percent limit.
|
||||
# During compaction, the percentage of memory and IO PSI stall time
|
||||
# is checked every second. If this percentage exceeds
|
||||
# compact_psi_percent_limit, the compaction process will stop.
|
||||
# Default to 5
|
||||
#compact_psi_percent_limit = 5
|
||||
compact_psi_percent_limit = 5
|
||||
|
||||
# Control the maximum number of seconds for each compaction of mem-agent compact function.
|
||||
# Default to 180
|
||||
#compact_sec_max = 180
|
||||
# Default to 300
|
||||
compact_sec_max = 300
|
||||
|
||||
# Control the mem-agent compaction function compact order.
|
||||
# compact_order is used with compact_threshold.
|
||||
# Default to 9
|
||||
#compact_order = 9
|
||||
compact_order = 9
|
||||
|
||||
# Control the mem-agent compaction function compact threshold.
|
||||
# compact_threshold is the pages number.
|
||||
@@ -410,7 +385,7 @@ container_pipe_size=@PIPESIZE@
|
||||
# since the previous compaction.
|
||||
# then the system should initiate another round of memory compaction.
|
||||
# Default to 1024
|
||||
#compact_threshold = 1024
|
||||
compact_threshold = 1024
|
||||
|
||||
# Control the mem-agent compaction function force compact times.
|
||||
# After one compaction, if there has not been a compaction within
|
||||
@@ -419,7 +394,9 @@ container_pipe_size=@PIPESIZE@
|
||||
# If compact_force_times is set to 0, will do force compaction each time.
|
||||
# If compact_force_times is set to 18446744073709551615, will never do force compaction.
|
||||
# Default to 18446744073709551615
|
||||
#compact_force_times = 18446744073709551615
|
||||
# Note: Using a large but valid u64 value (within i64::MAX range) instead of u64::MAX to avoid TOML parser issues
|
||||
# Using 9223372036854775807 (i64::MAX) which is effectively "never" for practical purposes
|
||||
compact_force_times = 9223372036854775807
|
||||
|
||||
# Create Container Request Timeout
|
||||
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
|
||||
@@ -432,20 +409,20 @@ container_pipe_size=@PIPESIZE@
|
||||
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
|
||||
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
|
||||
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
|
||||
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# If enabled, it means that 1) if the runtime exits abnormally,
|
||||
# the cleanup process will be skipped, and 2) the runtime will not exit
|
||||
# even if the health check fails.
|
||||
# This option is typically used to retain abnormal information for debugging.
|
||||
# (default: false)
|
||||
#keep_abnormal = true
|
||||
keep_abnormal = false
|
||||
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -470,33 +447,33 @@ container_pipe_size=@PIPESIZE@
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_CLH@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_CLH@"
|
||||
|
||||
name="@RUNTIMENAME@"
|
||||
hypervisor_name="@HYPERVISOR_CLH@"
|
||||
agent_name="@PROJECT_TYPE@"
|
||||
name = "@RUNTIMENAME@"
|
||||
hypervisor_name = "@HYPERVISOR_CLH@"
|
||||
agent_name = "@PROJECT_TYPE@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -504,7 +481,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -512,18 +489,18 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_CLH@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_CLH@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -532,7 +509,7 @@ experimental=@DEFAULTEXPFEATURES@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_CLH@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_CLH@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro, rw) into the sandbox's shared path.
|
||||
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
|
||||
@@ -542,7 +519,7 @@ static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_CLH@
|
||||
# - "/path/to", default readonly mode.
|
||||
# - "/path/to:ro", readonly mode.
|
||||
# - "/path/to:rw", readwrite mode.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# Base directory of directly attachable network config.
|
||||
# Network devices for VM-based containers are allowed to be placed in the
|
||||
|
||||
@@ -16,13 +16,12 @@ path = "@DBPATH@"
|
||||
ctlpath = "@DBCTLPATH@"
|
||||
kernel = "@KERNELPATH_DB@"
|
||||
image = "@IMAGEPATH@"
|
||||
# initrd = "@INITRDPATH@"
|
||||
|
||||
# rootfs filesystem type:
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
|
||||
# Block storage driver to be used when the VM rootfs is backed
|
||||
@@ -43,7 +42,7 @@ valid_hypervisor_paths = @DBVALIDHYPERVISORPATHS@
|
||||
# List of valid annotations values for ctlpath
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
# Your distribution recommends:
|
||||
# valid_ctlpaths =
|
||||
valid_ctlpaths = []
|
||||
|
||||
# Optional space-separated list of options to pass to the guest kernel.
|
||||
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
|
||||
@@ -106,7 +105,7 @@ default_bridges = @DEFBRIDGES@
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
reclaim_guest_freed_memory = false
|
||||
|
||||
# Default memory size in MiB for SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSZ@ MiB.
|
||||
@@ -129,7 +128,7 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_DB@"
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# The log level will be applied to hypervisor.
|
||||
# Possible values are:
|
||||
@@ -140,17 +139,18 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_DB@"
|
||||
# - error
|
||||
# - critical
|
||||
# Default: info
|
||||
#log_level = "info"
|
||||
log_level = "info"
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
# Default false
|
||||
disable_nesting_checks = false
|
||||
|
||||
# If host doesn't support vhost_net, set to true. Thus we won't create vhost fds for nics.
|
||||
# Default false
|
||||
#disable_vhost_net = true
|
||||
disable_vhost_net = false
|
||||
|
||||
# Path to OCI hook binaries in the *guest rootfs*.
|
||||
# This does not affect host-side hooks which must instead be added to
|
||||
@@ -167,7 +167,8 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_DB@"
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
# Recommended value when enabling: "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
|
||||
# Shared file system type:
|
||||
# - inline-virtio-fs (default)
|
||||
@@ -209,13 +210,13 @@ virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Virtio queue size. Size: byte. default 128
|
||||
#queue_size: u32,
|
||||
queue_size = 128
|
||||
|
||||
# Block device multi-queue, default 1
|
||||
#num_queues: usize,
|
||||
num_queues = 1
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -223,33 +224,33 @@ virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Disable the 'seccomp' feature from Cloud Hypervisor, firecracker or dragonball, default false
|
||||
# disable_seccomp = true
|
||||
disable_seccomp = false
|
||||
|
||||
# Enable swap in the guest. Default false.
|
||||
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device.
|
||||
#enable_guest_swap = true
|
||||
enable_guest_swap = false
|
||||
|
||||
# If enable_guest_swap is enabled, the swap device will be created in the guest
|
||||
# at this path. Default "/run/kata-containers/swap".
|
||||
#guest_swap_path = "/run/kata-containers/swap"
|
||||
guest_swap_path = "/run/kata-containers/swap"
|
||||
|
||||
# The percentage of the total memory to be used as swap device.
|
||||
# Default 100.
|
||||
#guest_swap_size_percent = 100
|
||||
guest_swap_size_percent = 100
|
||||
|
||||
# The threshold in seconds to create swap device in the guest.
|
||||
# Kata will wait guest_swap_create_threshold_secs seconds before creating swap device.
|
||||
# Default 60.
|
||||
#guest_swap_create_threshold_secs = 60
|
||||
guest_swap_create_threshold_secs = 60
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
container_pipe_size=@PIPESIZE@
|
||||
container_pipe_size = @PIPESIZE@
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# The log level will be applied to agent.
|
||||
# Possible values are:
|
||||
@@ -260,7 +261,7 @@ container_pipe_size=@PIPESIZE@
|
||||
# - error
|
||||
# - critical
|
||||
# (default: info)
|
||||
#log_level = "info"
|
||||
log_level = "info"
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -274,18 +275,18 @@ container_pipe_size=@PIPESIZE@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent dial timeout in millisecond.
|
||||
# (default: 10)
|
||||
#dial_timeout_ms = 10
|
||||
dial_timeout_ms = 10
|
||||
|
||||
# Agent reconnect timeout in millisecond.
|
||||
# Retry times = reconnect_timeout_ms / dial_timeout_ms (default: 300)
|
||||
@@ -294,7 +295,7 @@ container_pipe_size=@PIPESIZE@
|
||||
# You'd better not change the value of dial_timeout_ms, unless you have an
|
||||
# idea of what you are doing.
|
||||
# (default: 3000)
|
||||
#reconnect_timeout_ms = 3000
|
||||
reconnect_timeout_ms = 3000
|
||||
|
||||
# Create Container Request Timeout
|
||||
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
|
||||
@@ -307,28 +308,28 @@ container_pipe_size=@PIPESIZE@
|
||||
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
|
||||
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
|
||||
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
|
||||
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
|
||||
[agent.@PROJECT_TYPE@.mem_agent]
|
||||
# Control the mem-agent function enable or disable.
|
||||
# Default to false
|
||||
#mem_agent_enable = true
|
||||
mem_agent_enable = false
|
||||
|
||||
# Control the mem-agent memcg function disable or enable
|
||||
# Default to false
|
||||
#memcg_disable = false
|
||||
memcg_disable = false
|
||||
|
||||
# Control the mem-agent function swap enable or disable.
|
||||
# Default to false
|
||||
#memcg_swap = false
|
||||
memcg_swap = false
|
||||
|
||||
# Control the mem-agent function swappiness max number.
|
||||
# Default to 50
|
||||
#memcg_swappiness_max = 50
|
||||
memcg_swappiness_max = 50
|
||||
|
||||
# Control the mem-agent memcg function wait period seconds
|
||||
# Default to 600
|
||||
#memcg_period_secs = 600
|
||||
memcg_period_secs = 600
|
||||
|
||||
# Control the mem-agent memcg wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
@@ -336,7 +337,7 @@ container_pipe_size=@PIPESIZE@
|
||||
# then the aging and eviction for this cgroup will not be
|
||||
# executed after this waiting period.
|
||||
# Default to 1
|
||||
#memcg_period_psi_percent_limit = 1
|
||||
memcg_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time for a cgroup
|
||||
@@ -344,44 +345,44 @@ container_pipe_size=@PIPESIZE@
|
||||
# this cgroup will immediately stop and will not resume until
|
||||
# the next memcg waiting period.
|
||||
# Default to 1
|
||||
#memcg_eviction_psi_percent_limit = 1
|
||||
memcg_eviction_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction run aging count min.
|
||||
# A cgroup will only perform eviction when the number of aging cycles
|
||||
# in memcg is greater than or equal to memcg_eviction_run_aging_count_min.
|
||||
# Default to 3
|
||||
#memcg_eviction_run_aging_count_min = 3
|
||||
memcg_eviction_run_aging_count_min = 3
|
||||
|
||||
# Control the mem-agent compact function disable or enable
|
||||
# Default to false
|
||||
#compact_disable = false
|
||||
compact_disable = false
|
||||
|
||||
# Control the mem-agent compaction function wait period seconds
|
||||
# Default to 600
|
||||
#compact_period_secs = 600
|
||||
compact_period_secs = 600
|
||||
|
||||
# Control the mem-agent compaction function wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
# the compaction waiting period exceeds this value,
|
||||
# then the compaction will not be executed after this waiting period.
|
||||
# Default to 1
|
||||
#compact_period_psi_percent_limit = 1
|
||||
compact_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent compaction function compact PSI percent limit.
|
||||
# During compaction, the percentage of memory and IO PSI stall time
|
||||
# is checked every second. If this percentage exceeds
|
||||
# compact_psi_percent_limit, the compaction process will stop.
|
||||
# Default to 5
|
||||
#compact_psi_percent_limit = 5
|
||||
compact_psi_percent_limit = 5
|
||||
|
||||
# Control the maximum number of seconds for each compaction of mem-agent compact function.
|
||||
# Default to 180
|
||||
#compact_sec_max = 180
|
||||
compact_sec_max = 180
|
||||
|
||||
# Control the mem-agent compaction function compact order.
|
||||
# compact_order is used with compact_threshold.
|
||||
# Default to 9
|
||||
#compact_order = 9
|
||||
compact_order = 9
|
||||
|
||||
# Control the mem-agent compaction function compact threshold.
|
||||
# compact_threshold is the pages number.
|
||||
@@ -394,22 +395,22 @@ container_pipe_size=@PIPESIZE@
|
||||
# since the previous compaction.
|
||||
# then the system should initiate another round of memory compaction.
|
||||
# Default to 1024
|
||||
#compact_threshold = 1024
|
||||
compact_threshold = 1024
|
||||
|
||||
# Control the mem-agent compaction function force compact times.
|
||||
# After one compaction, if there has not been a compaction within
|
||||
# the next compact_force_times times, a compaction will be forced
|
||||
# regardless of the system's memory situation.
|
||||
# If compact_force_times is set to 0, will do force compaction each time.
|
||||
# If compact_force_times is set to 18446744073709551615, will never do force compaction.
|
||||
# Default to 18446744073709551615
|
||||
#compact_force_times = 18446744073709551615
|
||||
# If compact_force_times is set to 9223372036854775807, will never do force compaction.
|
||||
# Default to 9223372036854775807
|
||||
compact_force_times = 9223372036854775807
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# The log level will be applied to runtimes.
|
||||
# Possible values are:
|
||||
@@ -420,14 +421,14 @@ container_pipe_size=@PIPESIZE@
|
||||
# - error
|
||||
# - critical
|
||||
# (default: info)
|
||||
#log_level = "info"
|
||||
log_level = "info"
|
||||
|
||||
# If enabled, it means that 1) if the runtime exits abnormally,
|
||||
# the cleanup process will be skipped, and 2) the runtime will not exit
|
||||
# even if the health check fails.
|
||||
# This option is typically used to retain abnormal information for debugging.
|
||||
# (default: false)
|
||||
#keep_abnormal = true
|
||||
keep_abnormal = false
|
||||
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -452,33 +453,33 @@ container_pipe_size=@PIPESIZE@
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_DB@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_DB@"
|
||||
|
||||
name="@RUNTIMENAME@"
|
||||
hypervisor_name="@HYPERVISOR_DB@"
|
||||
agent_name="@PROJECT_TYPE@"
|
||||
name = "@RUNTIMENAME@"
|
||||
hypervisor_name = "@HYPERVISOR_DB@"
|
||||
agent_name = "@PROJECT_TYPE@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -486,7 +487,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -494,18 +495,18 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_DB@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_DB@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -514,7 +515,7 @@ experimental=@DEFAULTEXPFEATURES@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_DB@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_DB@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro, rw) into the sandbox's shared path.
|
||||
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
|
||||
@@ -524,7 +525,7 @@ static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_DB@
|
||||
# - "/path/to", default readonly mode.
|
||||
# - "/path/to:ro", readonly mode.
|
||||
# - "/path/to:rw", readwrite mode.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# Base directory of directly attachable network config.
|
||||
# Network devices for VM-based containers are allowed to be placed in the
|
||||
@@ -540,4 +541,4 @@ dan_conf = "@DEFDANCONF@"
|
||||
use_passfd_io = true
|
||||
|
||||
# If fd passthrough io is enabled, the runtime will attempt to use the specified port instead of the default port.
|
||||
# passfd_listener_port = 1027
|
||||
passfd_listener_port = 1027
|
||||
|
||||
@@ -16,14 +16,13 @@
|
||||
path = "@QEMUPATH@"
|
||||
kernel = "@KERNELPATH_COCO@"
|
||||
image = "@IMAGECONFIDENTIALPATH@"
|
||||
# initrd = "@INITRDCONFIDENTIALPATH@"
|
||||
machine_type = "@MACHINETYPE@"
|
||||
|
||||
# rootfs filesystem type:
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# Block storage driver to be used when the VM rootfs is backed
|
||||
# by a block device. This is virtio-blk-pci, virtio-blk-mmio or nvdimm
|
||||
@@ -43,18 +42,12 @@ vm_rootfs_driver = "@VMROOTFSDRIVER_QEMU@"
|
||||
# - NVDIMM devices
|
||||
#
|
||||
# Default false
|
||||
# confidential_guest = true
|
||||
|
||||
# Choose AMD SEV-SNP confidential guests
|
||||
# In case of using confidential guests on AMD hardware that supports both SEV
|
||||
# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default.
|
||||
# Default false
|
||||
# sev_snp_guest = true
|
||||
confidential_guest = false
|
||||
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
# rootless = true
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -92,7 +85,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
|
||||
machine_accelerators="@MACHINEACCELERATORS@"
|
||||
machine_accelerators = "@MACHINEACCELERATORS@"
|
||||
|
||||
# Qemu seccomp sandbox feature
|
||||
# comma-separated list of seccomp sandbox features to control the syscall access.
|
||||
@@ -100,12 +93,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
|
||||
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
|
||||
# Another note: enabling this feature may reduce performance, you may enable
|
||||
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
|
||||
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
|
||||
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
|
||||
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"
|
||||
|
||||
# CPU features
|
||||
# comma-separated list of cpu features to pass to the cpu
|
||||
# For example, `cpu_features = "pmu=off,vmx=off"
|
||||
cpu_features="@CPUFEATURES@"
|
||||
cpu_features = "@CPUFEATURES@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
@@ -151,7 +145,7 @@ default_bridges = @DEFBRIDGES@
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
reclaim_guest_freed_memory = false
|
||||
|
||||
# Default memory size in MiB for SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSZ@ MiB.
|
||||
@@ -160,7 +154,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This will determine how many times memory can be hot-added to the sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -173,13 +167,13 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Specifies virtio-mem will be enabled or not.
|
||||
# Please note that this option should be used with the command
|
||||
# "echo 1 > /proc/sys/vm/overcommit_memory".
|
||||
# Default false
|
||||
#enable_virtio_mem = true
|
||||
enable_virtio_mem = false
|
||||
|
||||
# Disable block device from being used for a container's rootfs.
|
||||
# In case of a storage driver like devicemapper where a container's
|
||||
@@ -256,17 +250,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
block_device_cache_set = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
block_device_cache_noflush = false
|
||||
|
||||
# Enable iothreads (data-plane) to be used. This causes IO to be
|
||||
# handled in a separate IO thread. This is currently only implemented
|
||||
@@ -281,7 +275,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -289,7 +283,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vhost-user storage device, default false
|
||||
# Enabling this will result in some Linux reserved block type
|
||||
@@ -306,11 +300,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# Enable IOMMU_PLATFORM, default false
|
||||
# Enabling this will result in the VM device having iommu_platform=on set
|
||||
#enable_iommu_platform = true
|
||||
enable_iommu_platform = false
|
||||
|
||||
# List of valid annotations values for the vhost user store path
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -326,7 +320,7 @@ vhost_user_reconnect_timeout_sec = 0
|
||||
# will disable this feature. In the case of virtio-fs, this is enabled
|
||||
# automatically and '/dev/shm' is used as the backing folder.
|
||||
# This option will be ignored if VM templating is enabled.
|
||||
#file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
|
||||
# List of valid annotations values for the file_mem_backend annotation
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -341,7 +335,7 @@ pflashes = []
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# This option allows to add an extra HMP or QMP socket when `enable_debug = true`
|
||||
#
|
||||
@@ -356,17 +350,18 @@ pflashes = []
|
||||
#
|
||||
# If set to the empty string "", no extra monitor socket is added. This is
|
||||
# the default.
|
||||
#extra_monitor_socket = "hmp"
|
||||
extra_monitor_socket = ""
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
# Default false
|
||||
disable_nesting_checks = false
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
@@ -374,44 +369,44 @@ pflashes = []
|
||||
# nvdimm is not supported when `confidential_guest = true`.
|
||||
#
|
||||
# Default is false
|
||||
#disable_image_nvdimm = true
|
||||
disable_image_nvdimm = false
|
||||
|
||||
# VFIO devices are hotplugged on a bridge by default.
|
||||
# Enable hotplugging on root bus. This may be required for devices with
|
||||
# a large PCI bar, as this is a current limitation with hotplugging on
|
||||
# a bridge.
|
||||
# Default false
|
||||
#hotplug_vfio_on_root_bus = true
|
||||
hotplug_vfio_on_root_bus = false
|
||||
|
||||
# Enable hot-plugging of VFIO devices to a bridge-port,
|
||||
# root-port or switch-port.
|
||||
# The default setting is "no-port"
|
||||
#hot_plug_vfio = "root-port"
|
||||
hot_plug_vfio = "no-port"
|
||||
|
||||
# In a confidential compute environment hot-plugging can compromise
|
||||
# security.
|
||||
# Enable cold-plugging of VFIO devices to a bridge-port,
|
||||
# root-port or switch-port.
|
||||
# The default setting is "no-port", which means disabled.
|
||||
#cold_plug_vfio = "root-port"
|
||||
cold_plug_vfio = "no-port"
|
||||
|
||||
# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
|
||||
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
|
||||
# The value means the number of pcie_root_port
|
||||
# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
|
||||
# Default 0
|
||||
#pcie_root_port = 2
|
||||
pcie_root_port = 0
|
||||
|
||||
# Before hot plugging a PCIe device onto a switch port, you need to add a pcie_switch_port device first.
|
||||
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
|
||||
# The value means how many devices attached onto pcie_switch_port will be created.
|
||||
# This value is valid when hotplug_vfio_on_root_bus is true, and machine_type is "q35"
|
||||
# Default 0
|
||||
#pcie_switch_port = 2
|
||||
pcie_switch_port = 0
|
||||
|
||||
# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
|
||||
# security (vhost-net runs ring0) for network I/O performance.
|
||||
#disable_vhost_net = true
|
||||
disable_vhost_net = false
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -423,7 +418,7 @@ pflashes = []
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
entropy_source = "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -445,29 +440,19 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
|
||||
# Enable connection to Quote Generation Service (QGS)
|
||||
# The "tdx_quote_generation_service_socket_port" parameter configures how QEMU connects to the TDX Quote Generation Service (QGS).
|
||||
# This connection is essential for Trusted Domain (TD) attestation, as QGS signs the TDREPORT sent by QEMU via the GetQuote hypercall.
|
||||
# By default QGS runs on vsock port 4050, but can be modified by the host admin. For QEMU's tdx-guest object, this connection needs to
|
||||
# be specified in a JSON format, for example:
|
||||
# -object '{"qom-type":"tdx-guest","id":"tdx","quote-generation-socket":{"type":"vsock","cid":"2","port":"4050"}}'
|
||||
# It's important to note that setting "tdx_quote_generation_service_socket_port" to 0 enables communication via Unix Domain Sockets (UDS).
|
||||
# To activate UDS, the QGS service itself must be launched with the "-port=0" parameter and the UDS will always be located at /var/run/tdx-qgs/qgs.socket.
|
||||
# -object '{"qom-type":"tdx-guest","id":"tdx","quote-generation-socket":{"type":"unix","path":"/var/run/tdx-qgs/qgs.socket"}}'
|
||||
# tdx_quote_generation_service_socket_port = @QEMUTDXQUOTEGENERATIONSERVICESOCKETPORT@
|
||||
# Recommended value when enabling: "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""

#
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0
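#
# Worked example (illustrative values): the limits are expressed in bits per
# second, so capping a sandbox at roughly 1 Gbit/s in each direction would be:
#   rx_rate_limiter_max_rate = 1000000000
#   tx_rate_limiter_max_rate = 1000000000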

# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -477,9 +462,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""
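#
# Usage note (illustrative): a dump written under the configured path can be
# inspected on the host with the crash utility against a guest kernel that has
# debug symbols, e.g. `crash /path/to/guest/vmlinux /var/crash/kata/<dump-file>`.
# The exact file name and layout are determined by the runtime at dump time.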
|
||||
|
||||
# If enable paging.
|
||||
# Basically, if you want to use "gdb" rather than "crash",
|
||||
@@ -490,17 +476,17 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
#guest_memory_dump_paging=false
|
||||
|
||||
# use legacy serial for guest console if available and implemented for architecture. Default false
|
||||
#use_legacy_serial = true
|
||||
use_legacy_serial = false
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
# disable applying SELinux on the container process
|
||||
# If set to false, the type `container_t` is applied to the container process by default.
|
||||
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
|
||||
# with `SELINUX=yes`.
|
||||
# (default: true)
|
||||
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
|
||||
|
||||
|
||||
[factory]
|
||||
@@ -515,41 +501,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
enable_template = false
|
||||
|
||||
# Specifies the path of template.
|
||||
#
|
||||
# Default "/run/vc/vm/template"
|
||||
#template_path = "/run/vc/vm/template"
|
||||
|
||||
# The number of caches of VMCache:
|
||||
# unspecified or == 0 --> VMCache is disabled
|
||||
# > 0 --> will be set to the specified number
|
||||
#
|
||||
# VMCache is a function that creates VMs as caches before using it.
|
||||
# It helps speed up new container creation.
|
||||
# The function consists of a server and some clients communicating
|
||||
# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
|
||||
# The VMCache server will create some VMs and cache them by factory cache.
|
||||
# It will convert the VM to gRPC format and transport it when it gets
# requests from clients.
|
||||
# Factory grpccache is the VMCache client. It will request gRPC format
|
||||
# VM and convert it back to a VM. If VMCache function is enabled,
|
||||
# kata-runtime will request VM from factory grpccache when it creates
|
||||
# a new sandbox.
|
||||
#
|
||||
# Default 0
|
||||
#vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
template_path = "/run/vc/vm/template"
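#
# Illustrative example (not a default): booting new sandboxes from a pre-created
# VM template, which requires "initrd=" rather than "image=" as noted above:
#   enable_template = true
#   template_path = "/run/vc/vm/template"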
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -563,7 +525,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -576,18 +538,18 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# * The module is not available in the guest or it doesn't meet the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []
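#
# Illustrative example (not a default) of the expected list format, where each
# entry is a module name optionally followed by its parameters:
#   kernel_modules = ["e1000e InterruptThrottleRate=3000,3000,3000", "i915 enable_ppgtt=0"]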
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent dial timeout in millisecond.
# (default: 10)
#dial_timeout_ms = 10
dial_timeout_ms = 10

# Agent reconnect timeout in millisecond.
# Retry times = reconnect_timeout_ms / dial_timeout_ms (default: 300)
@@ -596,7 +558,7 @@ kernel_modules=[]
# You'd better not change the value of dial_timeout_ms, unless you have an
# idea of what you are doing.
# (default: 3000)
#reconnect_timeout_ms = 3000
reconnect_timeout_ms = 3000
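#
# Worked example with the defaults above:
#   retry times = reconnect_timeout_ms / dial_timeout_ms = 3000 / 10 = 300
# i.e. the runtime attempts up to 300 dials before giving up on reconnecting.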
|
||||
|
||||
# Create Container Request Timeout
|
||||
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
|
||||
@@ -609,28 +571,28 @@ kernel_modules=[]
|
||||
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
|
||||
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
|
||||
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
|
||||
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
|
||||
[agent.@PROJECT_TYPE@.mem_agent]
|
||||
# Control the mem-agent function enable or disable.
|
||||
# Default to false
|
||||
#mem_agent_enable = true
|
||||
mem_agent_enable = false
|
||||
|
||||
# Control the mem-agent memcg function disable or enable
|
||||
# Default to false
|
||||
#memcg_disable = false
|
||||
memcg_disable = false
|
||||
|
||||
# Control the mem-agent function swap enable or disable.
|
||||
# Default to false
|
||||
#memcg_swap = false
|
||||
memcg_swap = false
|
||||
|
||||
# Control the mem-agent function swappiness max number.
|
||||
# Default to 50
|
||||
#memcg_swappiness_max = 50
|
||||
memcg_swappiness_max = 50
|
||||
|
||||
# Control the mem-agent memcg function wait period seconds
|
||||
# Default to 600
|
||||
#memcg_period_secs = 600
|
||||
memcg_period_secs = 600
|
||||
|
||||
# Control the mem-agent memcg wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
@@ -638,7 +600,7 @@ kernel_modules=[]
|
||||
# then the aging and eviction for this cgroup will not be
|
||||
# executed after this waiting period.
|
||||
# Default to 1
|
||||
#memcg_period_psi_percent_limit = 1
|
||||
memcg_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time for a cgroup
|
||||
@@ -646,44 +608,44 @@ kernel_modules=[]
|
||||
# this cgroup will immediately stop and will not resume until
|
||||
# the next memcg waiting period.
|
||||
# Default to 1
|
||||
#memcg_eviction_psi_percent_limit = 1
|
||||
memcg_eviction_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction run aging count min.
|
||||
# A cgroup will only perform eviction when the number of aging cycles
|
||||
# in memcg is greater than or equal to memcg_eviction_run_aging_count_min.
|
||||
# Default to 3
|
||||
#memcg_eviction_run_aging_count_min = 3
|
||||
memcg_eviction_run_aging_count_min = 3
|
||||
|
||||
# Control the mem-agent compact function disable or enable
|
||||
# Default to false
|
||||
#compact_disable = false
|
||||
compact_disable = false
|
||||
|
||||
# Control the mem-agent compaction function wait period seconds
|
||||
# Default to 600
|
||||
#compact_period_secs = 600
|
||||
compact_period_secs = 600
|
||||
|
||||
# Control the mem-agent compaction function wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
# the compaction waiting period exceeds this value,
|
||||
# then the compaction will not be executed after this waiting period.
|
||||
# Default to 1
|
||||
#compact_period_psi_percent_limit = 1
|
||||
compact_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent compaction function compact PSI percent limit.
|
||||
# During compaction, the percentage of memory and IO PSI stall time
|
||||
# is checked every second. If this percentage exceeds
|
||||
# compact_psi_percent_limit, the compaction process will stop.
|
||||
# Default to 5
|
||||
#compact_psi_percent_limit = 5
|
||||
compact_psi_percent_limit = 5
|
||||
|
||||
# Control the maximum number of seconds for each compaction of mem-agent compact function.
|
||||
# Default to 180
|
||||
#compact_sec_max = 180
|
||||
compact_sec_max = 180
|
||||
|
||||
# Control the mem-agent compaction function compact order.
|
||||
# compact_order is use with compact_threshold.
|
||||
# Default to 9
|
||||
#compact_order = 9
|
||||
compact_order = 9
|
||||
|
||||
# Control the mem-agent compaction function compact threshold.
|
||||
# compact_threshold is the pages number.
|
||||
@@ -696,16 +658,16 @@ kernel_modules=[]
# since the previous compaction.
# then the system should initiate another round of memory compaction.
# Default to 1024
#compact_threshold = 1024
compact_threshold = 1024
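#
# Illustration (assuming 4 KiB base pages): compact_order = 9 targets blocks of
# 2^9 pages = 512 * 4 KiB = 2 MiB, and compact_threshold = 1024 corresponds to
# roughly 1024 * 4 KiB = 4 MiB of free pages below that order.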
|
||||
|
||||
# Control the mem-agent compaction function force compact times.
|
||||
# After one compaction, if there has not been a compaction within
|
||||
# the next compact_force_times times, a compaction will be forced
|
||||
# regardless of the system's memory situation.
|
||||
# If compact_force_times is set to 0, will do force compaction each time.
|
||||
# If compact_force_times is set to 18446744073709551615, will never do force compaction.
|
||||
# Default to 18446744073709551615
|
||||
#compact_force_times = 18446744073709551615
|
||||
# If compact_force_times is set to 9223372036854775807, will never do force compaction.
|
||||
# Default to 9223372036854775807
|
||||
compact_force_times = 9223372036854775807
|
||||
|
||||
# Create Container Request Timeout
|
||||
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
|
||||
@@ -718,13 +680,14 @@ kernel_modules=[]
|
||||
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
|
||||
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
|
||||
# Defaults to @DEFCREATECONTAINERTIMEOUT_COCO@ second(s)
|
||||
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT_COCO@
|
||||
create_container_timeout = @DEFCREATECONTAINERTIMEOUT_COCO@
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -742,23 +705,23 @@ kernel_modules=[]
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_QEMU@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_QEMU@"
|
||||
|
||||
name="@RUNTIMENAME@"
|
||||
hypervisor_name="@HYPERVISOR_QEMU@"
|
||||
agent_name="@PROJECT_TYPE@"
|
||||
name = "@RUNTIMENAME@"
|
||||
hypervisor_name = "@HYPERVISOR_QEMU@"
|
||||
agent_name = "@PROJECT_TYPE@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
# enable_vcpus_pinning = false
enable_vcpus_pinning = false
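#
# Worked example of the qualifying condition above: a sandbox with 2 vCPU
# threads whose CPUSet contains exactly 2 host CPUs (e.g. "0-1") satisfies
# num(vCPU threads) == num(CPUs), so each vCPU thread gets pinned to one CPU.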
|
||||
|
||||
# Apply a custom SELinux security policy to the container process inside the VM.
|
||||
# This is used when you want to apply a type other than the default `container_t`,
|
||||
@@ -766,22 +729,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# (format: "user:role:type")
|
||||
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
|
||||
# categories are determined automatically by high-level container runtimes such as containerd.
|
||||
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
|
||||
# Example value when enabling: "system_u:system_r:container_t"
|
||||
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""
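#
# Illustrative example (not a default): pointing traces at a collector reachable
# under a cluster-internal host name, keeping the default API path:
#   jaeger_endpoint = "http://jaeger-collector:14268/api/traces"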
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -789,7 +753,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -797,7 +761,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_QEMU@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_QEMU@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -806,13 +770,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_QEMU@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_COCO@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_COCO@

# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandboxes shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
sandbox_bind_mounts = @DEFBINDMOUNTS@
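#
# Illustrative example (not a default): expose a read-only host directory to
# guest-side services through the shared filesystem:
#   sandbox_bind_mounts = ["/usr/share/my-guest-assets"]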
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be presented to the container.
|
||||
@@ -833,19 +797,19 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
@@ -16,45 +16,22 @@
|
||||
path = "@QEMUPATH@"
|
||||
kernel = "@KERNELPATH_QEMU@"
|
||||
image = "@IMAGEPATH@"
|
||||
# initrd = "@INITRDPATH@"
|
||||
machine_type = "@MACHINETYPE@"
|
||||
|
||||
# rootfs filesystem type:
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# Block storage driver to be used for the VM rootfs is backed
|
||||
# by a block device. This is virtio-blk-pci, virtio-blk-mmio or nvdimm
|
||||
vm_rootfs_driver = "@VMROOTFSDRIVER_QEMU@"
|
||||
|
||||
# Enable confidential guest support.
|
||||
# Toggling that setting may trigger different hardware features, ranging
|
||||
# from memory encryption to both memory and CPU-state encryption and integrity.
|
||||
# The Kata Containers runtime dynamically detects the available feature set and
|
||||
# aims at enabling the largest possible one, returning an error if none is
|
||||
# available, or none is supported by the hypervisor.
|
||||
#
|
||||
# Known limitations:
|
||||
# * Does not work by design:
|
||||
# - CPU Hotplug
|
||||
# - Memory Hotplug
|
||||
# - NVDIMM devices
|
||||
#
|
||||
# Default false
|
||||
# confidential_guest = true
|
||||
|
||||
# Choose AMD SEV-SNP confidential guests
|
||||
# In case of using confidential guests on AMD hardware that supports both SEV
|
||||
# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default.
|
||||
# Default false
|
||||
# sev_snp_guest = true
|
||||
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
# rootless = true
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -92,7 +69,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
|
||||
machine_accelerators="@MACHINEACCELERATORS@"
|
||||
machine_accelerators = "@MACHINEACCELERATORS@"
|
||||
|
||||
# Qemu seccomp sandbox feature
|
||||
# comma-separated list of seccomp sandbox features to control the syscall access.
|
||||
@@ -100,12 +77,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
|
||||
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
|
||||
# Another note: enabling this feature may reduce performance, you may enable
|
||||
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
|
||||
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
|
||||
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
|
||||
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"
|
||||
|
||||
# CPU features
|
||||
# comma-separated list of cpu features to pass to the cpu
|
||||
# For example, `cpu_features = "pmu=off,vmx=off"
|
||||
cpu_features="@CPUFEATURES@"
|
||||
cpu_features = "@CPUFEATURES@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
@@ -151,7 +129,7 @@ default_bridges = @DEFBRIDGES@
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
reclaim_guest_freed_memory = false
|
||||
|
||||
# Default memory size in MiB for SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSZ@ MiB.
|
||||
@@ -160,7 +138,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This will determine the times that memory will be hotadded to sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -173,13 +151,13 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Specifies virtio-mem will be enabled or not.
|
||||
# Please note that this option should be used with the command
|
||||
# "echo 1 > /proc/sys/vm/overcommit_memory".
|
||||
# Default false
|
||||
#enable_virtio_mem = true
|
||||
enable_virtio_mem = false
|
||||
|
||||
# Disable block device from being used for a container's rootfs.
|
||||
# In case of a storage driver like devicemapper where a container's
|
||||
@@ -262,17 +240,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false
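#
# Illustrative example (not a default): apply cache options that make guest
# block I/O bypass the host page cache while still honouring flush requests:
#   block_device_cache_set = true
#   block_device_cache_direct = true
#   block_device_cache_noflush = false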
|
||||
|
||||
# Enable iothreads (data-plane) to be used. This causes IO to be
|
||||
# handled in a separate IO thread. This is currently only implemented
|
||||
@@ -281,10 +259,10 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
|
||||
enable_iothreads = @DEFENABLEIOTHREADS@
|
||||
|
||||
# Virtio queue size. Size: byte. default 128
#queue_size: u32,
queue_size = 128

# Block device multi-queue, default 1
#num_queues: usize,
num_queues = 1
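#
# Illustrative tuning example (not a default): for I/O heavy workloads with
# several vCPUs, a deeper ring and one queue per vCPU are sometimes used:
#   queue_size = 256
#   num_queues = 4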
|
||||
|
||||
# Enable pre allocation of VM RAM, default false
|
||||
# Enabling this will result in lower container density
|
||||
@@ -293,7 +271,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -301,7 +279,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vhost-user storage device, default false
|
||||
# Enabling this will result in some Linux reserved block type
|
||||
@@ -318,11 +296,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# Enable IOMMU_PLATFORM, default false
|
||||
# Enabling this will result in the VM device having iommu_platform=on set
|
||||
#enable_iommu_platform = true
|
||||
enable_iommu_platform = false
|
||||
|
||||
# List of valid annotations values for the vhost user store path
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -338,7 +316,7 @@ vhost_user_reconnect_timeout_sec = 0
|
||||
# will disable this feature. In the case of virtio-fs, this is enabled
|
||||
# automatically and '/dev/shm' is used as the backing folder.
|
||||
# This option will be ignored if VM templating is enabled.
|
||||
#file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
|
||||
# List of valid annotations values for the file_mem_backend annotation
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -353,7 +331,7 @@ pflashes = []
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# This option allows to add an extra HMP or QMP socket when `enable_debug = true`
|
||||
#
|
||||
@@ -368,17 +346,17 @@ pflashes = []
|
||||
#
|
||||
# If set to the empty string "", no extra monitor socket is added. This is
|
||||
# the default.
|
||||
#extra_monitor_socket = "hmp"
|
||||
extra_monitor_socket = ""
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = false
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
@@ -386,44 +364,44 @@ pflashes = []
|
||||
# nvdimm is not supported when `confidential_guest = true`.
|
||||
#
|
||||
# Default is false
|
||||
#disable_image_nvdimm = true
|
||||
disable_image_nvdimm = false
|
||||
|
||||
# VFIO devices are hotplugged on a bridge by default.
|
||||
# Enable hotplugging on root bus. This may be required for devices with
|
||||
# a large PCI bar, as this is a current limitation with hotplugging on
|
||||
# a bridge.
|
||||
# Default false
|
||||
#hotplug_vfio_on_root_bus = true
|
||||
hotplug_vfio_on_root_bus = false
|
||||
|
||||
# Enable hot-plugging of VFIO devices to a bridge-port,
|
||||
# root-port or switch-port.
|
||||
# The default setting is "no-port"
|
||||
#hot_plug_vfio = "root-port"
|
||||
hot_plug_vfio = "no-port"
|
||||
|
||||
# In a confidential compute environment hot-plugging can compromise
|
||||
# security.
|
||||
# Enable cold-plugging of VFIO devices to a bridge-port,
|
||||
# root-port or switch-port.
|
||||
# The default setting is "no-port", which means disabled.
|
||||
#cold_plug_vfio = "root-port"
|
||||
cold_plug_vfio = "no-port"
|
||||
|
||||
# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
|
||||
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
|
||||
# The value means the number of pcie_root_port
|
||||
# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
|
||||
# Default 0
|
||||
#pcie_root_port = 2
|
||||
pcie_root_port = 0
|
||||
|
||||
# Before hot plugging a PCIe device onto a switch port, you need to add a pcie_switch_port device first.
|
||||
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
|
||||
# The value means how many devices attached onto pcie_switch_port will be created.
|
||||
# This value is valid when hotplug_vfio_on_root_bus is true, and machine_type is "q35"
|
||||
# Default 0
|
||||
#pcie_switch_port = 2
|
||||
pcie_switch_port = 0
|
||||
|
||||
# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
|
||||
# security (vhost-net runs ring0) for network I/O performance.
|
||||
#disable_vhost_net = true
|
||||
disable_vhost_net = false
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -435,7 +413,7 @@ pflashes = []
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
entropy_source = "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -457,7 +435,8 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
# Recommended value when enabling: "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
|
||||
# Enable connection to Quote Generation Service (QGS)
|
||||
# The "tdx_quote_generation_service_socket_port" parameter configures how QEMU connects to the TDX Quote Generation Service (QGS).
|
||||
@@ -468,18 +447,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# It's important to note that setting "tdx_quote_generation_service_socket_port" to 0 enables communication via Unix Domain Sockets (UDS).
|
||||
# To activate UDS, the QGS service itself must be launched with the "-port=0" parameter and the UDS will always be located at /var/run/tdx-qgs/qgs.socket.
|
||||
# -object '{"qom-type":"tdx-guest","id":"tdx","quote-generation-socket":{"type":"unix","path":"/var/run/tdx-qgs/qgs.socket"}}'
|
||||
# tdx_quote_generation_service_socket_port = @QEMUTDXQUOTEGENERATIONSERVICESOCKETPORT@
|
||||
tdx_quote_generation_service_socket_port = @QEMUTDXQUOTEGENERATIONSERVICESOCKETPORT@
|
||||
|
||||
#
|
||||
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#rx_rate_limiter_max_rate = 0
|
||||
rx_rate_limiter_max_rate = 0
|
||||
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
|
||||
# to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#tx_rate_limiter_max_rate = 0
|
||||
tx_rate_limiter_max_rate = 0
|
||||
|
||||
# Set where to save the guest memory dump file.
|
||||
# If set, when GUEST_PANICKED event occurred,
|
||||
@@ -489,9 +468,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# The dumped file(also called vmcore) can be processed with crash or gdb.
|
||||
#
|
||||
# WARNING:
|
||||
# Dump guest’s memory can take very long depending on the amount of guest memory
|
||||
# Dump guest's memory can take very long depending on the amount of guest memory
|
||||
# and use much disk space.
|
||||
#guest_memory_dump_path="/var/crash/kata"
|
||||
# Recommended value when enabling: "/var/crash/kata"
|
||||
guest_memory_dump_path = ""
|
||||
|
||||
# If enable paging.
|
||||
# Basically, if you want to use "gdb" rather than "crash",
|
||||
@@ -499,20 +479,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# then you should enable paging.
|
||||
#
|
||||
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
|
||||
#guest_memory_dump_paging=false
|
||||
guest_memory_dump_paging = false
|
||||
|
||||
# use legacy serial for guest console if available and implemented for architecture. Default false
|
||||
#use_legacy_serial = true
|
||||
use_legacy_serial = false
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
# disable applying SELinux on the container process
|
||||
# If set to false, the type `container_t` is applied to the container process by default.
|
||||
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
|
||||
# with `SELINUX=yes`.
|
||||
# (default: true)
|
||||
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
|
||||
|
||||
|
||||
[hypervisor.qemu.factory]
|
||||
@@ -527,41 +507,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
enable_template = false
|
||||
|
||||
# Specifies the path of template.
|
||||
#
|
||||
# Default "/run/vc/vm/template"
|
||||
#template_path = "/run/vc/vm/template"
|
||||
|
||||
# The number of caches of VMCache:
|
||||
# unspecified or == 0 --> VMCache is disabled
|
||||
# > 0 --> will be set to the specified number
|
||||
#
|
||||
# VMCache is a function that creates VMs as caches before using it.
|
||||
# It helps speed up new container creation.
|
||||
# The function consists of a server and some clients communicating
|
||||
# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
|
||||
# The VMCache server will create some VMs and cache them by factory cache.
|
||||
# It will convert the VM to gRPC format and transport it when it gets
# requests from clients.
|
||||
# Factory grpccache is the VMCache client. It will request gRPC format
|
||||
# VM and convert it back to a VM. If VMCache function is enabled,
|
||||
# kata-runtime will request VM from factory grpccache when it creates
|
||||
# a new sandbox.
|
||||
#
|
||||
# Default 0
|
||||
#vm_cache_number = 0
|
||||
|
||||
# Specify the address of the Unix socket that is used by VMCache.
|
||||
#
|
||||
# Default /var/run/kata-containers/cache.sock
|
||||
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
template_path = "/run/vc/vm/template"
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -575,7 +531,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
|
||||
# These modules will be loaded in the guest kernel using modprobe(8).
|
||||
@@ -588,18 +544,18 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# * The module is not available in the guest or it doesn't meet the guest kernel
|
||||
# requirements, like architecture and version.
|
||||
#
|
||||
kernel_modules=[]
|
||||
kernel_modules = []
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent dial timeout in millisecond.
|
||||
# (default: 10)
|
||||
#dial_timeout_ms = 10
|
||||
dial_timeout_ms = 10
|
||||
|
||||
# Agent reconnect timeout in millisecond.
|
||||
# Retry times = reconnect_timeout_ms / dial_timeout_ms (default: 300)
|
||||
@@ -608,28 +564,28 @@ kernel_modules=[]
|
||||
# You'd better not change the value of dial_timeout_ms, unless you have an
|
||||
# idea of what you are doing.
|
||||
# (default: 3000)
|
||||
#reconnect_timeout_ms = 3000
|
||||
reconnect_timeout_ms = 3000
|
||||
|
||||
[agent.@PROJECT_TYPE@.mem_agent]
|
||||
# Control the mem-agent function enable or disable.
|
||||
# Default to false
|
||||
#mem_agent_enable = true
|
||||
mem_agent_enable = false
|
||||
|
||||
# Control the mem-agent memcg function disable or enable
|
||||
# Default to false
|
||||
#memcg_disable = false
|
||||
memcg_disable = false
|
||||
|
||||
# Control the mem-agent function swap enable or disable.
|
||||
# Default to false
|
||||
#memcg_swap = false
|
||||
memcg_swap = false
|
||||
|
||||
# Control the mem-agent function swappiness max number.
|
||||
# Default to 50
|
||||
#memcg_swappiness_max = 50
|
||||
memcg_swappiness_max = 50
|
||||
|
||||
# Control the mem-agent memcg function wait period seconds
|
||||
# Default to 600
|
||||
#memcg_period_secs = 600
|
||||
memcg_period_secs = 600
|
||||
|
||||
# Control the mem-agent memcg wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
@@ -637,7 +593,7 @@ kernel_modules=[]
|
||||
# then the aging and eviction for this cgroup will not be
|
||||
# executed after this waiting period.
|
||||
# Default to 1
|
||||
#memcg_period_psi_percent_limit = 1
|
||||
memcg_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time for a cgroup
|
||||
@@ -645,44 +601,44 @@ kernel_modules=[]
|
||||
# this cgroup will immediately stop and will not resume until
|
||||
# the next memcg waiting period.
|
||||
# Default to 1
|
||||
#memcg_eviction_psi_percent_limit = 1
|
||||
memcg_eviction_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent memcg eviction run aging count min.
|
||||
# A cgroup will only perform eviction when the number of aging cycles
|
||||
# in memcg is greater than or equal to memcg_eviction_run_aging_count_min.
|
||||
# Default to 3
|
||||
#memcg_eviction_run_aging_count_min = 3
|
||||
memcg_eviction_run_aging_count_min = 3
|
||||
|
||||
# Control the mem-agent compact function disable or enable
|
||||
# Default to false
|
||||
#compact_disable = false
|
||||
compact_disable = false
|
||||
|
||||
# Control the mem-agent compaction function wait period seconds
|
||||
# Default to 600
|
||||
#compact_period_secs = 600
|
||||
compact_period_secs = 600
|
||||
|
||||
# Control the mem-agent compaction function wait period PSI percent limit.
|
||||
# If the percentage of memory and IO PSI stall time within
|
||||
# the compaction waiting period exceeds this value,
|
||||
# then the compaction will not be executed after this waiting period.
|
||||
# Default to 1
|
||||
#compact_period_psi_percent_limit = 1
|
||||
compact_period_psi_percent_limit = 1
|
||||
|
||||
# Control the mem-agent compaction function compact PSI percent limit.
|
||||
# During compaction, the percentage of memory and IO PSI stall time
|
||||
# is checked every second. If this percentage exceeds
|
||||
# compact_psi_percent_limit, the compaction process will stop.
|
||||
# Default to 5
|
||||
#compact_psi_percent_limit = 5
|
||||
compact_psi_percent_limit = 5
|
||||
|
||||
# Control the maximum number of seconds for each compaction of mem-agent compact function.
|
||||
# Default to 180
|
||||
#compact_sec_max = 180
|
||||
# Default to 300
|
||||
compact_sec_max = 300
|
||||
|
||||
# Control the mem-agent compaction function compact order.
|
||||
# compact_order is use with compact_threshold.
|
||||
# Default to 9
|
||||
#compact_order = 9
|
||||
compact_order = 9
|
||||
|
||||
# Control the mem-agent compaction function compact threshold.
|
||||
# compact_threshold is the pages number.
|
||||
@@ -695,7 +651,7 @@ kernel_modules=[]
|
||||
# since the previous compaction.
|
||||
# then the system should initiate another round of memory compaction.
|
||||
# Default to 1024
|
||||
#compact_threshold = 1024
|
||||
compact_threshold = 1024
|
||||
|
||||
# Control the mem-agent compaction function force compact times.
|
||||
# After one compaction, if there has not been a compaction within
|
||||
@@ -704,7 +660,9 @@ kernel_modules=[]
|
||||
# If compact_force_times is set to 0, will do force compaction each time.
|
||||
# If compact_force_times is set to 18446744073709551615, will never do force compaction.
|
||||
# Default to 18446744073709551615
|
||||
#compact_force_times = 18446744073709551615
|
||||
# Note: Using a large but valid u64 value (within i64::MAX range) instead of u64::MAX to avoid TOML parser issues
|
||||
# Using 9223372036854775807 (i64::MAX) which is effectively "never" for practical purposes
|
||||
compact_force_times = 9223372036854775807
|
||||
|
||||
# Create Container Request Timeout
|
||||
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
|
||||
@@ -717,14 +675,14 @@ kernel_modules=[]
|
||||
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
|
||||
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
|
||||
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
|
||||
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
#
|
||||
enable_debug = false
|
||||
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
# the container network interface
|
||||
@@ -741,23 +699,23 @@ kernel_modules=[]
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_QEMU@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_QEMU@"
|
||||
|
||||
name="@RUNTIMENAME@"
|
||||
hypervisor_name="@HYPERVISOR_QEMU@"
|
||||
agent_name="@PROJECT_TYPE@"
|
||||
name = "@RUNTIMENAME@"
|
||||
hypervisor_name = "@HYPERVISOR_QEMU@"
|
||||
agent_name = "@PROJECT_TYPE@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# vCPUs pinning settings
|
||||
# if enabled, each vCPU thread will be scheduled to a fixed CPU
|
||||
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
|
||||
# enable_vcpus_pinning = false
|
||||
enable_vcpus_pinning = false
|
||||
|
||||
# Apply a custom SELinux security policy to the container process inside the VM.
|
||||
# This is used when you want to apply a type other than the default `container_t`,
|
||||
@@ -765,22 +723,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# (format: "user:role:type")
|
||||
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
|
||||
# categories are determined automatically by high-level container runtimes such as containerd.
|
||||
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
|
||||
# Example value when enabling: "system_u:system_r:container_t"
|
||||
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -788,7 +747,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -796,7 +755,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_QEMU@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_QEMU@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -805,13 +764,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_QEMU@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_QEMU@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_QEMU@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandboxes shared path.
|
||||
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
|
||||
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
|
||||
# These will not be exposed to the container workloads, and are only provided for potential guest services.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be presented to the container.
|
||||
@@ -832,19 +791,19 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
@@ -40,7 +40,7 @@ confidential_guest = true
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
# rootless = true
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -78,7 +78,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
|
||||
machine_accelerators="@MACHINEACCELERATORS@"
|
||||
machine_accelerators = "@MACHINEACCELERATORS@"
|
||||
|
||||
# Qemu seccomp sandbox feature
|
||||
# comma-separated list of seccomp sandbox features to control the syscall access.
|
||||
@@ -86,12 +86,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
|
||||
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
|
||||
# Another note: enabling this feature may reduce performance, you may enable
|
||||
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
|
||||
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
|
||||
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
|
||||
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"
|
||||
|
||||
# CPU features
|
||||
# comma-separated list of cpu features to pass to the cpu
|
||||
# For example, `cpu_features = "pmu=off,vmx=off"
|
||||
cpu_features="@CPUFEATURES@"
|
||||
cpu_features = "@CPUFEATURES@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
@@ -136,7 +137,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This will determine the times that memory will be hotadded to sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -149,13 +150,13 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Specifies virtio-mem will be enabled or not.
|
||||
# Please note that this option should be used with the command
|
||||
# "echo 1 > /proc/sys/vm/overcommit_memory".
|
||||
# Default false
|
||||
#enable_virtio_mem = true
|
||||
enable_virtio_mem = false

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
@@ -238,17 +239,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently only implemented
@@ -257,10 +258,10 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
enable_iothreads = @DEFENABLEIOTHREADS@

# Virtio queue size. Size: byte. default 128
#queue_size: u32,
queue_size = 128

# Block device multi-queue, default 1
#num_queues: usize,
num_queues = 1

# Enable pre allocation of VM RAM, default false
# Enabling this will result in lower container density
@@ -269,7 +270,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
enable_mem_prealloc = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
@@ -277,7 +278,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true
enable_hugepages = false

# Enable vhost-user storage device, default false
# Enabling this will result in some Linux reserved block type
@@ -294,11 +295,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
enable_iommu = false

# Enable IOMMU_PLATFORM, default false
# Enabling this will result in the VM device having iommu_platform=on set
#enable_iommu_platform = true
enable_iommu_platform = false

# List of valid annotations values for the vhost user store path
# The default if not set is empty (all annotations rejected.)
@@ -309,7 +310,7 @@ valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = "@DEFFILEMEMBACKEND@"
file_mem_backend = "@DEFFILEMEMBACKEND@"

# List of valid annotations values for the file_mem_backend annotation
# The default if not set is empty (all annotations rejected.)
@@ -324,17 +325,17 @@ pflashes = []
# to enable debug output where available.
#
# Default false
#enable_debug = true
enable_debug = false

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true
disable_nesting_checks = false

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
msize_9p = @DEFMSIZE9P@

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
@@ -342,33 +343,33 @@ pflashes = []
# nvdimm is not supported when `confidential_guest = true`.
#
# Default is false
#disable_image_nvdimm = true
disable_image_nvdimm = false

# Enable hot-plugging of VFIO devices to a bridge-port,
# root-port or switch-port.
# The default setting is "no-port"
#hot_plug_vfio = "root-port"
hot_plug_vfio = "no-port"

# In a confidential compute environment hot-plugging can compromise
# security.
# Enable cold-plugging of VFIO devices to a bridge-port,
# root-port or switch-port.
# The default setting is "no-port", which means disabled.
cold_plug_vfio = "root-port"
cold_plug_vfio = "no-port"

# VFIO devices are hotplugged on a bridge by default.
# Enable hotplugging on root bus. This may be required for devices with
# a large PCI bar, as this is a current limitation with hotplugging on
# a bridge.
# Default false
#hotplug_vfio_on_root_bus = true
hotplug_vfio_on_root_bus = false

# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
# The value means the number of pcie_root_port
# This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
# Default 0
#pcie_root_port = 2
pcie_root_port = 0
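For illustration, hotplugging a large-BAR device (such as a GPU) on a q35 machine would combine the two knobs above; the port count of 2 is only the example value mentioned in the comment, not a recommendation:

    # illustrative values; 2 root ports is the example from the comment above
    hotplug_vfio_on_root_bus = true
    pcie_root_port = 2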

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance.
@@ -384,7 +385,7 @@ disable_vhost_net = true
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"
entropy_source = "@DEFENTROPYSOURCE@"

# List of valid annotations values for entropy_source
# The default if not set is empty (all annotations rejected.)
@@ -406,17 +407,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
#
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""

# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0
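As an illustrative stanza, capping inbound and outbound traffic at roughly 100 Mbit/s each would look like the following; the figures are placeholders, since the rates are expressed in bits per second:

    # illustrative values in bits/sec (~100 Mbit/s each), not recommendations
    rx_rate_limiter_max_rate = 100000000
    tx_rate_limiter_max_rate = 100000000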

# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -426,9 +428,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""
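A hedged example that stores vmcore files under the recommended path and keeps paging off, so the dump remains suitable for the crash utility (see the paging option below):

    # illustrative: recommended dump path, paging left off for use with crash
    guest_memory_dump_path = "/var/crash/kata"
    guest_memory_dump_paging = false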

# If enable paging.
# Basically, if you want to use "gdb" rather than "crash",
@@ -436,7 +439,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# then you should enable paging.
#
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
#guest_memory_dump_paging=false
guest_memory_dump_paging = false

# Enable swap in the guest. Default false.
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
@@ -447,20 +450,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
# be default_memory.
#enable_guest_swap = true
enable_guest_swap = false

# use legacy serial for guest console if available and implemented for architecture. Default false
#use_legacy_serial = true
use_legacy_serial = false

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@


[factory]
@@ -475,41 +478,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0 --> VMCache is disabled
# > 0 --> will be set to the specified number
#
# VMCache is a function that creates VMs as caches before using it.
# It helps speed up new container creation.
# The function consists of a server and some clients communicating
# through Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
# The VMCache server will create some VMs and cache them by factory cache.
# It will convert the VM to gRPC format and transport it when gets
# requestion from clients.
# Factory grpccache is the VMCache client. It will request gRPC format
# VM and convert it back to a VM. If VMCache function is enabled,
# kata-runtime will request VM from factory grpccache when it creates
# a new sandbox.
#
# Default 0
#vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
template_path = "/run/vc/vm/template"
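Purely as a sketch, a deployment that turns on VM templating with the default template location (and, per the note above, an initrd-based guest image) might set:

    # illustrative factory section using the default template path
    [factory]
    enable_template = true
    template_path = "/run/vc/vm/template"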

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -523,7 +502,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -536,14 +515,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# * The module is not available in the guest or it doesn't met the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []

# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent dial timeout in millisecond.
# (default: 10)
@@ -569,14 +548,14 @@ reconnect_timeout_ms = 5000
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
#
enable_debug = false

# Internetworking model
# Determines how the VM should be connected to the
# the container network interface
@@ -593,23 +572,23 @@ reconnect_timeout_ms = 5000
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"
internetworking_model = "@DEFNETWORKMODEL_QEMU@"

name="@RUNTIMENAME@"
hypervisor_name="@HYPERVISOR_QEMU@"
agent_name="@PROJECT_TYPE@"
name = "@RUNTIMENAME@"
hypervisor_name = "@HYPERVISOR_QEMU@"
agent_name = "@PROJECT_TYPE@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
# enable_vcpus_pinning = false
enable_vcpus_pinning = false

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -617,22 +596,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
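For illustration, applying the example label quoted above instead of the default container_t type:

    # illustrative: the example label quoted above
    guest_selinux_label = "system_u:system_r:container_t"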

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -640,7 +620,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -648,7 +628,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_QEMU@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_QEMU@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -657,13 +637,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_QEMU@
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_QEMU@
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_QEMU@

# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
sandbox_bind_mounts = @DEFBINDMOUNTS@

# VFIO Mode
# Determines how VFIO devices should be be presented to the container.
@@ -684,19 +664,19 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE_SE@"
vfio_mode = "@DEFVFIOMODE_SE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

@@ -19,24 +19,6 @@ remote_hypervisor_socket = "/run/peerpod/hypervisor.sock"
# Timeout in seconds for creating a remote hypervisor, 600s(10min) by default
remote_hypervisor_timeout = 600


# Enable confidential guest support.
# Toggling that setting may trigger different hardware features, ranging
# from memory encryption to both memory and CPU-state encryption and integrity.
# The Kata Containers runtime dynamically detects the available feature set and
# aims at enabling the largest possible one, returning an error if none is
# available, or none is supported by the hypervisor.
#
# Known limitations:
# * Does not work by design:
# - CPU Hotplug
# - Memory Hotplug
# - NVDIMM devices
#
# Default false
# confidential_guest = true
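A minimal sketch of opting in, subject to the limitations listed above (no CPU or memory hotplug, no NVDIMM):

    # illustrative opt-in; see limitations above
    confidential_guest = true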


# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
@@ -54,7 +36,7 @@ enable_annotations = ["machine_type", "default_memory", "default_vcpus", "defaul
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
# NOTE: kernel_params are not currently passed over in remote hypervisor
# kernel_params = ""
kernel_params = ""

# Path to the firmware.
# If you want that qemu uses the default firmware leave this option empty
@@ -65,7 +47,7 @@ firmware = "@FIRMWAREPATH@"
# < 0 --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores --> will be set to the actual number of physical cores
# default_vcpus = 1
default_vcpus = 1

# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number
@@ -82,7 +64,7 @@ firmware = "@FIRMWAREPATH@"
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
# unless you know what are you doing.
# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
# default_maxvcpus = @DEFMAXVCPUS@
default_maxvcpus = @DEFMAXVCPUS@

# Bridges can be used to hot plug devices.
# Limitations:
@@ -99,19 +81,19 @@ default_bridges = @DEFBRIDGES@
# Default memory size in MiB for SB/VM.
# If unspecified then it will be set @DEFMEMSZ@ MiB.
# Note: the remote hypervisor uses the peer pod config to determine the memory of the VM
# default_memory = @DEFMEMSZ@
default_memory = @DEFMEMSZ@
#
# Default memory slots per SB/VM.
# If unspecified then it will be set @DEFMEMSLOTS@.
# This is will determine the times that memory will be hotadded to sandbox/VM.
# Note: the remote hypervisor uses the peer pod config to determine the memory of the VM
#memory_slots = @DEFMEMSLOTS@
memory_slots = @DEFMEMSLOTS@

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available. And Debug also enable the hmp socket.
#
# Default false
# enable_debug = true
enable_debug = false

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
@@ -128,10 +110,11 @@ default_bridges = @DEFBRIDGES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
@@ -144,7 +127,7 @@ disable_guest_selinux = true
[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
# enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -158,18 +141,18 @@ disable_guest_selinux = true
# increasing the container shutdown time slightly.
#
# (default: disabled)
# enable_tracing = true
enable_tracing = false

# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 30)
#dial_timeout = 30
dial_timeout = 30

# Create Container Request Timeout
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
@@ -182,13 +165,13 @@ disable_guest_selinux = true
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
# enable_debug = true
enable_debug = false
#
# Internetworking model
# Determines how the VM should be connected to the
@@ -207,11 +190,11 @@ disable_guest_selinux = true
# provided by plugin to a tap interface connected to the VM.
#
# Note: The remote hypervisor, uses it's own network, so "none" is required
internetworking_model="none"
internetworking_model = "none"

name="virt_container"
hypervisor_name="remote"
agent_name="kata"
name = "virt_container"
hypervisor_name = "remote"
agent_name = "kata"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
@@ -219,7 +202,7 @@ agent_name="kata"
# within the guest
# (default: true)
# Note: The remote hypervisor has a different guest, so currently requires this to be set to true
disable_guest_seccomp=true
disable_guest_seccomp = true


# Apply a custom SELinux security policy to the container process inside the VM.
@@ -228,22 +211,23 @@ disable_guest_seccomp=true
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -260,7 +244,7 @@ disable_new_netns = false
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_REMOTE@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_REMOTE@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -270,7 +254,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_REMOTE@
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
# Note: the remote hypervisor uses the peer pod config to determine the sandbox size, so requires this to be set to true
static_sandbox_resource_mgmt=true
static_sandbox_resource_mgmt = true

# VFIO Mode
# Determines how VFIO devices should be be presented to the container.
@@ -291,20 +275,20 @@ static_sandbox_resource_mgmt=true
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE@"
vfio_mode = "@DEFVFIOMODE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
# Note: remote hypervisor has no sharing of emptydir mounts from host to guest
disable_guest_empty_dir=false
disable_guest_empty_dir = false

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

@@ -16,7 +16,7 @@ path = "@FCPATH@"
kernel = "@KERNELPATH_FC@"
image = "@IMAGEPATH@"

rootfs_type=@DEFROOTFSTYPE@
rootfs_type = @DEFROOTFSTYPE@
# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
@@ -32,7 +32,7 @@ valid_hypervisor_paths = @FCVALIDHYPERVISORPATHS@
# If the jailer path is not set kata will launch firecracker
# without a jail. If the jailer is set firecracker will be
# launched in a jailed enviornment created by the jailer
#jailer_path = "@FCJAILERPATH@"
jailer_path = "@FCJAILERPATH@"

# List of valid jailer path values for the hypervisor
# Each member of the list can be a regular expression
@@ -104,7 +104,7 @@ memory_slots = @DEFMEMSLOTS@
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0
memory_offset = 0

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
@@ -121,12 +121,12 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false

# Bandwidth rate limiter options
#
@@ -134,14 +134,14 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#disk_rate_limiter_bw_max_rate = 0
disk_rate_limiter_bw_max_rate = 0
#
# disk_rate_limiter_bw_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
# set to a non zero value.
#disk_rate_limiter_bw_one_time_burst = 0
disk_rate_limiter_bw_one_time_burst = 0
#
# Operation rate limiter options
#
@@ -149,20 +149,20 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#disk_rate_limiter_ops_max_rate = 0
disk_rate_limiter_ops_max_rate = 0
#
# disk_rate_limiter_ops_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
# set to a non zero value.
#disk_rate_limiter_ops_one_time_burst = 0
disk_rate_limiter_ops_one_time_burst = 0
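As an illustration of how the burst option depends on the max rate, a Firecracker stanza that caps disk bandwidth and grants a one-time burst might look like this; both numbers are placeholders:

    # illustrative placeholder values in bits/sec
    disk_rate_limiter_bw_max_rate = 50000000
    disk_rate_limiter_bw_one_time_burst = 100000000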

# Virtio queue size. Size: byte. default 128
#queue_size: u32,
queue_size = 128

# Block device multi-queue, default 1
#num_queues: usize,
num_queues = 1

# Enable pre allocation of VM RAM, default false
# Enabling this will result in lower container density
@@ -171,7 +171,7 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
enable_mem_prealloc = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
@@ -179,39 +179,40 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true
enable_hugepages = false

# Disable the 'seccomp' feature from Cloud Hypervisor, firecracker or dragonball, default false
# disable_seccomp = true
disable_seccomp = false

# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
enable_iommu = false

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available.
#
# Default false
#enable_debug = true
enable_debug = false

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true
# Default false
disable_nesting_checks = false

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
msize_9p = @DEFMSIZE9P@

# VFIO devices are hotplugged on a bridge by default.
# Enable hotplugging on root bus. This may be required for devices with
# a large PCI bar, as this is a current limitation with hotplugging on
# a bridge.
# Default false
#hotplug_vfio_on_root_bus = true
hotplug_vfio_on_root_bus = false

#
# Default entropy source.
@@ -223,7 +224,7 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"
entropy_source = "@DEFENTROPYSOURCE@"

# List of valid annotations values for entropy_source
# The default if not set is empty (all annotations rejected.)
@@ -245,21 +246,22 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered will scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""
#
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Firecracker, it provides a built-in rate limiter, which is based on TBF(Token Bucket Filter)
# queueing discipline.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Firecracker, it provides a built-in rate limiter, which is based on TBF(Token Bucket Filter)
# queueing discipline.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

[factory]
# VM templating support. Once enabled, new VMs are created from template
@@ -273,12 +275,12 @@ disable_selinux=@DEFDISABLESELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -292,7 +294,7 @@ disable_selinux=@DEFDISABLESELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -305,14 +307,14 @@ disable_selinux=@DEFDISABLESELINUX@
# * The module is not available in the guest or it doesn't met the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []

# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 45)
@@ -320,7 +322,7 @@ dial_timeout = 45

# Confidential Data Hub API timeout value in seconds
# (default: 50)
#cdh_api_timeout = 50
cdh_api_timeout = 50

# Create Container Request Timeout
# This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
@@ -333,13 +335,14 @@ dial_timeout = 45
# - runtime-request-timeout: The timeout value specified in the Kubelet configuration described as the link below:
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout)
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
# create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
enable_debug = false

#
# Internetworking model
# Determines how the VM should be connected to the
@@ -357,33 +360,33 @@ dial_timeout = 45
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_FC@"
internetworking_model = "@DEFNETWORKMODEL_FC@"

name="@RUNTIMENAME@"
hypervisor_name="@HYPERVISOR_FC@"
agent_name="@PROJECT_TYPE@"
name = "@RUNTIMENAME@"
hypervisor_name = "@HYPERVISOR_FC@"
agent_name = "@PROJECT_TYPE@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -391,7 +394,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -399,7 +402,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_FC@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_FC@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -408,19 +411,19 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_FC@
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_FC@
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_FC@

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

@@ -233,13 +233,19 @@ DEFDISABLESELINUX := false

# Default guest SELinux configuration
DEFDISABLEGUESTSELINUX := true
DEFGUESTSELINUXLABEL := system_u:system_r:container_t
# Default is empty string "" to match the default golang (when commented out in config).
# Most users will want to set this to "system_u:system_r:container_t" for SELinux support.
DEFGUESTSELINUXLABEL :=

#Default SeccomSandbox param
#The same default policy is used by libvirt
#More explanation on https://lists.gnu.org/archive/html/qemu-devel/2017-02/msg03348.html
# More explanation on https://lists.gnu.org/archive/html/qemu-devel/2017-02/msg03348.html
#
# Default is empty string "" to match the default (when commented out in config).
# Most users will want to set this to "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
# for better security. Note: "elevateprivileges=deny" doesn't work with daemonize option.
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
DEFSECCOMPSANDBOXPARAM := on,obsolete=deny,spawn=deny,resourcecontrol=deny
DEFSECCOMPSANDBOXPARAM :=
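To make the effect of the emptied defaults concrete, this is roughly what the generated QEMU configuration would carry after the usual @VAR@ substitution step (a sketch based on the variables above, not actual generated output):

    # sketch of the substituted output, not generated text
    seccompsandbox = ""
    guest_selinux_label = ""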

#Default entropy source
DEFENTROPYSOURCE := /dev/urandom

@@ -20,41 +20,22 @@ image = "@IMAGEPATH@"
# - ext4 (default)
# - xfs
# - erofs
rootfs_type=@DEFROOTFSTYPE@

# Enable confidential guest support.
# Toggling that setting may trigger different hardware features, ranging
# from memory encryption to both memory and CPU-state encryption and integrity.
# The Kata Containers runtime dynamically detects the available feature set and
# aims at enabling the largest possible one, returning an error if none is
# available, or none is supported by the hypervisor.
#
# Known limitations:
# * Does not work by design:
# - CPU Hotplug
# - Memory Hotplug
# - NVDIMM devices
#
# Supported TEEs:
# * Intel TDX
#
# Default false
# confidential_guest = true
rootfs_type = @DEFROOTFSTYPE@

# Enable running clh VMM as a non-root user.
# By default clh VMM run as root. When this is set to true, clh VMM process runs as
# a non-root random user. See documentation for the limitations of this mode.
# rootless = true
rootless = false

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@

# Path to the firmware.
# If you want Cloud Hypervisor to use a specific firmware, set its path below.
@@ -120,7 +101,7 @@ default_memory = @DEFMEMSZ@
# Default memory slots per SB/VM.
# If unspecified then it will be set @DEFMEMSLOTS@.
# This is will determine the times that memory will be hotadded to sandbox/VM.
#memory_slots = @DEFMEMSLOTS@
memory_slots = @DEFMEMSLOTS@

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
@@ -182,12 +163,12 @@ block_device_driver = "virtio-blk"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Reclaim guest freed memory.
# Enabling this will result in the VM balloon device having f_reporting=on set.
@@ -197,32 +178,32 @@ block_device_driver = "virtio-blk"
# the VM.
#
# Default false
#reclaim_guest_freed_memory = true
reclaim_guest_freed_memory = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
#enable_hugepages = true
enable_hugepages = false

# Disable the 'seccomp' feature from Cloud Hypervisor, default false
# disable_seccomp = true
disable_seccomp = false

# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: iommu=pt
#enable_iommu = true
enable_iommu = false

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available.
#
# Default false
#enable_debug = true
enable_debug = false

# This option specifies the loglevel of the hypervisor
#
# Default 1
#hypervisor_loglevel = 1
hypervisor_loglevel = 1
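For illustration, a Cloud Hypervisor setup that reports freed guest memory back to the host and raises the VMM log level; the value 2 is an example, not a recommendation:

    # illustrative values; loglevel 2 is an example
    reclaim_guest_freed_memory = true
    hypervisor_loglevel = 2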

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
@@ -232,7 +213,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@

# Enable hot-plugging of VFIO devices to a root-port.
# The default setting is "no-port"
#hot_plug_vfio = "root-port"
hot_plug_vfio = "no-port"

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
@@ -249,7 +230,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
guest_hook_path = ""
#
# These options are related to network rate limiter at the VMM level, and are
# based on the Cloud Hypervisor I/O throttling. Those are disabled by default
@@ -263,14 +244,14 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#net_rate_limiter_bw_max_rate = 0
net_rate_limiter_bw_max_rate = 0
#
# net_rate_limiter_bw_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if net_rate_limiter_bw_max_rate is
# set to a non zero value.
#net_rate_limiter_bw_one_time_burst = 0
net_rate_limiter_bw_one_time_burst = 0
#
# Operation rate limiter options
#
@@ -278,14 +259,14 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#net_rate_limiter_ops_max_rate = 0
net_rate_limiter_ops_max_rate = 0
#
# net_rate_limiter_ops_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if net_rate_limiter_bw_max_rate is
# set to a non zero value.
#net_rate_limiter_ops_one_time_burst = 0
net_rate_limiter_ops_one_time_burst = 0
#
# These options are related to disk rate limiter at the VMM level, and are
# based on the Cloud Hypervisor I/O throttling. Those are disabled by default
@@ -299,14 +280,14 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#disk_rate_limiter_bw_max_rate = 0
disk_rate_limiter_bw_max_rate = 0
#
# disk_rate_limiter_bw_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
# set to a non zero value.
#disk_rate_limiter_bw_one_time_burst = 0
disk_rate_limiter_bw_one_time_burst = 0
#
# Operation rate limiter options
#
@@ -314,19 +295,19 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#disk_rate_limiter_ops_max_rate = 0
disk_rate_limiter_ops_max_rate = 0
#
# disk_rate_limiter_ops_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
# set to a non zero value.
#disk_rate_limiter_ops_one_time_burst = 0
disk_rate_limiter_ops_one_time_burst = 0

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -340,14 +321,14 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 45)
@@ -355,13 +336,13 @@ dial_timeout = 45

# Confidential Data Hub API timeout value in seconds
# (default: 50)
#cdh_api_timeout = 50
cdh_api_timeout = 50

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
enable_debug = false
#
# Internetworking model
# Determines how the VM should be connected to the
@@ -379,14 +360,14 @@ dial_timeout = 45
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_CLH@"
internetworking_model = "@DEFNETWORKMODEL_CLH@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -394,22 +375,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -417,7 +399,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -425,7 +407,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -434,13 +416,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_CLH@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_CLH@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
|
||||
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
|
||||
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
|
||||
# These will not be exposed to the container workloads, and are only provided for potential guest services.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be be presented to the container.
|
||||
@@ -461,22 +443,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# Indicates the CreateContainer request timeout needed for the workload(s)
|
||||
# It using guest_pull this includes the time to pull the image inside the guest
|
||||
|
||||
@@ -20,7 +20,7 @@ image = "@IMAGEPATH@"
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -102,14 +102,14 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This is will determine the times that memory will be hotadded to sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# The size in MiB will be plused to max memory of hypervisor.
|
||||
# It is the memory address space for the NVDIMM device.
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -124,12 +124,12 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
block_device_cache_set = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
block_device_cache_noflush = false
|
||||
|
||||
# Enable pre allocation of VM RAM, default false
|
||||
# Enabling this will result in lower container density
|
||||
@@ -138,7 +138,7 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -146,29 +146,29 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vIOMMU, default false
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = false
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
msize_9p = @DEFMSIZE9P@
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -180,7 +180,7 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_FC@"
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
entropy_source= "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -202,21 +202,21 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered will scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
#
|
||||
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Firecracker, it provides a built-in rate limiter, which is based on TBF(Token Bucket Filter)
|
||||
# queueing discipline.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#rx_rate_limiter_max_rate = 0
|
||||
rx_rate_limiter_max_rate = 0
|
||||
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Firecracker, it provides a built-in rate limiter, which is based on TBF(Token Bucket Filter)
|
||||
# queueing discipline.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#tx_rate_limiter_max_rate = 0
|
||||
tx_rate_limiter_max_rate = 0
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
[factory]
|
||||
# VM templating support. Once enabled, new VMs are created from template
|
||||
@@ -230,12 +230,12 @@ disable_selinux=@DEFDISABLESELINUX@
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
enable_template = false
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -249,7 +249,7 @@ disable_selinux=@DEFDISABLESELINUX@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
|
||||
# These modules will be loaded in the guest kernel using modprobe(8).
|
||||
@@ -262,14 +262,14 @@ disable_selinux=@DEFDISABLESELINUX@
|
||||
# * The module is not available in the guest or it doesn't met the guest kernel
|
||||
# requirements, like architecture and version.
|
||||
#
|
||||
kernel_modules=[]
|
||||
kernel_modules = []
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 45)
|
||||
@@ -277,13 +277,13 @@ dial_timeout = 45
|
||||
|
||||
# Confidential Data Hub API timeout value in seconds
|
||||
# (default: 50)
|
||||
#cdh_api_timeout = 50
|
||||
cdh_api_timeout = 50
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -301,29 +301,29 @@ dial_timeout = 45
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_FC@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_FC@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -331,7 +331,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -339,7 +339,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -348,22 +348,22 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_FC@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_FC@
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# Indicates the CreateContainer request timeout needed for the workload(s)
|
||||
# It using guest_pull this includes the time to pull the image inside the guest
|
||||
|
||||
@@ -14,14 +14,13 @@
|
||||
path = "@QEMUCCAEXPERIMENTALPATH@"
|
||||
kernel = "@KERNELCONFIDENTIALPATH@"
|
||||
image = "@IMAGECONFIDENTIALPATH@"
|
||||
# initrd = "@INITRDCONFIDENTIALPATH@"
|
||||
machine_type = "@MACHINETYPE@"
|
||||
|
||||
# rootfs filesystem type:
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# Enable confidential guest support.
|
||||
# Toggling that setting may trigger different hardware features, ranging
|
||||
@@ -42,7 +41,7 @@ confidential_guest = true
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
# rootless = true
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -80,7 +79,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
|
||||
machine_accelerators="@MACHINEACCELERATORS@"
|
||||
machine_accelerators = "@MACHINEACCELERATORS@"
|
||||
|
||||
# Qemu seccomp sandbox feature
|
||||
# comma-separated list of seccomp sandbox features to control the syscall access.
|
||||
@@ -88,12 +87,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
|
||||
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
|
||||
# Another note: enabling this feature may reduce performance, you may enable
|
||||
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
|
||||
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
|
||||
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
|
||||
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"
|
||||
|
||||
# CPU features
|
||||
# comma-separated list of cpu features to pass to the cpu
|
||||
# For example, `cpu_features = "pmu=off,vmx=off"
|
||||
cpu_features="@CPUFEATURES@"
|
||||
cpu_features = "@CPUFEATURES@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
@@ -138,7 +138,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This is will determine the times that memory will be hotadded to sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -151,13 +151,13 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Specifies virtio-mem will be enabled or not.
|
||||
# Please note that this option should be used with the command
|
||||
# "echo 1 > /proc/sys/vm/overcommit_memory".
|
||||
# Default false
|
||||
#enable_virtio_mem = true
|
||||
enable_virtio_mem = false
|
||||
|
||||
# Disable block device from being used for a container's rootfs.
|
||||
# In case of a storage driver like devicemapper where a container's
|
||||
@@ -217,17 +217,17 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_QEMU@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
block_device_cache_set = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
block_device_cache_noflush = false
|
||||
|
||||
# Enable iothreads (data-plane) to be used. This causes IO to be
|
||||
# handled in a separate IO thread. This is currently only implemented
|
||||
@@ -242,7 +242,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -250,7 +250,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vhost-user storage device, default false
|
||||
# Enabling this will result in some Linux reserved block type
|
||||
@@ -267,11 +267,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# Enable IOMMU_PLATFORM, default false
|
||||
# Enabling this will result in the VM device having iommu_platform=on set
|
||||
#enable_iommu_platform = true
|
||||
enable_iommu_platform = false
|
||||
|
||||
# List of valid annotations values for the vhost user store path
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -282,7 +282,7 @@ valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@
|
||||
# will disable this feature. In the case of virtio-fs, this is enabled
|
||||
# automatically and '/dev/shm' is used as the backing folder.
|
||||
# This option will be ignored if VM templating is enabled.
|
||||
#file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
|
||||
# List of valid annotations values for the file_mem_backend annotation
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -297,17 +297,17 @@ pflashes = []
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = false
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
@@ -319,11 +319,11 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
|
||||
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
|
||||
# The value means the number of pcie_root_port
|
||||
# Default 0
|
||||
#pcie_root_port = 2
|
||||
pcie_root_port = 0
|
||||
|
||||
# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
|
||||
# security (vhost-net runs ring0) for network I/O performance.
|
||||
#disable_vhost_net = true
|
||||
disable_vhost_net = false
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -335,7 +335,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
entropy_source= "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -357,17 +357,17 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
#
|
||||
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#rx_rate_limiter_max_rate = 0
|
||||
rx_rate_limiter_max_rate = 0
|
||||
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
|
||||
# to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#tx_rate_limiter_max_rate = 0
|
||||
tx_rate_limiter_max_rate = 0
|
||||
|
||||
# Set where to save the guest memory dump file.
|
||||
# If set, when GUEST_PANICKED event occurred,
|
||||
@@ -377,9 +377,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# The dumped file(also called vmcore) can be processed with crash or gdb.
|
||||
#
|
||||
# WARNING:
|
||||
# Dump guest’s memory can take very long depending on the amount of guest memory
|
||||
# Dump guest's memory can take very long depending on the amount of guest memory
|
||||
# and use much disk space.
|
||||
#guest_memory_dump_path="/var/crash/kata"
|
||||
# Recommended value when enabling: "/var/crash/kata"
|
||||
guest_memory_dump_path = ""
|
||||
|
||||
# If enable paging.
|
||||
# Basically, if you want to use "gdb" rather than "crash",
|
||||
@@ -387,7 +388,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# then you should enable paging.
|
||||
#
|
||||
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
|
||||
#guest_memory_dump_paging=false
|
||||
guest_memory_dump_paging = false
|
||||
|
||||
# Enable swap in the guest. Default false.
|
||||
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
|
||||
@@ -398,26 +399,26 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
|
||||
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
|
||||
# be default_memory.
|
||||
#enable_guest_swap = true
|
||||
enable_guest_swap = false
|
||||
|
||||
# use legacy serial for guest console if available and implemented for architecture. Default false
|
||||
#use_legacy_serial = true
|
||||
use_legacy_serial = false
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
# disable applying SELinux on the container process
|
||||
# If set to false, the type `container_t` is applied to the container process by default.
|
||||
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
|
||||
# with `SELINUX=yes`.
|
||||
# (default: true)
|
||||
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
|
||||
|
||||
# In QEMU, the Realm Management Extension (RME) measurement algorithm is used for attestation, and it supports
|
||||
# sha256 and sha512 as options. The default is sha512. This algorithm is crucial for verifying the integrity of a
|
||||
# Realm, a secure execution environment within the larger system. QEMU supports sha256 and sha512 for CCA RME
|
||||
# measurements. sha512 is generally preferred on 64-bit architectures due to potential hardware acceleration.
|
||||
measurement_algo="@DEFCCAMEASUREMENTALGO@"
|
||||
measurement_algo = "@DEFCCAMEASUREMENTALGO@"
|
||||
|
||||
[factory]
|
||||
# VM templating support. Once enabled, new VMs are created from template
|
||||
@@ -431,12 +432,12 @@ measurement_algo="@DEFCCAMEASUREMENTALGO@"
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
enable_template = false
|
||||
|
||||
# Specifies the path of template.
|
||||
#
|
||||
# Default "/run/vc/vm/template"
|
||||
#template_path = "/run/vc/vm/template"
|
||||
template_path = "/run/vc/vm/template"
|
||||
|
||||
# The number of caches of VMCache:
|
||||
# unspecified or == 0 --> VMCache is disabled
|
||||
@@ -455,17 +456,17 @@ measurement_algo="@DEFCCAMEASUREMENTALGO@"
|
||||
# a new sandbox.
|
||||
#
|
||||
# Default 0
|
||||
#vm_cache_number = 0
|
||||
vm_cache_number = 0
|
||||
|
||||
# Specify the address of the Unix socket that is used by VMCache.
|
||||
#
|
||||
# Default /var/run/kata-containers/cache.sock
|
||||
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -479,7 +480,7 @@ measurement_algo="@DEFCCAMEASUREMENTALGO@"
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
|
||||
# These modules will be loaded in the guest kernel using modprobe(8).
|
||||
@@ -492,14 +493,14 @@ measurement_algo="@DEFCCAMEASUREMENTALGO@"
|
||||
# * The module is not available in the guest or it doesn't met the guest kernel
|
||||
# requirements, like architecture and version.
|
||||
#
|
||||
kernel_modules=[]
|
||||
kernel_modules = []
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 90)
|
||||
@@ -509,7 +510,7 @@ dial_timeout = 90
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -527,14 +528,14 @@ dial_timeout = 90
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_QEMU@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_QEMU@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# Apply a custom SELinux security policy to the container process inside the VM.
|
||||
# This is used when you want to apply a type other than the default `container_t`,
|
||||
@@ -542,22 +543,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# (format: "user:role:type")
|
||||
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
|
||||
# categories are determined automatically by high-level container runtimes such as containerd.
|
||||
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
|
||||
# Example value when enabling: "system_u:system_r:container_t"
|
||||
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -565,7 +567,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -573,7 +575,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -582,13 +584,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_TEE@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_TEE@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
|
||||
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
|
||||
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
|
||||
# These will not be exposed to the container workloads, and are only provided for potential guest services.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be be presented to the container.
|
||||
@@ -609,22 +611,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# Indicates the CreateContainer request timeout needed for the workload(s)
|
||||
# It using guest_pull this includes the time to pull the image inside the guest
|
||||
|
||||
@@ -16,41 +16,18 @@
|
||||
path = "@QEMUPATH@"
|
||||
kernel = "@KERNELCONFIDENTIALPATH@"
|
||||
image = "@IMAGECONFIDENTIALPATH@"
|
||||
# initrd = "@INITRDCONFIDENTIALPATH@"
|
||||
machine_type = "@MACHINETYPE@"
|
||||
|
||||
# rootfs filesystem type:
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
|
||||
# Enable confidential guest support.
|
||||
# Toggling that setting may trigger different hardware features, ranging
|
||||
# from memory encryption to both memory and CPU-state encryption and integrity.
|
||||
# The Kata Containers runtime dynamically detects the available feature set and
|
||||
# aims at enabling the largest possible one, returning an error if none is
|
||||
# available, or none is supported by the hypervisor.
|
||||
#
|
||||
# Known limitations:
|
||||
# * Does not work by design:
|
||||
# - CPU Hotplug
|
||||
# - Memory Hotplug
|
||||
# - NVDIMM devices
|
||||
#
|
||||
# Default false
|
||||
# confidential_guest = true
|
||||
|
||||
# Choose AMD SEV-SNP confidential guests
|
||||
# In case of using confidential guests on AMD hardware that supports both SEV
|
||||
# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default.
|
||||
# Default false
|
||||
# sev_snp_guest = true
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
# rootless = true
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -88,7 +65,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
|
||||
machine_accelerators="@MACHINEACCELERATORS@"
|
||||
machine_accelerators = "@MACHINEACCELERATORS@"
|
||||
|
||||
# Qemu seccomp sandbox feature
|
||||
# comma-separated list of seccomp sandbox features to control the syscall access.
|
||||
@@ -96,12 +73,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
|
||||
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
|
||||
# Another note: enabling this feature may reduce performance, you may enable
|
||||
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
|
||||
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
|
||||
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
|
||||
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"
|
||||
|
||||
# CPU features
|
||||
# comma-separated list of cpu features to pass to the cpu
|
||||
# For example, `cpu_features = "pmu=off,vmx=off"
|
||||
cpu_features="@CPUFEATURES@"
|
||||
cpu_features = "@CPUFEATURES@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
@@ -146,7 +124,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This is will determine the times that memory will be hotadded to sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -159,13 +137,13 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Specifies virtio-mem will be enabled or not.
|
||||
# Please note that this option should be used with the command
|
||||
# "echo 1 > /proc/sys/vm/overcommit_memory".
|
||||
# Default false
|
||||
#enable_virtio_mem = true
|
||||
enable_virtio_mem = false
|
||||
|
||||
# Disable block device from being used for a container's rootfs.
|
||||
# In case of a storage driver like devicemapper where a container's
|
||||
@@ -246,17 +224,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
block_device_cache_set = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
block_device_cache_noflush = false
|
||||
|
||||
# Enable iothreads (data-plane) to be used. This causes IO to be
|
||||
# handled in a separate IO thread. This is currently implemented
|
||||
@@ -275,7 +253,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Reclaim guest freed memory.
|
||||
# Enabling this will result in the VM balloon device having f_reporting=on set.
|
||||
@@ -285,7 +263,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
reclaim_guest_freed_memory = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -293,7 +271,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vhost-user storage device, default false
|
||||
# Enabling this will result in some Linux reserved block type
|
||||
@@ -310,11 +288,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# Enable IOMMU_PLATFORM, default false
|
||||
# Enabling this will result in the VM device having iommu_platform=on set
|
||||
#enable_iommu_platform = true
|
||||
enable_iommu_platform = false
|
||||
|
||||
# List of valid annotations values for the vhost user store path
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -330,7 +308,7 @@ vhost_user_reconnect_timeout_sec = 0
|
||||
# will disable this feature. In the case of virtio-fs, this is enabled
|
||||
# automatically and '/dev/shm' is used as the backing folder.
|
||||
# This option will be ignored if VM templating is enabled.
|
||||
#file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
|
||||
# List of valid annotations values for the file_mem_backend annotation
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -345,7 +323,7 @@ pflashes = []
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# This option allows to add an extra HMP or QMP socket when `enable_debug = true`
|
||||
#
|
||||
@@ -360,17 +338,17 @@ pflashes = []
|
||||
#
|
||||
# If set to the empty string "", no extra monitor socket is added. This is
|
||||
# the default.
|
||||
#extra_monitor_socket = hmp
|
||||
extra_monitor_socket = ""
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = true
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
@@ -381,24 +359,24 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
|
||||
# Enable hot-plugging of VFIO devices to a bridge-port,
|
||||
# root-port or switch-port.
|
||||
# The default setting is "no-port"
|
||||
#hot_plug_vfio = "root-port"
|
||||
hot_plug_vfio = "no-port"
|
||||
|
||||
# In a confidential compute environment hot-plugging can compromise
|
||||
# security.
|
||||
# Enable cold-plugging of VFIO devices to a bridge-port,
|
||||
# root-port or switch-port.
|
||||
# The default setting is "no-port", which means disabled.
|
||||
#cold_plug_vfio = "root-port"
|
||||
cold_plug_vfio = "no-port"
|
||||
|
||||
# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
|
||||
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
|
||||
# The value means the number of pcie_root_port
|
||||
# Default 0
|
||||
#pcie_root_port = 2
|
||||
pcie_root_port = 0
|
||||
|
||||
# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
|
||||
# security (vhost-net runs ring0) for network I/O performance.
|
||||
#disable_vhost_net = true
|
||||
disable_vhost_net = false
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -410,7 +388,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
entropy_source= "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -432,17 +410,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
# Recommended value when enabling: "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
#
|
||||
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#rx_rate_limiter_max_rate = 0
|
||||
rx_rate_limiter_max_rate = 0
|
||||
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
|
||||
# to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#tx_rate_limiter_max_rate = 0
|
||||
tx_rate_limiter_max_rate = 0
|
||||
|
||||
# Set where to save the guest memory dump file.
|
||||
# If set, when GUEST_PANICKED event occurred,
|
||||
@@ -452,9 +431,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# The dumped file(also called vmcore) can be processed with crash or gdb.
|
||||
#
|
||||
# WARNING:
|
||||
# Dump guest’s memory can take very long depending on the amount of guest memory
|
||||
# Dump guest's memory can take very long depending on the amount of guest memory
|
||||
# and use much disk space.
|
||||
#guest_memory_dump_path="/var/crash/kata"
|
||||
# Recommended value when enabling: "/var/crash/kata"
|
||||
guest_memory_dump_path = ""
|
||||
|
||||
# If enable paging.
|
||||
# Basically, if you want to use "gdb" rather than "crash",
|
||||
@@ -462,7 +442,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# then you should enable paging.
|
||||
#
|
||||
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
|
||||
#guest_memory_dump_paging=false
|
||||
guest_memory_dump_paging = false
|
||||
|
||||
# Enable swap in the guest. Default false.
|
||||
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
|
||||
@@ -473,20 +453,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
|
||||
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
|
||||
# be default_memory.
|
||||
#enable_guest_swap = true
|
||||
enable_guest_swap = false
|
||||
|
||||
# use legacy serial for guest console if available and implemented for architecture. Default false
|
||||
#use_legacy_serial = true
|
||||
use_legacy_serial = false
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
# disable applying SELinux on the container process
|
||||
# If set to false, the type `container_t` is applied to the container process by default.
|
||||
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
|
||||
# with `SELINUX=yes`.
|
||||
# (default: true)
|
||||
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
|
||||
|
||||
|
||||
[factory]
|
||||
@@ -501,12 +481,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
enable_template = false
|
||||
|
||||
# Specifies the path of template.
|
||||
#
|
||||
# Default "/run/vc/vm/template"
|
||||
#template_path = "/run/vc/vm/template"
|
||||
template_path = "/run/vc/vm/template"
|
||||
|
||||
# The number of caches of VMCache:
|
||||
# unspecified or == 0 --> VMCache is disabled
|
||||
@@ -525,17 +505,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# a new sandbox.
|
||||
#
|
||||
# Default 0
|
||||
#vm_cache_number = 0
|
||||
vm_cache_number = 0
|
||||
|
||||
# Specify the address of the Unix socket that is used by VMCache.
|
||||
#
|
||||
# Default /var/run/kata-containers/cache.sock
|
||||
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -549,7 +529,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
|
||||
# These modules will be loaded in the guest kernel using modprobe(8).
|
||||
@@ -562,14 +542,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# * The module is not available in the guest or it doesn't met the guest kernel
|
||||
# requirements, like architecture and version.
|
||||
#
|
||||
kernel_modules=[]
|
||||
kernel_modules = []
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 45)
|
||||
@@ -577,13 +557,13 @@ dial_timeout = 45
|
||||
|
||||
# Confidential Data Hub API timeout value in seconds
|
||||
# (default: 50)
|
||||
#cdh_api_timeout = 50
|
||||
cdh_api_timeout = 50
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -601,19 +581,19 @@ dial_timeout = 45
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"
internetworking_model = "@DEFNETWORKMODEL_QEMU@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
# enable_vcpus_pinning = false
enable_vcpus_pinning = false

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -621,22 +601,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -644,7 +625,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -652,7 +633,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -661,13 +642,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_TEE@
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_TEE@

# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
sandbox_bind_mounts = @DEFBINDMOUNTS@

# VFIO Mode
# Determines how VFIO devices should be be presented to the container.
@@ -688,22 +669,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE@"
vfio_mode = "@DEFVFIOMODE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

# Indicates the CreateContainer request timeout needed for the workload(s)
# It using guest_pull this includes the time to pull the image inside the guest

@@ -23,7 +23,7 @@ machine_type = "@MACHINETYPE@"
# - ext4 (default)
# - xfs
# - erofs
rootfs_type=@DEFROOTFSTYPE@
rootfs_type = @DEFROOTFSTYPE@

# Enable confidential guest support.
# Toggling that setting may trigger different hardware features, ranging
@@ -47,7 +47,7 @@ sev_snp_guest = true
# Enable running QEMU VMM as a non-root user.
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
# a non-root random user. See documentation for the limitations of this mode.
# rootless = true
rootless = false

# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
@@ -68,17 +68,17 @@ valid_hypervisor_paths = @QEMUSNPVALIDHYPERVISORPATHS@
#
# 96-byte, base64-encoded blob to provide the ‘ID Block’ structure for the
# SNP_LAUNCH_FINISH command defined in the SEV-SNP firmware ABI (QEMU default: all-zero)
#snp_id_block = ""
snp_id_block = ""
# 4096-byte, base64-encoded blob to provide the ‘ID Authentication Information Structure’
# for the SNP_LAUNCH_FINISH command defined in the SEV-SNP firmware ABI (QEMU default: all-zero)
#snp_id_auth = ""
snp_id_auth = ""

# SNP Guest Policy, the ‘POLICY’ parameter to the SNP_LAUNCH_START command.
# If unset, the QEMU default policy (0x30000) will be used.
# Notice that the guest policy is enforced at VM launch, and your pod VMs
# won't start at all if the policy denys it. This will be indicated by a
# 'SNP_LAUNCH_START' error.
#snp_guest_policy = 196608
snp_guest_policy = 196608

# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
@@ -105,7 +105,7 @@ firmware_volume = "@FIRMWARETDVFVOLUMEPATH@"
# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators="@MACHINEACCELERATORS@"
machine_accelerators = "@MACHINEACCELERATORS@"

# Qemu seccomp sandbox feature
# comma-separated list of seccomp sandbox features to control the syscall access.
@@ -113,12 +113,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
# Another note: enabling this feature may reduce performance, you may enable
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"

# CPU features
# comma-separated list of cpu features to pass to the cpu
# For example, `cpu_features = "pmu=off,vmx=off"
cpu_features="@CPUFEATURES@"
cpu_features = "@CPUFEATURES@"

# Default number of vCPUs per SB/VM:
# unspecified or 0 --> will be set to @DEFVCPUS@
@@ -163,7 +164,7 @@ default_memory = @DEFAULTMEMORY_NV@
# Default memory slots per SB/VM.
# If unspecified then it will be set @DEFMEMSLOTS@.
# This is will determine the times that memory will be hotadded to sandbox/VM.
#memory_slots = @DEFMEMSLOTS@
memory_slots = @DEFMEMSLOTS@

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
@@ -176,13 +177,13 @@ default_maxmemory = @DEFMAXMEMSZ@
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0
memory_offset = 0

# Specifies virtio-mem will be enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true
enable_virtio_mem = false

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
@@ -263,17 +264,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently implemented
@@ -292,7 +293,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
enable_mem_prealloc = false

# Reclaim guest freed memory.
# Enabling this will result in the VM balloon device having f_reporting=on set.
@@ -302,7 +303,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# the VM.
#
# Default false
#reclaim_guest_freed_memory = true
reclaim_guest_freed_memory = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
@@ -310,7 +311,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true
enable_hugepages = false

# Enable vhost-user storage device, default false
# Enabling this will result in some Linux reserved block type
@@ -327,11 +328,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
enable_iommu = false

# Enable IOMMU_PLATFORM, default false
# Enabling this will result in the VM device having iommu_platform=on set
#enable_iommu_platform = true
enable_iommu_platform = false

# List of valid annotations values for the vhost user store path
# The default if not set is empty (all annotations rejected.)
@@ -362,17 +363,17 @@ pflashes = []
# to enable debug output where available. And Debug also enable the hmp socket.
#
# Default false
#enable_debug = true
enable_debug = false

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true
disable_nesting_checks = false

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
msize_9p = @DEFMSIZE9P@

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
@@ -384,7 +385,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
# The value means the number of pcie_root_port
# Default 0
#pcie_root_port = 2
pcie_root_port = 0

# In a confidential compute environment hot-plugging can compromise
# security.
@@ -395,7 +396,7 @@ cold_plug_vfio = "@DEFAULTVFIOPORT_NV@"

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance.
#disable_vhost_net = true
disable_vhost_net = false

#
# Default entropy source.
@@ -407,7 +408,7 @@ cold_plug_vfio = "@DEFAULTVFIOPORT_NV@"
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"
entropy_source= "@DEFENTROPYSOURCE@"

# List of valid annotations values for entropy_source
# The default if not set is empty (all annotations rejected.)
@@ -429,17 +430,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""
#
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0

# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -449,9 +451,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""

# If enable paging.
# Basically, if you want to use "gdb" rather than "crash",
@@ -459,7 +462,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# then you should enable paging.
#
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
#guest_memory_dump_paging=false
guest_memory_dump_paging = false

# Enable swap in the guest. Default false.
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
@@ -470,20 +473,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
# be default_memory.
#enable_guest_swap = true
enable_guest_swap = false

# use legacy serial for guest console if available and implemented for architecture. Default false
#use_legacy_serial = true
use_legacy_serial = false

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@


[factory]
@@ -498,12 +501,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"
template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0 --> VMCache is disabled
@@ -522,17 +525,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# a new sandbox.
#
# Default 0
#vm_cache_number = 0
vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -546,7 +549,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -559,14 +562,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# * The module is not available in the guest or it doesn't met the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []

# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 90)
@@ -576,7 +579,7 @@ dial_timeout = 90
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
enable_debug = false
#
# Internetworking model
# Determines how the VM should be connected to the
@@ -594,19 +597,19 @@ dial_timeout = 90
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"
internetworking_model = "@DEFNETWORKMODEL_QEMU@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
# enable_vcpus_pinning = false
enable_vcpus_pinning = false

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -614,22 +617,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -637,7 +641,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -645,7 +649,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_NV@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_NV@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -654,13 +658,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_NV@
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_TEE@
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_TEE@

# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
sandbox_bind_mounts = @DEFBINDMOUNTS@

# VFIO Mode
# Determines how VFIO devices should be be presented to the container.
@@ -681,22 +685,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE@"
vfio_mode = "@DEFVFIOMODE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

# Indicates the CreateContainer request timeout needed for the workload(s)
# It using guest_pull this includes the time to pull the image inside the guest

@@ -23,7 +23,7 @@ tdx_quote_generation_service_socket_port = @QEMUTDXQUOTEGENERATIONSERVICESOCKETP
# - ext4 (default)
# - xfs
# - erofs
rootfs_type=@DEFROOTFSTYPE@
rootfs_type = @DEFROOTFSTYPE@

# Enable confidential guest support.
# Toggling that setting may trigger different hardware features, ranging
@@ -44,7 +44,7 @@ confidential_guest = true
# Enable running QEMU VMM as a non-root user.
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
# a non-root random user. See documentation for the limitations of this mode.
# rootless = true
rootless = false

# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
@@ -82,7 +82,7 @@ firmware_volume = "@FIRMWARETDVFVOLUMEPATH@"
# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators="@MACHINEACCELERATORS@"
machine_accelerators = "@MACHINEACCELERATORS@"

# Qemu seccomp sandbox feature
# comma-separated list of seccomp sandbox features to control the syscall access.
@@ -90,12 +90,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
# Another note: enabling this feature may reduce performance, you may enable
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"

# CPU features
# comma-separated list of cpu features to pass to the cpu
# For example, `cpu_features = "pmu=off,vmx=off"
cpu_features="@TDXCPUFEATURES@"
cpu_features = "@TDXCPUFEATURES@"

# Default number of vCPUs per SB/VM:
# unspecified or 0 --> will be set to @DEFVCPUS@
@@ -140,7 +141,7 @@ default_memory = @DEFAULTMEMORY_NV@
# Default memory slots per SB/VM.
# If unspecified then it will be set @DEFMEMSLOTS@.
# This is will determine the times that memory will be hotadded to sandbox/VM.
#memory_slots = @DEFMEMSLOTS@
memory_slots = @DEFMEMSLOTS@

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
@@ -153,13 +154,13 @@ default_maxmemory = @DEFMAXMEMSZ@
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0
memory_offset = 0

# Specifies virtio-mem will be enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true
enable_virtio_mem = false

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
@@ -240,17 +241,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently implemented
@@ -269,7 +270,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
enable_mem_prealloc = false

# Reclaim guest freed memory.
# Enabling this will result in the VM balloon device having f_reporting=on set.
@@ -279,7 +280,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# the VM.
#
# Default false
#reclaim_guest_freed_memory = true
reclaim_guest_freed_memory = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
@@ -287,7 +288,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true
enable_hugepages = false

# Enable vhost-user storage device, default false
# Enabling this will result in some Linux reserved block type
@@ -304,11 +305,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
enable_iommu = false

# Enable IOMMU_PLATFORM, default false
# Enabling this will result in the VM device having iommu_platform=on set
#enable_iommu_platform = true
enable_iommu_platform = false

# List of valid annotations values for the vhost user store path
# The default if not set is empty (all annotations rejected.)
@@ -324,7 +325,7 @@ vhost_user_reconnect_timeout_sec = 0
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = "@DEFFILEMEMBACKEND@"
file_mem_backend = "@DEFFILEMEMBACKEND@"

# List of valid annotations values for the file_mem_backend annotation
# The default if not set is empty (all annotations rejected.)
@@ -339,17 +340,17 @@ pflashes = []
# to enable debug output where available. And Debug also enable the hmp socket.
#
# Default false
#enable_debug = true
enable_debug = false

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true
disable_nesting_checks = false

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
msize_9p = @DEFMSIZE9P@

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
@@ -361,7 +362,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
# The value means the number of pcie_root_port
# Default 0
#pcie_root_port = 2
pcie_root_port = 0

# In a confidential compute environment hot-plugging can compromise
# security.
@@ -372,7 +373,7 @@ cold_plug_vfio = "@DEFAULTVFIOPORT_NV@"

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance.
#disable_vhost_net = true
disable_vhost_net = false

#
# Default entropy source.
@@ -384,7 +385,7 @@ cold_plug_vfio = "@DEFAULTVFIOPORT_NV@"
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"
entropy_source= "@DEFENTROPYSOURCE@"

# List of valid annotations values for entropy_source
# The default if not set is empty (all annotations rejected.)
@@ -406,17 +407,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""
#
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0

# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -426,9 +428,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""

# If enable paging.
# Basically, if you want to use "gdb" rather than "crash",
@@ -436,7 +439,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# then you should enable paging.
#
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
#guest_memory_dump_paging=false
guest_memory_dump_paging = false

# Enable swap in the guest. Default false.
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
@@ -447,20 +450,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
# be default_memory.
#enable_guest_swap = true
enable_guest_swap = false

# use legacy serial for guest console if available and implemented for architecture. Default false
#use_legacy_serial = true
use_legacy_serial = false

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@


[factory]
@@ -475,12 +478,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"
template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0 --> VMCache is disabled
@@ -499,17 +502,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# a new sandbox.
#
# Default 0
#vm_cache_number = 0
vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -523,7 +526,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -536,14 +539,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# * The module is not available in the guest or it doesn't met the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []

# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 90)
@@ -553,7 +556,7 @@ dial_timeout = 90
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
enable_debug = false
#
# Internetworking model
# Determines how the VM should be connected to the
@@ -571,19 +574,19 @@ dial_timeout = 90
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"
internetworking_model = "@DEFNETWORKMODEL_QEMU@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
# enable_vcpus_pinning = false
enable_vcpus_pinning = false

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -591,22 +594,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -614,7 +618,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -622,7 +626,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_NV@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_NV@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -631,13 +635,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_NV@
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_TEE@
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_TEE@

# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
sandbox_bind_mounts = @DEFBINDMOUNTS@

# VFIO Mode
# Determines how VFIO devices should be be presented to the container.
@@ -658,22 +662,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE@"
vfio_mode = "@DEFVFIOMODE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

# Indicates the CreateContainer request timeout needed for the workload(s)
# It using guest_pull this includes the time to pull the image inside the guest

@@ -21,34 +21,12 @@ machine_type = "@MACHINETYPE@"
# - ext4 (default)
# - xfs
# - erofs
rootfs_type=@DEFROOTFSTYPE@

# Enable confidential guest support.
# Toggling that setting may trigger different hardware features, ranging
# from memory encryption to both memory and CPU-state encryption and integrity.
# The Kata Containers runtime dynamically detects the available feature set and
# aims at enabling the largest possible one, returning an error if none is
# available, or none is supported by the hypervisor.
#
# Known limitations:
# * Does not work by design:
# - CPU Hotplug
# - Memory Hotplug
# - NVDIMM devices
#
# Default false
# confidential_guest = true

# Choose AMD SEV-SNP confidential guests
# In case of using confidential guests on AMD hardware that supports both SEV
# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default.
# Default false
# sev_snp_guest = true
rootfs_type = @DEFROOTFSTYPE@

# Enable running QEMU VMM as a non-root user.
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
# a non-root random user. See documentation for the limitations of this mode.
# rootless = true
rootless = false

# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
@@ -86,7 +64,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators="@MACHINEACCELERATORS@"
machine_accelerators = "@MACHINEACCELERATORS@"

# Qemu seccomp sandbox feature
# comma-separated list of seccomp sandbox features to control the syscall access.
@@ -94,12 +72,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
# Another note: enabling this feature may reduce performance, you may enable
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"

# CPU features
# comma-separated list of cpu features to pass to the cpu
# For example, `cpu_features = "pmu=off,vmx=off"
cpu_features="@CPUFEATURES@"
cpu_features = "@CPUFEATURES@"

# Default number of vCPUs per SB/VM:
# unspecified or 0 --> will be set to @DEFVCPUS@
@@ -144,7 +123,7 @@ default_memory = @DEFAULTMEMORY_NV@
# Default memory slots per SB/VM.
# If unspecified then it will be set @DEFMEMSLOTS@.
# This is will determine the times that memory will be hotadded to sandbox/VM.
#memory_slots = @DEFMEMSLOTS@
memory_slots = @DEFMEMSLOTS@

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
@@ -157,13 +136,13 @@ default_maxmemory = @DEFMAXMEMSZ@
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0
memory_offset = 0

# Specifies virtio-mem will be enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true
enable_virtio_mem = false

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
@@ -244,17 +223,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently implemented
@@ -273,7 +252,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
enable_mem_prealloc = false

# Reclaim guest freed memory.
# Enabling this will result in the VM balloon device having f_reporting=on set.
@@ -283,7 +262,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# the VM.
#
# Default false
#reclaim_guest_freed_memory = true
reclaim_guest_freed_memory = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
@@ -291,7 +270,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true
enable_hugepages = false

# Enable vhost-user storage device, default false
# Enabling this will result in some Linux reserved block type
@@ -308,11 +287,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
enable_iommu = false

# Enable IOMMU_PLATFORM, default false
# Enabling this will result in the VM device having iommu_platform=on set
#enable_iommu_platform = true
enable_iommu_platform = false

# List of valid annotations values for the vhost user store path
# The default if not set is empty (all annotations rejected.)
@@ -328,7 +307,7 @@ vhost_user_reconnect_timeout_sec = 0
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = "@DEFFILEMEMBACKEND@"
file_mem_backend = "@DEFFILEMEMBACKEND@"

# List of valid annotations values for the file_mem_backend annotation
# The default if not set is empty (all annotations rejected.)
@@ -343,7 +322,7 @@ pflashes = []
# to enable debug output where available.
#
# Default false
#enable_debug = true
enable_debug = false

# This option allows to add an extra HMP or QMP socket when `enable_debug = true`
#
@@ -358,17 +337,17 @@ pflashes = []
#
# If set to the empty string "", no extra monitor socket is added. This is
# the default.
#extra_monitor_socket = hmp
extra_monitor_socket = ""

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true
disable_nesting_checks = true

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
msize_9p = @DEFMSIZE9P@

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
@@ -386,7 +365,7 @@ hot_plug_vfio = "@DEFAULTVFIOPORT_NV@"
|
||||
# Enable cold-plugging of VFIO devices to a bridge-port,
|
||||
# root-port or switch-port.
|
||||
# The default setting is "no-port", which means disabled.
|
||||
#cold_plug_vfio = "@DEFAULTVFIOPORT_NV@"
|
||||
cold_plug_vfio = "no-port"
|
||||
|
||||
# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
@@ -396,7 +375,7 @@ pcie_root_port = @DEFAULTPCIEROOTPORT_NV@

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance.
#disable_vhost_net = true
disable_vhost_net = false

#
# Default entropy source.
@@ -408,7 +387,7 @@ pcie_root_port = @DEFAULTPCIEROOTPORT_NV@
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"
entropy_source= "@DEFENTROPYSOURCE@"

# List of valid annotations values for entropy_source
# The default if not set is empty (all annotations rejected.)
@@ -430,17 +409,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""
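As a hedged example, enabling guest hooks with the recommended path from the comment above would look like:

guest_hook_path = "/usr/share/oci/hooks"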
#
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0

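For instance, capping the sandbox at roughly 10 Mbit/s in each direction; the values are in bits/sec per the comments above, and the numbers here are only an illustration:

rx_rate_limiter_max_rate = 10000000   # ~10 Mbit/s inbound
tx_rate_limiter_max_rate = 10000000   # ~10 Mbit/s outbound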
# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -450,9 +430,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""

# If enable paging.
# Basically, if you want to use "gdb" rather than "crash",
@@ -460,7 +441,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# then you should enable paging.
#
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
#guest_memory_dump_paging=false
guest_memory_dump_paging = false

# Enable swap in the guest. Default false.
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
@@ -471,20 +452,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
# be default_memory.
#enable_guest_swap = true
enable_guest_swap = false

# use legacy serial for guest console if available and implemented for architecture. Default false
#use_legacy_serial = true
use_legacy_serial = false

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@


[factory]
@@ -499,12 +480,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"
template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0 --> VMCache is disabled
@@ -523,17 +504,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# a new sandbox.
#
# Default 0
#vm_cache_number = 0
vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

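Putting the [factory] options together, a minimal sketch of turning on VM templating with the default path referenced above; note the earlier requirement that "initrd=" (not "image=") be set:

[factory]
enable_template = true
template_path = "/run/vc/vm/template"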
[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -547,7 +528,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -560,14 +541,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# * The module is not available in the guest or it doesn't met the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []

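A hedged example of the list format described above, using a hypothetical module name and parameter rather than anything taken from this diff:

kernel_modules = ["nf_conntrack hashsize=32768"]   # each entry is "module param=value ...", loaded via modprobe(8) in the guest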
# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 90)
@@ -577,7 +558,7 @@ dial_timeout = 90
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
enable_debug = false
#
# Internetworking model
# Determines how the VM should be connected to the
@@ -595,19 +576,19 @@ dial_timeout = 90
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"
internetworking_model = "@DEFNETWORKMODEL_QEMU@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
# enable_vcpus_pinning = false
enable_vcpus_pinning = false

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -615,22 +596,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -638,7 +620,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -646,7 +628,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_NV@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY_NV@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -655,13 +637,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY_NV@
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT@

# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
sandbox_bind_mounts = @DEFBINDMOUNTS@

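For illustration, mounting a single host directory read-only into the sandbox's shared path; the path is hypothetical and the list-of-host-paths form is assumed from the description above:

sandbox_bind_mounts = ["/opt/kata-guest-assets"]   # appears in the guest under the sandbox-mounts directory noted above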
# VFIO Mode
# Determines how VFIO devices should be be presented to the container.
@@ -682,22 +664,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE@"
vfio_mode = "@DEFVFIOMODE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

# Indicates the CreateContainer request timeout needed for the workload(s)
# It using guest_pull this includes the time to pull the image inside the guest

@@ -35,7 +35,7 @@ confidential_guest = true
# Enable running QEMU VMM as a non-root user.
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
# a non-root random user. See documentation for the limitations of this mode.
# rootless = true
rootless = false

# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
@@ -73,7 +73,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators="@MACHINEACCELERATORS@"
machine_accelerators = "@MACHINEACCELERATORS@"

# Qemu seccomp sandbox feature
# comma-separated list of seccomp sandbox features to control the syscall access.
@@ -81,12 +81,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
# Another note: enabling this feature may reduce performance, you may enable
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"

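Using the recommended value from the comment above, enabling the QEMU seccomp sandbox would look like:

seccompsandbox = "on,obsolete=deny,spawn=deny,resourcecontrol=deny"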
# CPU features
# comma-separated list of cpu features to pass to the cpu
# For example, `cpu_features = "pmu=off,vmx=off"
cpu_features="@CPUFEATURES@"
cpu_features = "@CPUFEATURES@"

# Default number of vCPUs per SB/VM:
# unspecified or 0 --> will be set to @DEFVCPUS@
@@ -131,7 +132,7 @@ default_memory = @DEFMEMSZ@
# Default memory slots per SB/VM.
# If unspecified then it will be set @DEFMEMSLOTS@.
# This is will determine the times that memory will be hotadded to sandbox/VM.
#memory_slots = @DEFMEMSLOTS@
memory_slots = @DEFMEMSLOTS@

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
@@ -144,13 +145,13 @@ default_maxmemory = @DEFMAXMEMSZ@
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0
memory_offset = 0

# Specifies virtio-mem will be enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true
enable_virtio_mem = false

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
@@ -230,17 +231,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently implemented
@@ -259,7 +260,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
enable_mem_prealloc = false

# Reclaim guest freed memory.
# Enabling this will result in the VM balloon device having f_reporting=on set.
@@ -269,7 +270,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# the VM.
#
# Default false
#reclaim_guest_freed_memory = true
reclaim_guest_freed_memory = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
@@ -277,7 +278,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true
enable_hugepages = false

# Enable vhost-user storage device, default false
# Enabling this will result in some Linux reserved block type
@@ -294,11 +295,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
enable_iommu = false

# Enable IOMMU_PLATFORM, default false
# Enabling this will result in the VM device having iommu_platform=on set
#enable_iommu_platform = true
enable_iommu_platform = false

# List of valid annotations values for the vhost user store path
# The default if not set is empty (all annotations rejected.)
@@ -309,7 +310,7 @@ valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = "@DEFFILEMEMBACKEND@"
file_mem_backend = "@DEFFILEMEMBACKEND@"

# List of valid annotations values for the file_mem_backend annotation
# The default if not set is empty (all annotations rejected.)
@@ -324,17 +325,17 @@ pflashes = []
# to enable debug output where available.
#
# Default false
#enable_debug = true
enable_debug = false

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true
disable_nesting_checks = false

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
msize_9p = @DEFMSIZE9P@

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
@@ -342,10 +343,10 @@ pflashes = []
# nvdimm is not supported when `confidential_guest = true`.
disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@

# Enable hot-plugging of VFIO devices to a bridge-port,
# Enable hot-plugging of VFIO devices to a bridge-port,
# root-port or switch-port.
# The default setting is "no-port"
#hot_plug_vfio = "bridge-port"
hot_plug_vfio = "no-port"

# In a confidential compute environment hot-plugging can compromise
# security.
@@ -358,11 +359,11 @@ cold_plug_vfio = "bridge-port"
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
# The value means the number of pcie_root_port
# Default 0
#pcie_root_port = 2
pcie_root_port = 0

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance.
#disable_vhost_net = true
disable_vhost_net = false

#
# Default entropy source.
@@ -374,7 +375,7 @@ cold_plug_vfio = "bridge-port"
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"
entropy_source= "@DEFENTROPYSOURCE@"

# List of valid annotations values for entropy_source
# The default if not set is empty (all annotations rejected.)
@@ -396,17 +397,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""
#
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0

# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -416,9 +418,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""

# If enable paging.
# Basically, if you want to use "gdb" rather than "crash",
@@ -426,7 +429,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# then you should enable paging.
#
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
#guest_memory_dump_paging=false
guest_memory_dump_paging = false

# Enable swap in the guest. Default false.
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
@@ -437,20 +440,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
# be default_memory.
#enable_guest_swap = true
enable_guest_swap = false

# use legacy serial for guest console if available and implemented for architecture. Default false
#use_legacy_serial = true
use_legacy_serial = false

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@


[factory]
@@ -465,12 +468,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"
template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0 --> VMCache is disabled
@@ -489,17 +492,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# a new sandbox.
#
# Default 0
#vm_cache_number = 0
vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -513,7 +516,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -526,14 +529,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# * The module is not available in the guest or it doesn't met the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []

# Enable debug console.

# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 30)
@@ -543,7 +546,7 @@ dial_timeout = 90
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
enable_debug = false
#
# Internetworking model
# Determines how the VM should be connected to the
@@ -561,19 +564,19 @@ dial_timeout = 90
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_QEMU@"
internetworking_model = "@DEFNETWORKMODEL_QEMU@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@

# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
# enable_vcpus_pinning = false
enable_vcpus_pinning = false

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -581,22 +584,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -604,7 +608,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -612,7 +616,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@

# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
@@ -621,13 +625,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT@

# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
sandbox_bind_mounts = @DEFBINDMOUNTS@

# VFIO Mode
# Determines how VFIO devices should be be presented to the container.
@@ -648,22 +652,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE_SE@"
vfio_mode = "@DEFVFIOMODE_SE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@
experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true
enable_pprof = false

# Indicates the CreateContainer request timeout needed for the workload(s)
# It using guest_pull this includes the time to pull the image inside the guest

@@ -15,7 +15,6 @@
[hypervisor.qemu]
path = "@QEMUPATH@"
kernel = "@KERNELCONFIDENTIALPATH@"
#image = "@IMAGEPATH@"
initrd = "@INITRDCONFIDENTIALPATH@"
machine_type = "@MACHINETYPE@"

@@ -23,7 +22,7 @@ machine_type = "@MACHINETYPE@"
# - ext4 (default)
# - xfs
# - erofs
rootfs_type=@DEFROOTFSTYPE@
rootfs_type = @DEFROOTFSTYPE@

# Enable confidential guest support.
# Toggling that setting may trigger different hardware features, ranging
@@ -47,7 +46,7 @@ sev_snp_guest = true
# Enable running QEMU VMM as a non-root user.
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
# a non-root random user. See documentation for the limitations of this mode.
# rootless = true
rootless = false

# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
@@ -68,17 +67,17 @@ valid_hypervisor_paths = @QEMUVALIDHYPERVISORPATHS@
#
# 96-byte, base64-encoded blob to provide the ‘ID Block’ structure for the
# SNP_LAUNCH_FINISH command defined in the SEV-SNP firmware ABI (QEMU default: all-zero)
#snp_id_block = ""
snp_id_block = ""
# 4096-byte, base64-encoded blob to provide the ‘ID Authentication Information Structure’
# for the SNP_LAUNCH_FINISH command defined in the SEV-SNP firmware ABI (QEMU default: all-zero)
#snp_id_auth = ""
snp_id_auth = ""

# SNP Guest Policy, the ‘POLICY’ parameter to the SNP_LAUNCH_START command.
# If unset, the QEMU default policy (0x30000) will be used.
# Notice that the guest policy is enforced at VM launch, and your pod VMs
# won't start at all if the policy denys it. This will be indicated by a
# 'SNP_LAUNCH_START' error.
#snp_guest_policy = 196608
snp_guest_policy = 196608

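The decimal value above is simply the QEMU default policy from the comment expressed in base 10: 0x30000 = 3 × 65536 = 196608. A custom policy would be supplied in the same decimal form, for example (illustrative only):

snp_guest_policy = 196608   # equivalent to 0x30000, the QEMU default noted above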
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
@@ -105,7 +104,7 @@ firmware_volume = "@FIRMWARETDVFVOLUMEPATH@"
# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators="@MACHINEACCELERATORS@"
machine_accelerators = "@MACHINEACCELERATORS@"

# Qemu seccomp sandbox feature
# comma-separated list of seccomp sandbox features to control the syscall access.
@@ -113,12 +112,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
# Another note: enabling this feature may reduce performance, you may enable
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"

# CPU features
# comma-separated list of cpu features to pass to the cpu
# For example, `cpu_features = "pmu=off,vmx=off"
cpu_features="@CPUFEATURES@"
cpu_features = "@CPUFEATURES@"

# Default number of vCPUs per SB/VM:
# unspecified or 0 --> will be set to @DEFVCPUS@
@@ -163,7 +163,7 @@ default_memory = @DEFMEMSZ@
# Default memory slots per SB/VM.
# If unspecified then it will be set @DEFMEMSLOTS@.
# This is will determine the times that memory will be hotadded to sandbox/VM.
#memory_slots = @DEFMEMSLOTS@
memory_slots = @DEFMEMSLOTS@

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
@@ -176,13 +176,13 @@ default_maxmemory = @DEFMAXMEMSZ@
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0
memory_offset = 0

# Specifies virtio-mem will be enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true
enable_virtio_mem = false

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
@@ -263,17 +263,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true
block_device_cache_set = false

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
block_device_cache_direct = false

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
block_device_cache_noflush = false

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently implemented
@@ -292,7 +292,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true
enable_mem_prealloc = false

# Reclaim guest freed memory.
# Enabling this will result in the VM balloon device having f_reporting=on set.
@@ -302,7 +302,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# the VM.
#
# Default false
#reclaim_guest_freed_memory = true
reclaim_guest_freed_memory = false

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
@@ -310,7 +310,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true
enable_hugepages = false

# Enable vhost-user storage device, default false
# Enabling this will result in some Linux reserved block type
@@ -327,11 +327,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
enable_iommu = false

# Enable IOMMU_PLATFORM, default false
# Enabling this will result in the VM device having iommu_platform=on set
#enable_iommu_platform = true
enable_iommu_platform = false

# List of valid annotations values for the vhost user store path
# The default if not set is empty (all annotations rejected.)
@@ -362,7 +362,7 @@ pflashes = []
# to enable debug output where available. And Debug also enable the hmp socket.
#
# Default false
#enable_debug = true
enable_debug = false

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
@@ -372,7 +372,7 @@ disable_nesting_checks = true

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = @DEFMSIZE9P@
msize_9p = @DEFMSIZE9P@

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
@@ -384,11 +384,11 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
# The value means the number of pcie_root_port
# Default 0
#pcie_root_port = 2
pcie_root_port = 0

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance.
#disable_vhost_net = true
disable_vhost_net = false

#
# Default entropy source.
@@ -400,7 +400,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "@DEFENTROPYSOURCE@"
entropy_source= "@DEFENTROPYSOURCE@"

# List of valid annotations values for entropy_source
# The default if not set is empty (all annotations rejected.)
@@ -422,17 +422,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""
#
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0

# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -442,9 +443,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file(also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""

# If enable paging.
# Basically, if you want to use "gdb" rather than "crash",
@@ -452,7 +454,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# then you should enable paging.
#
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
#guest_memory_dump_paging=false
guest_memory_dump_paging = false

# Enable swap in the guest. Default false.
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
@@ -463,20 +465,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
# be default_memory.
#enable_guest_swap = true
enable_guest_swap = false

# use legacy serial for guest console if available and implemented for architecture. Default false
#use_legacy_serial = true
use_legacy_serial = false

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@
disable_selinux = @DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@


[factory]
|
||||
@@ -491,12 +493,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
enable_template = false
|
||||
|
||||
# Specifies the path of template.
|
||||
#
|
||||
# Default "/run/vc/vm/template"
|
||||
#template_path = "/run/vc/vm/template"
|
||||
template_path = "/run/vc/vm/template"
|
||||
|
||||
# The number of caches of VMCache:
|
||||
# unspecified or == 0 --> VMCache is disabled
|
||||
@@ -515,17 +517,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# a new sandbox.
|
||||
#
|
||||
# Default 0
|
||||
#vm_cache_number = 0
|
||||
vm_cache_number = 0
|
||||
|
||||
# Specify the address of the Unix socket that is used by VMCache.
|
||||
#
|
||||
# Default /var/run/kata-containers/cache.sock
|
||||
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -539,7 +541,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
|
||||
# These modules will be loaded in the guest kernel using modprobe(8).
|
||||
@@ -552,14 +554,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# * The module is not available in the guest or it doesn't met the guest kernel
|
||||
# requirements, like architecture and version.
|
||||
#
|
||||
kernel_modules=[]
|
||||
kernel_modules = []
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 90)
|
||||
@@ -569,7 +571,7 @@ dial_timeout = 90
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -587,19 +589,19 @@ dial_timeout = 90
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_QEMU@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_QEMU@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# vCPUs pinning settings
|
||||
# if enabled, each vCPU thread will be scheduled to a fixed CPU
|
||||
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
|
||||
# enable_vcpus_pinning = false
|
||||
enable_vcpus_pinning = false
|
||||
|
||||
# Apply a custom SELinux security policy to the container process inside the VM.
|
||||
# This is used when you want to apply a type other than the default `container_t`,
|
||||
@@ -607,22 +609,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# (format: "user:role:type")
|
||||
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
|
||||
# categories are determined automatically by high-level container runtimes such as containerd.
|
||||
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
|
||||
# Example value when enabling: "system_u:system_r:container_t"
|
||||
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -630,7 +633,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -638,7 +641,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -647,13 +650,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_TEE@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_TEE@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifieds host paths to be mounted (ro) into the sandboxes shared path.
|
||||
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
|
||||
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
|
||||
# These will not be exposed to the container workloads, and are only provided for potential guest services.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be be presented to the container.
|
||||
@@ -674,22 +677,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# Indicates the CreateContainer request timeout needed for the workload(s)
|
||||
# It using guest_pull this includes the time to pull the image inside the guest
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
path = "@QEMUTDXPATH@"
|
||||
kernel = "@KERNELCONFIDENTIALPATH@"
|
||||
image = "@IMAGECONFIDENTIALPATH@"
|
||||
# initrd = "@INITRDPATH@"
|
||||
machine_type = "@MACHINETYPE@"
|
||||
tdx_quote_generation_service_socket_port = @QEMUTDXQUOTEGENERATIONSERVICESOCKETPORT@
|
||||
|
||||
@@ -23,7 +22,7 @@ tdx_quote_generation_service_socket_port = @QEMUTDXQUOTEGENERATIONSERVICESOCKETP
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# Enable confidential guest support.
|
||||
# Toggling that setting may trigger different hardware features, ranging
|
||||
@@ -44,7 +43,7 @@ confidential_guest = true
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
# rootless = true
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -82,7 +81,7 @@ firmware_volume = "@FIRMWARETDVFVOLUMEPATH@"
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
|
||||
machine_accelerators="@MACHINEACCELERATORS@"
|
||||
machine_accelerators = "@MACHINEACCELERATORS@"

# Qemu seccomp sandbox feature
# comma-separated list of seccomp sandbox features to control the syscall access.
@@ -90,12 +89,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
# Another note: enabling this feature may reduce performance, you may enable
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"
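
As a sketch, enabling the QEMU seccomp sandbox with the recommended value quoted above would look like this (keeping in mind the performance note and the bpf_jit_enable mitigation):

    seccompsandbox = "on,obsolete=deny,spawn=deny,resourcecontrol=deny"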

# CPU features
# comma-separated list of cpu features to pass to the cpu
# For example, `cpu_features = "pmu=off,vmx=off"`
cpu_features="@TDXCPUFEATURES@"
cpu_features = "@TDXCPUFEATURES@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
@@ -140,7 +140,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This will determine the times that memory will be hot-added to the sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -153,13 +153,13 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0

# Specifies whether virtio-mem will be enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true
enable_virtio_mem = false
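
A minimal sketch of enabling it, assuming the host has already been prepared with the overcommit_memory setting mentioned above:

    enable_virtio_mem = true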
|
||||
|
||||
# Disable block device from being used for a container's rootfs.
|
||||
# In case of a storage driver like devicemapper where a container's
|
||||
@@ -240,17 +240,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
block_device_cache_set = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
block_device_cache_noflush = false
|
||||
|
||||
# Enable iothreads (data-plane) to be used. This causes IO to be
|
||||
# handled in a separate IO thread. This is currently implemented
|
||||
@@ -269,7 +269,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Reclaim guest freed memory.
|
||||
# Enabling this will result in the VM balloon device having f_reporting=on set.
|
||||
@@ -279,7 +279,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
reclaim_guest_freed_memory = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -287,7 +287,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vhost-user storage device, default false
|
||||
# Enabling this will result in some Linux reserved block type
|
||||
@@ -304,11 +304,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# Enable IOMMU_PLATFORM, default false
|
||||
# Enabling this will result in the VM device having iommu_platform=on set
|
||||
#enable_iommu_platform = true
|
||||
enable_iommu_platform = false
|
||||
|
||||
# List of valid annotations values for the vhost user store path
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -324,7 +324,7 @@ vhost_user_reconnect_timeout_sec = 0
|
||||
# will disable this feature. In the case of virtio-fs, this is enabled
|
||||
# automatically and '/dev/shm' is used as the backing folder.
|
||||
# This option will be ignored if VM templating is enabled.
|
||||
#file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
|
||||
# List of valid annotations values for the file_mem_backend annotation
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -339,17 +339,17 @@ pflashes = []
|
||||
# to enable debug output where available. And Debug also enable the hmp socket.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = false
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
@@ -361,11 +361,11 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
|
||||
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
|
||||
# The value means the number of pcie_root_port
|
||||
# Default 0
|
||||
#pcie_root_port = 2
|
||||
pcie_root_port = 0
|
||||
|
||||
# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
|
||||
# security (vhost-net runs ring0) for network I/O performance.
|
||||
#disable_vhost_net = true
|
||||
disable_vhost_net = false
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -377,7 +377,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
entropy_source= "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -399,17 +399,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""
#
# Use rx Rate Limiter to control network I/O inbound bandwidth (size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) to discipline traffic.
# Default 0-sized value means unlimited rate.
#rx_rate_limiter_max_rate = 0
rx_rate_limiter_max_rate = 0
# Use tx Rate Limiter to control network I/O outbound bandwidth (size in bits/sec for SB/VM).
# In Qemu, we use classful qdiscs HTB (Hierarchy Token Bucket) and ifb (Intermediate Functional Block)
# to discipline traffic.
# Default 0-sized value means unlimited rate.
#tx_rate_limiter_max_rate = 0
tx_rate_limiter_max_rate = 0
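
As an illustration (the 1 Gbps figure is arbitrary), capping a sandbox's inbound and outbound bandwidth would look like:

    rx_rate_limiter_max_rate = 1000000000
    tx_rate_limiter_max_rate = 1000000000

Both values are in bits per second, and 0 keeps the rate unlimited, as noted above.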

# Set where to save the guest memory dump file.
# If set, when GUEST_PANICKED event occurred,
@@ -419,9 +420,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# The dumped file (also called vmcore) can be processed with crash or gdb.
#
# WARNING:
# Dump guest’s memory can take very long depending on the amount of guest memory
# Dump guest's memory can take very long depending on the amount of guest memory
# and use much disk space.
#guest_memory_dump_path="/var/crash/kata"
# Recommended value when enabling: "/var/crash/kata"
guest_memory_dump_path = ""
|
||||
|
||||
# If enable paging.
|
||||
# Basically, if you want to use "gdb" rather than "crash",
|
||||
@@ -429,7 +431,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# then you should enable paging.
|
||||
#
|
||||
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
|
||||
#guest_memory_dump_paging=false
|
||||
guest_memory_dump_paging = false
|
||||
|
||||
# Enable swap in the guest. Default false.
|
||||
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
|
||||
@@ -440,20 +442,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
|
||||
# If swap_in_bytes and memory_limit_in_bytes is not set, the size should
|
||||
# be default_memory.
|
||||
#enable_guest_swap = true
|
||||
enable_guest_swap = false
|
||||
|
||||
# use legacy serial for guest console if available and implemented for architecture. Default false
|
||||
#use_legacy_serial = true
|
||||
use_legacy_serial = false
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
# disable applying SELinux on the container process
|
||||
# If set to false, the type `container_t` is applied to the container process by default.
|
||||
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
|
||||
# with `SELINUX=yes`.
|
||||
# (default: true)
|
||||
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@


[factory]
@@ -468,12 +470,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"
template_path = "/run/vc/vm/template"
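
A minimal sketch of enabling VM templating with the default template location; per the note above, this also requires "initrd =" (rather than "image =") to be set for the hypervisor:

    enable_template = true
    template_path = "/run/vc/vm/template"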

# The number of caches of VMCache:
# unspecified or == 0 --> VMCache is disabled
@@ -492,17 +494,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# a new sandbox.
#
# Default 0
#vm_cache_number = 0
vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
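
For illustration (the cache count is arbitrary), enabling VMCache so that three pre-created VMs are served over the default socket would look like:

    vm_cache_number = 3
    vm_cache_endpoint = "/var/run/kata-containers/cache.sock"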

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -516,7 +518,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -529,14 +531,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
# * The module is not available in the guest or it doesn't meet the guest kernel
# requirements, like architecture and version.
#
kernel_modules=[]
kernel_modules = []
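
A hypothetical example (the module names and parameters are purely illustrative) of asking the agent to load two modules at boot, each entry being a module name optionally followed by its modprobe(8) parameters:

    kernel_modules = ["nf_conntrack", "i915 enable_guc=2"]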
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 60)
|
||||
@@ -546,7 +548,7 @@ dial_timeout = 60
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -564,19 +566,19 @@ dial_timeout = 60
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_QEMU@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_QEMU@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# vCPUs pinning settings
|
||||
# if enabled, each vCPU thread will be scheduled to a fixed CPU
|
||||
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
|
||||
# enable_vcpus_pinning = false
|
||||
enable_vcpus_pinning = false
|
||||
|
||||
# Apply a custom SELinux security policy to the container process inside the VM.
|
||||
# This is used when you want to apply a type other than the default `container_t`,
|
||||
@@ -584,22 +586,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# (format: "user:role:type")
|
||||
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
|
||||
# categories are determined automatically by high-level container runtimes such as containerd.
|
||||
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
|
||||
# Example value when enabling: "system_u:system_r:container_t"
|
||||
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""
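
A sketch of enabling runtime tracing against a local Jaeger collector, using the default collector URL quoted above; the user and password entries are only needed if the collector enforces basic auth:

    enable_tracing = true
    jaeger_endpoint = "http://localhost:14268/api/traces"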
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -607,7 +610,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -615,7 +618,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -624,13 +627,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_TEE@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_TEE@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bind mounted into the shared fs directory.
|
||||
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
|
||||
# These will not be exposed to the container workloads, and are only provided for potential guest services.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be presented to the container.
|
||||
@@ -651,22 +654,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# Indicates the CreateContainer request timeout needed for the workload(s)
|
||||
# If using guest_pull, this includes the time to pull the image inside the guest
|
||||
|
||||
@@ -15,41 +15,18 @@
|
||||
path = "@QEMUPATH@"
|
||||
kernel = "@KERNELPATH@"
|
||||
image = "@IMAGEPATH@"
|
||||
# initrd = "@INITRDPATH@"
|
||||
machine_type = "@MACHINETYPE@"
|
||||
|
||||
# rootfs filesystem type:
|
||||
# - ext4 (default)
|
||||
# - xfs
|
||||
# - erofs
|
||||
rootfs_type=@DEFROOTFSTYPE@
|
||||
|
||||
# Enable confidential guest support.
|
||||
# Toggling that setting may trigger different hardware features, ranging
|
||||
# from memory encryption to both memory and CPU-state encryption and integrity.
|
||||
# The Kata Containers runtime dynamically detects the available feature set and
|
||||
# aims at enabling the largest possible one, returning an error if none is
|
||||
# available, or none is supported by the hypervisor.
|
||||
#
|
||||
# Known limitations:
|
||||
# * Does not work by design:
|
||||
# - CPU Hotplug
|
||||
# - Memory Hotplug
|
||||
# - NVDIMM devices
|
||||
#
|
||||
# Default false
|
||||
# confidential_guest = true
|
||||
|
||||
# Choose AMD SEV-SNP confidential guests
|
||||
# In case of using confidential guests on AMD hardware that supports both SEV
|
||||
# and SEV-SNP, the following enables SEV-SNP guests. SEV guests are default.
|
||||
# Default false
|
||||
# sev_snp_guest = true
|
||||
rootfs_type = @DEFROOTFSTYPE@
|
||||
|
||||
# Enable running QEMU VMM as a non-root user.
|
||||
# By default QEMU VMM run as root. When this is set to true, QEMU VMM process runs as
|
||||
# a non-root random user. See documentation for the limitations of this mode.
|
||||
# rootless = true
|
||||
rootless = false
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
@@ -87,7 +64,7 @@ firmware_volume = "@FIRMWAREVOLUMEPATH@"
|
||||
# Machine accelerators
|
||||
# comma-separated list of machine accelerators to pass to the hypervisor.
|
||||
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
|
||||
machine_accelerators="@MACHINEACCELERATORS@"
|
||||
machine_accelerators = "@MACHINEACCELERATORS@"
|
||||
|
||||
# Qemu seccomp sandbox feature
|
||||
# comma-separated list of seccomp sandbox features to control the syscall access.
|
||||
@@ -95,12 +72,13 @@ machine_accelerators="@MACHINEACCELERATORS@"
|
||||
# Note: "elevateprivileges=deny" doesn't work with daemonize option, so it's removed from the seccomp sandbox
|
||||
# Another note: enabling this feature may reduce performance, you may enable
|
||||
# /proc/sys/net/core/bpf_jit_enable to reduce the impact. see https://man7.org/linux/man-pages/man8/bpfc.8.html
|
||||
#seccompsandbox="@DEFSECCOMPSANDBOXPARAM@"
|
||||
# Recommended value when enabling: "on,obsolete=deny,spawn=deny,resourcecontrol=deny"
|
||||
seccompsandbox = "@DEFSECCOMPSANDBOXPARAM@"
|
||||
|
||||
# CPU features
|
||||
# comma-separated list of cpu features to pass to the cpu
|
||||
# For example, `cpu_features = "pmu=off,vmx=off"
|
||||
cpu_features="@CPUFEATURES@"
|
||||
cpu_features = "@CPUFEATURES@"
|
||||
|
||||
# Default number of vCPUs per SB/VM:
|
||||
# unspecified or 0 --> will be set to @DEFVCPUS@
|
||||
@@ -145,7 +123,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This will determine the times that memory will be hot-added to the sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -158,13 +136,13 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Specifies whether virtio-mem will be enabled or not.
|
||||
# Please note that this option should be used with the command
|
||||
# "echo 1 > /proc/sys/vm/overcommit_memory".
|
||||
# Default false
|
||||
#enable_virtio_mem = true
|
||||
enable_virtio_mem = false
|
||||
|
||||
# Disable block device from being used for a container's rootfs.
|
||||
# In case of a storage driver like devicemapper where a container's
|
||||
@@ -245,17 +223,17 @@ block_device_aio = "@DEFBLOCKDEVICEAIO_QEMU@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
block_device_cache_set = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
block_device_cache_noflush = false
|
||||
|
||||
# Enable iothreads (data-plane) to be used. This causes IO to be
|
||||
# handled in a separate IO thread. This is currently implemented
|
||||
@@ -274,7 +252,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# upfront or in the cases where you want memory latencies
|
||||
# to be very predictable
|
||||
# Default false
|
||||
#enable_mem_prealloc = true
|
||||
enable_mem_prealloc = false
|
||||
|
||||
# Reclaim guest freed memory.
|
||||
# Enabling this will result in the VM balloon device having f_reporting=on set.
|
||||
@@ -284,7 +262,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# the VM.
|
||||
#
|
||||
# Default false
|
||||
#reclaim_guest_freed_memory = true
|
||||
reclaim_guest_freed_memory = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -292,7 +270,7 @@ indep_iothreads = @DEFINDEPIOTHREADS@
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vhost-user storage device, default false
|
||||
# Enabling this will result in some Linux reserved block type
|
||||
@@ -309,11 +287,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# Enable IOMMU_PLATFORM, default false
|
||||
# Enabling this will result in the VM device having iommu_platform=on set
|
||||
#enable_iommu_platform = true
|
||||
enable_iommu_platform = false
|
||||
|
||||
# List of valid annotations values for the vhost user store path
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -329,7 +307,7 @@ vhost_user_reconnect_timeout_sec = 0
|
||||
# will disable this feature. In the case of virtio-fs, this is enabled
|
||||
# automatically and '/dev/shm' is used as the backing folder.
|
||||
# This option will be ignored if VM templating is enabled.
|
||||
#file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
file_mem_backend = "@DEFFILEMEMBACKEND@"
|
||||
|
||||
# List of valid annotations values for the file_mem_backend annotation
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -344,7 +322,7 @@ pflashes = []
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# This option allows to add an extra HMP or QMP socket when `enable_debug = true`
|
||||
#
|
||||
@@ -359,17 +337,17 @@ pflashes = []
|
||||
#
|
||||
# If set to the empty string "", no extra monitor socket is added. This is
|
||||
# the default.
|
||||
#extra_monitor_socket = hmp
|
||||
extra_monitor_socket = ""
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = false
|
||||
|
||||
# This is the msize used for 9p shares. It is the number of bytes
|
||||
# used for 9p packet payload.
|
||||
#msize_9p = @DEFMSIZE9P@
|
||||
msize_9p = @DEFMSIZE9P@
|
||||
|
||||
# If false and nvdimm is supported, use nvdimm device to plug guest image.
|
||||
# Otherwise virtio-block device is used.
|
||||
@@ -380,24 +358,24 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
# Enable hot-plugging of VFIO devices to a bridge-port,
# root-port or switch-port.
# The default setting is "no-port"
#hot_plug_vfio = "root-port"
hot_plug_vfio = "no-port"

# In a confidential compute environment hot-plugging can compromise
# security.
# Enable cold-plugging of VFIO devices to a bridge-port,
# root-port or switch-port.
# The default setting is "no-port", which means disabled.
#cold_plug_vfio = "root-port"
cold_plug_vfio = "no-port"

# Before hot plugging a PCIe device, you need to add a pcie_root_port device.
# Use this parameter when using some large PCI bar devices, such as Nvidia GPU
# The value means the number of pcie_root_port
# Default 0
#pcie_root_port = 2
pcie_root_port = 0
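
As an illustrative sketch for a large PCI BAR device such as an Nvidia GPU (the port count of 2 is arbitrary), one could reserve two PCIe root ports and hot-plug VFIO devices into them:

    hot_plug_vfio = "root-port"
    pcie_root_port = 2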
|
||||
|
||||
# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
|
||||
# security (vhost-net runs ring0) for network I/O performance.
|
||||
#disable_vhost_net = true
|
||||
disable_vhost_net = false
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -409,7 +387,7 @@ disable_image_nvdimm = @DEFDISABLEIMAGENVDIMM@
|
||||
# The source of entropy /dev/urandom is non-blocking and provides a
|
||||
# generally acceptable source of entropy. It should work well for pretty much
|
||||
# all practical purposes.
|
||||
#entropy_source= "@DEFENTROPYSOURCE@"
|
||||
entropy_source= "@DEFENTROPYSOURCE@"
|
||||
|
||||
# List of valid annotations values for entropy_source
|
||||
# The default if not set is empty (all annotations rejected.)
|
||||
@@ -431,17 +409,18 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
# Recommended value when enabling: "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
#
|
||||
# Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#rx_rate_limiter_max_rate = 0
|
||||
rx_rate_limiter_max_rate = 0
|
||||
# Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
|
||||
# In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
|
||||
# to discipline traffic.
|
||||
# Default 0-sized value means unlimited rate.
|
||||
#tx_rate_limiter_max_rate = 0
|
||||
tx_rate_limiter_max_rate = 0
|
||||
|
||||
# Set where to save the guest memory dump file.
|
||||
# If set, when GUEST_PANICKED event occurred,
|
||||
@@ -451,9 +430,10 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# The dumped file(also called vmcore) can be processed with crash or gdb.
|
||||
#
|
||||
# WARNING:
|
||||
# Dump guest’s memory can take very long depending on the amount of guest memory
|
||||
# Dump guest's memory can take very long depending on the amount of guest memory
|
||||
# and use much disk space.
|
||||
#guest_memory_dump_path="/var/crash/kata"
|
||||
# Recommended value when enabling: "/var/crash/kata"
|
||||
guest_memory_dump_path = ""
|
||||
|
||||
# If enable paging.
|
||||
# Basically, if you want to use "gdb" rather than "crash",
|
||||
@@ -461,7 +441,7 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
|
||||
# then you should enable paging.
|
||||
#
|
||||
# See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
|
||||
#guest_memory_dump_paging=false
|
||||
guest_memory_dump_paging = false
|
||||

# Enable swap in the guest. Default false.
# When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
@@ -472,20 +452,20 @@ valid_entropy_sources = @DEFVALIDENTROPYSOURCES@
# If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
# If swap_in_bytes and memory_limit_in_bytes are not set, the size should
# be default_memory.
#enable_guest_swap = true
enable_guest_swap = false
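
A minimal sketch of enabling guest swap; per the comments above, the swap file size is derived from swap_in_bytes, then memory_limit_in_bytes, then default_memory:

    enable_guest_swap = true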
|
||||
|
||||
# use legacy serial for guest console if available and implemented for architecture. Default false
|
||||
#use_legacy_serial = true
|
||||
use_legacy_serial = false
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
# disable applying SELinux on the container process
|
||||
# If set to false, the type `container_t` is applied to the container process by default.
|
||||
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
|
||||
# with `SELINUX=yes`.
|
||||
# (default: true)
|
||||
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
|
||||
|
||||
|
||||
[factory]
|
||||
@@ -500,12 +480,12 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# Note: Requires "initrd=" to be set ("image=" is not supported).
|
||||
#
|
||||
# Default false
|
||||
#enable_template = true
|
||||
enable_template = false
|
||||
|
||||
# Specifies the path of template.
|
||||
#
|
||||
# Default "/run/vc/vm/template"
|
||||
#template_path = "/run/vc/vm/template"
|
||||
template_path = "/run/vc/vm/template"
|
||||
|
||||
# The number of caches of VMCache:
|
||||
# unspecified or == 0 --> VMCache is disabled
|
||||
@@ -524,17 +504,17 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# a new sandbox.
|
||||
#
|
||||
# Default 0
|
||||
#vm_cache_number = 0
|
||||
vm_cache_number = 0
|
||||
|
||||
# Specify the address of the Unix socket that is used by VMCache.
|
||||
#
|
||||
# Default /var/run/kata-containers/cache.sock
|
||||
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
|
||||
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -548,7 +528,7 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Comma separated list of kernel modules and their parameters.
|
||||
# These modules will be loaded in the guest kernel using modprobe(8).
|
||||
@@ -561,14 +541,14 @@ disable_guest_selinux=@DEFDISABLEGUESTSELINUX@
|
||||
# * The module is not available in the guest or it doesn't meet the guest kernel
|
||||
# requirements, like architecture and version.
|
||||
#
|
||||
kernel_modules=[]
|
||||
kernel_modules = []
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 45)
|
||||
@@ -576,13 +556,13 @@ dial_timeout = 45
|
||||
|
||||
# Confidential Data Hub API timeout value in seconds
|
||||
# (default: 50)
|
||||
#cdh_api_timeout = 50
|
||||
cdh_api_timeout = 50
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -600,19 +580,19 @@ dial_timeout = 45
|
||||
# Uses tc filter rules to redirect traffic from the network interface
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
internetworking_model="@DEFNETWORKMODEL_QEMU@"
|
||||
internetworking_model = "@DEFNETWORKMODEL_QEMU@"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
# machine and applied by the kata agent. If set to true, seccomp is not applied
|
||||
# within the guest
|
||||
# (default: true)
|
||||
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
|
||||
|
||||
# vCPUs pinning settings
|
||||
# if enabled, each vCPU thread will be scheduled to a fixed CPU
|
||||
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
|
||||
# enable_vcpus_pinning = false
|
||||
enable_vcpus_pinning = false
|
||||
|
||||
# Apply a custom SELinux security policy to the container process inside the VM.
|
||||
# This is used when you want to apply a type other than the default `container_t`,
|
||||
@@ -620,22 +600,23 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# (format: "user:role:type")
|
||||
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
|
||||
# categories are determined automatically by high-level container runtimes such as containerd.
|
||||
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
|
||||
# Example value when enabling: "system_u:system_r:container_t"
|
||||
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -643,7 +624,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
|
||||
# (like OVS) directly.
|
||||
# (default: false)
|
||||
#disable_new_netns = true
|
||||
disable_new_netns = false
|
||||
|
||||
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
|
||||
# The container cgroups in the host are not created, just one single cgroup per sandbox.
|
||||
@@ -651,7 +632,7 @@ disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -660,13 +641,13 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@
|
||||
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT@
|
||||
|
||||
# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bind mounted into the shared fs directory.
|
||||
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
|
||||
# These will not be exposed to the container workloads, and are only provided for potential guest services.
|
||||
sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
sandbox_bind_mounts = @DEFBINDMOUNTS@
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be presented to the container.
|
||||
@@ -687,22 +668,22 @@ sandbox_bind_mounts=@DEFBINDMOUNTS@
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
|
||||
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# Indicates the CreateContainer request timeout needed for the workload(s)
|
||||
# If using guest_pull, this includes the time to pull the image inside the guest
|
||||
|
||||
@@ -16,24 +16,6 @@
|
||||
remote_hypervisor_socket = "/run/peerpod/hypervisor.sock"
|
||||
remote_hypervisor_timeout = 600
|
||||
|
||||
|
||||
# Enable confidential guest support.
|
||||
# Toggling that setting may trigger different hardware features, ranging
|
||||
# from memory encryption to both memory and CPU-state encryption and integrity.
|
||||
# The Kata Containers runtime dynamically detects the available feature set and
|
||||
# aims at enabling the largest possible one, returning an error if none is
|
||||
# available, or none is supported by the hypervisor.
|
||||
#
|
||||
# Known limitations:
|
||||
# * Does not work by design:
|
||||
# - CPU Hotplug
|
||||
# - Memory Hotplug
|
||||
# - NVDIMM devices
|
||||
#
|
||||
# Default false
|
||||
# confidential_guest = true
|
||||
|
||||
|
||||
# List of valid annotation names for the hypervisor
|
||||
# Each member of the list is a regular expression, which is the base name
|
||||
# of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
|
||||
@@ -102,13 +84,13 @@ default_bridges = @DEFBRIDGES@
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This will determine the times that memory will be hot-added to the sandbox/VM.
|
||||
# Note: the remote hypervisor uses the peer pod config to determine the memory of the VM
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available. And Debug also enable the hmp socket.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Path to OCI hook binaries in the *guest rootfs*.
|
||||
# This does not affect host-side hooks which must instead be added to
|
||||
@@ -125,10 +107,11 @@ default_bridges = @DEFBRIDGES@
|
||||
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
|
||||
# Warnings will be logged if any error is encountered while scanning for hooks,
|
||||
# but it will not abort container execution.
|
||||
#guest_hook_path = "/usr/share/oci/hooks"
|
||||
# Recommended value when enabling: "/usr/share/oci/hooks"
|
||||
guest_hook_path = ""
|
||||
|
||||
# disable applying SELinux on the VMM process (default false)
|
||||
disable_selinux=@DEFDISABLESELINUX@
|
||||
disable_selinux = @DEFDISABLESELINUX@
|
||||
|
||||
# disable applying SELinux on the container process
|
||||
# If set to false, the type `container_t` is applied to the container process by default.
|
||||
@@ -141,7 +124,7 @@ disable_guest_selinux = true
|
||||
[agent.@PROJECT_TYPE@]
|
||||
# If enabled, make the agent display debug-level messages.
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Enable agent tracing.
|
||||
#
|
||||
@@ -155,24 +138,24 @@ disable_guest_selinux = true
|
||||
# increasing the container shutdown time slightly.
|
||||
#
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Enable debug console.
|
||||
|
||||
# If enabled, user can connect guest OS running inside hypervisor
|
||||
# through "kata-runtime exec <sandbox-id>" command
|
||||
|
||||
#debug_console_enabled = true
|
||||
debug_console_enabled = false
|
||||
|
||||
# Agent connection dialing timeout value in seconds
|
||||
# (default: 30)
|
||||
#dial_timeout = 30
|
||||
dial_timeout = 30
|
||||
|
||||
[runtime]
|
||||
# If enabled, the runtime will log additional debug messages to the
|
||||
# system log
|
||||
# (default: disabled)
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
#
|
||||
# Internetworking model
|
||||
# Determines how the VM should be connected to the
|
||||
@@ -191,7 +174,7 @@ disable_guest_selinux = true
|
||||
# provided by plugin to a tap interface connected to the VM.
|
||||
#
|
||||
# Note: The remote hypervisor uses its own network, so "none" is required
|
||||
internetworking_model="none"
|
||||
internetworking_model = "none"
|
||||
|
||||
# disable guest seccomp
|
||||
# Determines whether container seccomp profiles are passed to the virtual
|
||||
@@ -199,7 +182,7 @@ internetworking_model="none"
|
||||
# within the guest
|
||||
# (default: true)
|
||||
# Note: The remote hypervisor has a different guest, so currently requires this to be set to true
|
||||
disable_guest_seccomp=true
|
||||
disable_guest_seccomp = true
|
||||
|
||||
|
||||
# Apply a custom SELinux security policy to the container process inside the VM.
|
||||
@@ -208,22 +191,23 @@ disable_guest_seccomp=true
|
||||
# (format: "user:role:type")
|
||||
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
|
||||
# categories are determined automatically by high-level container runtimes such as containerd.
|
||||
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
|
||||
# Example value when enabling: "system_u:system_r:container_t"
|
||||
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
|
||||
|
||||
# If enabled, the runtime will create opentracing.io traces and spans.
|
||||
# (See https://www.jaegertracing.io/docs/getting-started).
|
||||
# (default: disabled)
|
||||
#enable_tracing = true
|
||||
enable_tracing = false
|
||||
|
||||
# Set the full url to the Jaeger HTTP Thrift collector.
|
||||
# The default if not set will be "http://localhost:14268/api/traces"
|
||||
#jaeger_endpoint = ""
|
||||
jaeger_endpoint = ""
|
||||
|
||||
# Sets the username to be used if basic auth is required for Jaeger.
|
||||
#jaeger_user = ""
|
||||
jaeger_user = ""
|
||||
|
||||
# Sets the password to be used if basic auth is required for Jaeger.
|
||||
#jaeger_password = ""
|
||||
jaeger_password = ""
|
||||
|
||||
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
|
||||
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
|
||||
@@ -240,7 +224,7 @@ disable_new_netns = true
|
||||
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
|
||||
# The sandbox cgroup is constrained if there is no container type annotation.
|
||||
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
|
||||
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
|
||||
|
||||
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
|
||||
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
|
||||
@@ -250,7 +234,7 @@ sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
|
||||
# does not yet support sandbox sizing annotations.
|
||||
# - When running single containers using a tool like ctr, container sizing information will be available.
|
||||
# Note: the remote hypervisor uses the peer pod config to determine the sandbox size, so requires this to be set to true
|
||||
static_sandbox_resource_mgmt=true
|
||||
static_sandbox_resource_mgmt = true
|
||||
|
||||
# VFIO Mode
|
||||
# Determines how VFIO devices should be presented to the container.
|
||||
@@ -271,23 +255,23 @@ static_sandbox_resource_mgmt=true
|
||||
# Using this mode requires specially built workloads that know how
|
||||
# to locate the relevant device interfaces within the VM.
|
||||
#
|
||||
vfio_mode="@DEFVFIOMODE@"
|
||||
vfio_mode = "@DEFVFIOMODE@"
|
||||
|
||||
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
|
||||
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
|
||||
# Note: remote hypervisor has no sharing of emptydir mounts from host to guest
|
||||
disable_guest_empty_dir=false
|
||||
disable_guest_empty_dir = false
|
||||
|
||||
# Enabled experimental feature list, format: ["a", "b"].
|
||||
# Experimental features are features not stable enough for production,
|
||||
# they may break compatibility, and are prepared for a big version bump.
|
||||
# Supported experimental features:
|
||||
# (default: [])
|
||||
experimental=@DEFAULTEXPFEATURES@
|
||||
experimental = @DEFAULTEXPFEATURES@
|
||||
|
||||
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
|
||||
# (default: false)
|
||||
# enable_pprof = true
|
||||
enable_pprof = false
|
||||
|
||||
# Indicates the CreateContainer request timeout needed for the workload(s)
|
||||
# It using guest_pull this includes the time to pull the image inside the guest
|
||||
|
||||
@@ -13,8 +13,7 @@
|
||||
[hypervisor.stratovirt]
|
||||
path = "@STRATOVIRTPATH@"
|
||||
kernel = "@KERNELPATH_STRATOVIRT@"
|
||||
#image = "@IMAGEPATH@"
|
||||
initrd = "@INITRDPATH@"
|
||||
image = "@IMAGEPATH@"
|
||||
machine_type = "@DEFMACHINETYPE_STRATOVIRT@"
|
||||
|
||||
# rootfs filesystem type:
|
||||
@@ -89,7 +88,7 @@ default_memory = @DEFMEMSZ@
|
||||
# Default memory slots per SB/VM.
|
||||
# If unspecified then it will be set @DEFMEMSLOTS@.
|
||||
# This will determine the times that memory will be hot-added to the sandbox/VM.
|
||||
#memory_slots = @DEFMEMSLOTS@
|
||||
memory_slots = @DEFMEMSLOTS@
|
||||
|
||||
# Default maximum memory in MiB per SB / VM
|
||||
# unspecified or == 0 --> will be set to the actual amount of physical RAM
|
||||
@@ -102,7 +101,7 @@ default_maxmemory = @DEFMAXMEMSZ@
|
||||
# If set block storage driver (block_device_driver) to "nvdimm",
|
||||
# should set memory_offset to the size of block device.
|
||||
# Default 0
|
||||
#memory_offset = 0
|
||||
memory_offset = 0
|
||||
|
||||
# Disable block device from being used for a container's rootfs.
|
||||
# In case of a storage driver like devicemapper where a container's
|
||||
@@ -164,17 +163,17 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_STRATOVIRT@"
|
||||
|
||||
# Specifies cache-related options will be set to block devices or not.
|
||||
# Default false
|
||||
#block_device_cache_set = true
|
||||
block_device_cache_set = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
|
||||
# Default false
|
||||
#block_device_cache_direct = true
|
||||
block_device_cache_direct = false
|
||||
|
||||
# Specifies cache-related options for block devices.
|
||||
# Denotes whether flush requests for the device are ignored.
|
||||
# Default false
|
||||
#block_device_cache_noflush = true
|
||||
block_device_cache_noflush = false
|
||||
|
||||
# Enable huge pages for VM RAM, default false
|
||||
# Enabling this will result in the VM memory
|
||||
@@ -182,25 +181,25 @@ block_device_driver = "@DEFBLOCKSTORAGEDRIVER_STRATOVIRT@"
|
||||
# This is useful when you want to use vhost-user network
|
||||
# stacks within the container. This will automatically
|
||||
# result in memory pre allocation
|
||||
#enable_hugepages = true
|
||||
enable_hugepages = false
|
||||
|
||||
# Enable vIOMMU, default false
|
||||
# Enabling this will result in the VM having a vIOMMU device
|
||||
# This will also add the following options to the kernel's
|
||||
# command line: intel_iommu=on,iommu=pt
|
||||
#enable_iommu = true
|
||||
enable_iommu = false
|
||||
|
||||
# This option changes the default hypervisor and kernel parameters
|
||||
# to enable debug output where available.
|
||||
#
|
||||
# Default false
|
||||
#enable_debug = true
|
||||
enable_debug = false
|
||||
|
||||
# Disable the customizations done in the runtime when it detects
|
||||
# that it is running on top a VMM. This will result in the runtime
|
||||
# behaving as it would when running on bare metal.
|
||||
#
|
||||
#disable_nesting_checks = true
|
||||
disable_nesting_checks = false
|
||||
|
||||
#
|
||||
# Default entropy source.
|
||||
@@ -229,7 +228,8 @@ entropy_source = "@DEFENTROPYSOURCE@"
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# Recommended value when enabling: "/usr/share/oci/hooks"
guest_hook_path = ""

# disable applying SELinux on the VMM process (default false)
disable_selinux = @DEFDISABLESELINUX@
@@ -253,12 +253,12 @@ disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
enable_template = false

[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
enable_debug = false

# Enable agent tracing.
#
@@ -272,7 +272,7 @@ disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
@@ -292,7 +292,7 @@ kernel_modules = []
# If enabled, user can connect guest OS running inside hypervisor
# through "kata-runtime exec <sandbox-id>" command

#debug_console_enabled = true
debug_console_enabled = false

# Agent connection dialing timeout value in seconds
# (default: 45)
@@ -300,13 +300,13 @@ dial_timeout = 45

# Confidential Data Hub API timeout value in seconds
# (default: 50)
#cdh_api_timeout = 50
cdh_api_timeout = 50

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
enable_debug = false
#
# Internetworking model
# Determines how the VM should be connected to the
@@ -336,7 +336,7 @@ disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
#enable_vcpus_pinning = false
enable_vcpus_pinning = true

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
@@ -344,22 +344,23 @@ disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
# Example value when enabling: "system_u:system_r:container_t"
guest_selinux_label = "@DEFGUESTSELINUXLABEL@"

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
enable_tracing = false

# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
@@ -367,7 +368,7 @@ disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
disable_new_netns = false

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
@@ -399,7 +400,7 @@ experimental = @DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
#enable_pprof = true
enable_pprof = false

# Indicates the CreateContainer request timeout needed for the workload(s)
# If using guest_pull this includes the time to pull the image inside the guest

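Every hunk above follows the same pattern: a commented-out option such as "#enable_debug = true" is replaced by an explicit assignment of the default value (for example "enable_debug = false"). As a rough, hypothetical sanity check (not part of this change), a generated configuration can be scanned for option assignments that are still commented out:

    # Hypothetical check: list lines that still look like commented-out option assignments.
    config="/opt/kata/share/defaults/kata-containers/configuration-qemu.toml"   # example path
    grep -nE '^#[A-Za-z_]+ *=' "${config}" || echo "no commented-out options found"
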
@@ -615,7 +615,20 @@ func addHypervisorPathOverrides(ocispec specs.Spec, config *vc.SandboxConfig, ru
if value, ok := ocispec.Annotations[vcAnnotations.KernelParams]; ok {
if value != "" {
params := vc.DeserializeParams(strings.Fields(value))

// Annotation parameters should replace existing parameters with the same key
// rather than append, to allow overriding default values
for _, param := range params {
// Remove any existing parameter with the same key
var newParams []vc.Param
for _, existingParam := range config.HypervisorConfig.KernelParams {
if existingParam.Key != param.Key {
newParams = append(newParams, existingParam)
}
}
config.HypervisorConfig.KernelParams = newParams

// Now add the annotation parameter
if err := config.HypervisorConfig.AddKernelParam(param); err != nil {
return fmt.Errorf("Error adding kernel parameters in annotation kernel_params : %v", err)
}

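To make the intended semantics concrete: a kernel parameter supplied via the annotation replaces any existing parameter with the same key instead of being appended after it. The sketch below is only an illustration of that replace-by-key behaviour in shell (the runtime does this in Go, on config.HypervisorConfig.KernelParams); the helper name and sample values are hypothetical.

    # Hypothetical sketch: replace "key=value" entries by key in a space-separated list.
    replace_kernel_param() {
        local params="$1" new="$2"
        local key="${new%%=*}" out=""
        for p in ${params}; do
            [[ "${p%%=*}" == "${key}" ]] && continue   # drop any entry with the same key
            out+="${p} "
        done
        echo "${out}${new}"
    }

    replace_kernel_param "console=ttyS0 agent.log=info" "agent.log=debug"
    # -> console=ttyS0 agent.log=debug
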
@@ -54,14 +54,16 @@ function ci_config() {
if [ "$ID" == ubuntu ]; then
# https://github.com/kata-containers/tests/issues/352
if [ -n "${FACTORY_TEST}" ]; then
sudo sed -i -e 's/^#enable_template.*$/enable_template = true/g' "${kata_config}"
# Handle both commented and uncommented enable_template
sudo sed -i -e 's/^#\?enable_template.*$/enable_template = true/g' "${kata_config}"
echo "init vm template"
sudo -E PATH=$PATH "$RUNTIME" factory init
fi
fi

echo "enable debug for kata-runtime"
sudo sed -i 's/^#enable_debug =/enable_debug =/g' ${kata_config}
# Handle both commented and uncommented enable_debug
sudo sed -i 's/^#\?enable_debug = .*$/enable_debug = true/g' ${kata_config}
}

function ci_cleanup() {
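
The \? in the new sed expressions makes the leading "#" optional, so the same substitution works whether the option is still commented out or has already been uncommented; the enable_debug substitution now also forces the value to true. A quick standalone check of that pattern (assuming GNU sed, as used in CI):

    printf '%s\n' '#enable_template = true' 'enable_template = false' \
        | sed 's/^#\?enable_template.*$/enable_template = true/g'
    # Both input lines become: enable_template = true
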
@@ -285,11 +287,13 @@ function PrepareContainerMemoryUpdate() {
fi
info "Test container memory update with virtio-mem"

sudo sed -i -e 's/^#enable_virtio_mem.*$/enable_virtio_mem = true/g' "${kata_config}"
# Handle both commented and uncommented enable_virtio_mem
sudo sed -i -e 's/^#\?enable_virtio_mem.*$/enable_virtio_mem = true/g' "${kata_config}"
else
info "Test container memory update without virtio-mem"

sudo sed -i -e 's/^enable_virtio_mem.*$/#enable_virtio_mem = true/g' "${kata_config}"
# Set to false instead of commenting out
sudo sed -i -e 's/^#\?enable_virtio_mem.*$/enable_virtio_mem = false/g' "${kata_config}"
fi
}

@@ -347,7 +351,8 @@ function TestContainerSwap() {
info "Test container with guest swap"

create_containerd_config "kata-${KATA_HYPERVISOR}" 1
sudo sed -i -e 's/^#enable_guest_swap.*$/enable_guest_swap = true/g' "${kata_config}"
# Handle both commented and uncommented enable_guest_swap
sudo sed -i -e 's/^#\?enable_guest_swap.*$/enable_guest_swap = true/g' "${kata_config}"

# Test without swap device
testContainerStart

@@ -33,38 +33,36 @@ info() {
echo "INFO: $msg" >&2
}

# Check if a value exists within a specific field in the config file
# * field_contains_value "${config}" "kernel_params" "agent.log=debug"
field_contains_value() {
local config_file="$1"
local field="$2"
local value="$3"
# Use word boundaries (\b) to match complete parameters, not substrings
# This handles space-separated values like kernel_params = "param1 param2 param3"
grep -qE "^${field}[^=]*=.*[[:space:]\"](${value})([[:space:]\"]|$)" "${config_file}"
}

# Get existing values from a TOML array field and return them as a comma-separated string
# * get_field_array_values "${config}" "enable_annotations"
# * get_field_array_values "${config}" "enable_annotations" "${shim}"
get_field_array_values() {
local config_file="$1"
local field="$2"
# Extract values from field = ["val1", "val2", ...] format
grep "^${field} = " "${config_file}" | sed "s/^${field} = \[\(.*\)\]/\1/" | sed 's/"//g' | sed 's/, /,/g'
}

# Check if a boolean config is already set to true
config_is_true() {
local config_file="$1"
local key="$2"
grep -qE "^${key}\s*=\s*true" "${config_file}"
}

# Check if a string value already exists anywhere in the file (literal match)
string_exists_in_file() {
local file_path="$1"
local string="$2"
grep -qF "${string}" "${file_path}"
local shim="${3:-}"

# Determine hypervisor name if shim is provided
local hypervisor_name=""
if [[ -n "${shim}" ]]; then
hypervisor_name=$(get_hypervisor_name "${shim}")
fi

# Get array values using tomlq - output each element on a new line, then convert to comma-separated
local array_values=""
if [[ -n "${hypervisor_name}" ]]; then
array_values=$(tomlq -r '.hypervisor.'"${hypervisor_name}"'.'"${field}"' // [] | .[]' "${config_file}" 2>/dev/null || echo "")
fi

# Fallback: try without hypervisor prefix (for top-level fields)
if [[ -z "${array_values}" ]] || [[ "${array_values}" == "null" ]]; then
array_values=$(tomlq -r '."'"${field}"'" // [] | .[]' "${config_file}" 2>/dev/null || echo "")
fi

# Convert newline-separated values to comma-separated string
if [[ -n "${array_values}" ]]; then
echo "${array_values}" | tr '\n' ',' | sed 's/,$//'
else
echo ""
fi
}

DEBUG="${DEBUG:-"false"}"
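
The new get_field_array_values() reads arrays with tomlq (shipped as part of the Python yq tooling) instead of scraping the file with grep and sed, first under the per-hypervisor section and then falling back to a top-level key. A minimal standalone illustration of the same query shape, assuming tomlq is installed and using a throw-away file:

    # Throw-away example file (hypothetical content).
    printf '[hypervisor.qemu]\nenable_annotations = ["enable_iommu", "default_vcpus"]\n' > /tmp/kata-example.toml
    tomlq -r '.hypervisor.qemu.enable_annotations // [] | .[]' /tmp/kata-example.toml
    # enable_iommu
    # default_vcpus
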
@@ -474,8 +472,11 @@ function tdx_supported() {
version="${2}"
config="${3}"

sed -i -e "s|PLACEHOLDER_FOR_DISTRO_QEMU_WITH_TDX_SUPPORT|$(get_tdx_qemu_path_from_distro ${distro})|g" ${config}
sed -i -e "s|PLACEHOLDER_FOR_DISTRO_OVMF_WITH_TDX_SUPPORT|$(get_tdx_ovmf_path_from_distro ${distro})|g" ${config}
local qemu_path=$(get_tdx_qemu_path_from_distro ${distro})
local ovmf_path=$(get_tdx_ovmf_path_from_distro ${distro})

tomlq -i -t '.hypervisor.qemu.path = "'"${qemu_path}"'"' "${config}" 2>/dev/null || true
tomlq -i -t '.hypervisor.qemu.firmware = "'"${ovmf_path}"'"' "${config}" 2>/dev/null || true

info "In order to use the tdx related runtime classes, ensure TDX is properly configured for ${distro} ${version} by following the instructions provided at: $(get_tdx_distro_instructions ${distro})"
}
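
The write path uses tomlq -i -t, which edits the file in place (-i) and serializes the result back to TOML (-t), so the old placeholder-substituting sed calls are no longer needed. A small sketch against the throw-away file from the previous example (the qemu path below is just an assumed value):

    tomlq -i -t '.hypervisor.qemu.path = "/usr/bin/qemu-system-x86_64"' /tmp/kata-example.toml
    tomlq -r '.hypervisor.qemu.path' /tmp/kata-example.toml
    # /usr/bin/qemu-system-x86_64
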
@@ -558,9 +559,38 @@ EOF
chmod +x ${qemu_binary_script_host_path}
fi

if ! string_exists_in_file "${config_path}" "${qemu_binary_script}"; then
sed -i -e "s|${qemu_binary}|${qemu_binary_script}|" ${config_path}
fi
tomlq -i -t '.hypervisor.qemu.path = "'"${qemu_binary_script}"'"' "${config_path}" 2>/dev/null || true
}

function get_hypervisor_name() {
local shim="${1}"
case "${shim}" in
qemu-runtime-rs | qemu-coco-dev-runtime-rs | qemu-se-runtime-rs | qemu | qemu-tdx | qemu-snp | qemu-se | qemu-coco-dev | qemu-cca | qemu-nvidia-gpu | qemu-nvidia-gpu-tdx | qemu-nvidia-gpu-snp)
echo "qemu"
;;
clh)
echo "clh"
;;
cloud-hypervisor)
echo "cloud-hypervisor"
;;
dragonball)
echo "dragonball"
;;
fc | firecracker)
echo "firecracker"
;;
stratovirt)
echo "stratovirt"
;;
remote)
echo "remote"
;;
*)
# Default to the shim name itself if no specific mapping
echo "${shim}"
;;
esac
}

function install_artifacts() {
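
get_hypervisor_name() maps a shim/runtime-class name to the [hypervisor.<name>] section used in its configuration file, which is what the tomlq filters below key on. Illustrative calls (inputs other than the ones listed in the case statement are hypothetical; return values shown as comments):

    get_hypervisor_name "qemu-tdx"    # qemu
    get_hypervisor_name "fc"          # firecracker
    get_hypervisor_name "some-shim"   # some-shim (fallback: the shim name itself)
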
@@ -579,6 +609,7 @@ function install_artifacts() {
mkdir -p "$config_path"

local kata_config_file="${config_path}/configuration-${shim}.toml"

# Till deprecation period is over, we need to support:
# * "http://proxy:8080" (applies to all shims)
# * per-shim format: "qemu-tdx=http://proxy:8080;qemu-snp=http://proxy2:8080"
@@ -603,8 +634,14 @@ function install_artifacts() {
fi

if [[ -n "${https_proxy_value}" ]]; then
if ! field_contains_value "${kata_config_file}" "kernel_params" "agent.https_proxy"; then
sed -i -e 's|^kernel_params = "\(.*\)"|kernel_params = "\1 agent.https_proxy='"${https_proxy_value}"'"|g' "${kata_config_file}"
local hypervisor_name=$(get_hypervisor_name "${shim}")
local current_params=$(tomlq -r '.hypervisor.'"${hypervisor_name}"'.kernel_params // ""' "${kata_config_file}" 2>/dev/null || echo "")
# Only add if not already present
if [[ "${current_params}" != *"agent.https_proxy"* ]]; then
local new_params="${current_params}"
[[ -n "${new_params}" ]] && new_params+=" "
new_params+="agent.https_proxy=${https_proxy_value}"
tomlq -i -t '.hypervisor.'"${hypervisor_name}"'.kernel_params = "'"${new_params}"'"' "${kata_config_file}" 2>/dev/null || true
fi
fi
fi
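
Instead of a sed rewrite of the kernel_params line, the proxy setting is now appended to whatever kernel_params the shipped configuration already carries, and only if it is not there yet. The string handling in isolation, with hypothetical values:

    current_params="tsc=reliable no_timer_check"        # hypothetical existing value
    https_proxy_value="http://proxy.example.com:8080"   # hypothetical proxy
    new_params="${current_params}"
    [[ -n "${new_params}" ]] && new_params+=" "
    new_params+="agent.https_proxy=${https_proxy_value}"
    echo "${new_params}"
    # tsc=reliable no_timer_check agent.https_proxy=http://proxy.example.com:8080
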
@@ -633,30 +670,55 @@ function install_artifacts() {
fi

if [[ -n "${no_proxy_value}" ]]; then
if ! field_contains_value "${kata_config_file}" "kernel_params" "agent.no_proxy"; then
sed -i -e 's|^kernel_params = "\(.*\)"|kernel_params = "\1 agent.no_proxy='"${no_proxy_value}"'"|g' "${kata_config_file}"
local hypervisor_name=$(get_hypervisor_name "${shim}")
local current_params=$(tomlq -r '.hypervisor.'"${hypervisor_name}"'.kernel_params // ""' "${kata_config_file}" 2>/dev/null || echo "")
# Only add if not already present
if [[ "${current_params}" != *"agent.no_proxy"* ]]; then
local new_params="${current_params}"
[[ -n "${new_params}" ]] && new_params+=" "
new_params+="agent.no_proxy=${no_proxy_value}"
tomlq -i -t '.hypervisor.'"${hypervisor_name}"'.kernel_params = "'"${new_params}"'"' "${kata_config_file}" 2>/dev/null || true
fi
fi
fi

# Allow enabling debug for Kata Containers
if [[ "${DEBUG}" == "true" ]]; then
if ! config_is_true "${kata_config_file}" "enable_debug"; then
sed -i -e 's/^#\{0,1\}\(enable_debug\).*=.*$/\1 = true/g' "${kata_config_file}"
fi
if ! config_is_true "${kata_config_file}" "debug_console_enabled"; then
sed -i -e 's/^#\{0,1\}\(debug_console_enabled\).*=.*$/\1 = true/g' "${kata_config_file}"
local hypervisor_name=$(get_hypervisor_name "${shim}")

# Only set these boolean flags if not already set to true
local current_enable_debug=$(tomlq -r '.hypervisor.'"${hypervisor_name}"'.enable_debug // false' "${kata_config_file}" 2>/dev/null || echo "false")
if [[ "${current_enable_debug}" != "true" ]]; then
tomlq -i -t '.hypervisor.'"${hypervisor_name}"'.enable_debug = true' "${kata_config_file}" 2>/dev/null || true
fi

local current_runtime_debug=$(tomlq -r '.runtime.enable_debug // false' "${kata_config_file}" 2>/dev/null || echo "false")
if [[ "${current_runtime_debug}" != "true" ]]; then
tomlq -i -t '.runtime.enable_debug = true' "${kata_config_file}" 2>/dev/null || true
fi

local current_debug_console=$(tomlq -r '.agent.kata.debug_console_enabled // false' "${kata_config_file}" 2>/dev/null || echo "false")
if [[ "${current_debug_console}" != "true" ]]; then
tomlq -i -t '.agent.kata.debug_console_enabled = true' "${kata_config_file}" 2>/dev/null || true
fi

local current_agent_debug=$(tomlq -r '.agent.kata.enable_debug // false' "${kata_config_file}" 2>/dev/null || echo "false")
if [[ "${current_agent_debug}" != "true" ]]; then
tomlq -i -t '.agent.kata.enable_debug = true' "${kata_config_file}" 2>/dev/null || true
fi

# Add debug kernel params if not already present
local current_params=$(tomlq -r '.hypervisor.'"${hypervisor_name}"'.kernel_params // ""' "${kata_config_file}" 2>/dev/null || echo "")
local debug_params=""
if ! field_contains_value "${kata_config_file}" "kernel_params" "agent.log=debug"; then
if [[ "${current_params}" != *"agent.log=debug"* ]]; then
debug_params+=" agent.log=debug"
fi
if ! field_contains_value "${kata_config_file}" "kernel_params" "initcall_debug"; then
if [[ "${current_params}" != *"initcall_debug"* ]]; then
debug_params+=" initcall_debug"
fi
if [[ -n "${debug_params}" ]]; then
sed -i -e "s/^kernel_params = \"\(.*\)\"/kernel_params = \"\1${debug_params}\"/g" "${kata_config_file}"
local new_params="${current_params}${debug_params}"
tomlq -i -t '.hypervisor.'"${hypervisor_name}"'.kernel_params = "'"${new_params}"'"' "${kata_config_file}" 2>/dev/null || true
fi
fi

@@ -702,7 +764,8 @@ function install_artifacts() {
fi

if [[ -n "${all_annotations}" ]]; then
local existing_annotations=$(get_field_array_values "${kata_config_file}" "enable_annotations")
local hypervisor_name=$(get_hypervisor_name "${shim}")
local existing_annotations=$(get_field_array_values "${kata_config_file}" "enable_annotations" "${shim}")

# Combine existing and new annotations
local combined_annotations="${existing_annotations}"
@@ -731,16 +794,14 @@ function install_artifacts() {
for ann in "${unique_annotations[@]}"; do
formatted_annotations+=("\"${ann}\"")
done
local final_annotations=$(IFS=', '; echo "${formatted_annotations[*]}")
sed -i -e "s/^enable_annotations = \[.*\]/enable_annotations = [${final_annotations}]/" "${kata_config_file}"
local final_annotations=$(IFS=','; echo "${formatted_annotations[*]}")
tomlq -i -t '.hypervisor.'"${hypervisor_name}"'.enable_annotations = ['"${final_annotations}"']' "${kata_config_file}" 2>/dev/null || true
fi
fi
fi

if printf '%s\n' "${experimental_force_guest_pull[@]}" | grep -Fxq "${shim}"; then
if ! config_is_true "${kata_config_file}" "experimental_force_guest_pull"; then
sed -i -e 's/^#\{0,1\}\(experimental_force_guest_pull\).*=.*$/\1 = true/g' "${kata_config_file}"
fi
tomlq -i -t '.runtime.experimental_force_guest_pull = true' "${kata_config_file}" 2>/dev/null || true
fi

if grep -q "tdx" <<< "$shim"; then
@@ -800,20 +861,38 @@ function install_artifacts() {
# Allow Mariner to use custom configuration.
if [ "${HOST_OS:-}" == "cbl-mariner" ]; then
config_path="${host_install_dir}/share/defaults/kata-containers/configuration-clh.toml"

if ! config_is_true "${config_path}" "static_sandbox_resource_mgmt"; then
sed -i -E "s|(static_sandbox_resource_mgmt)\s*=\s*false|\1=true|" "${config_path}"
fi

clh_path="${dest_dir}/bin/cloud-hypervisor-glibc"
local mariner_hypervisor_name="clh"

if ! field_contains_value "${config_path}" "valid_hypervisor_paths" "${clh_path}"; then
sed -i -E "s|(valid_hypervisor_paths) = .+|\1 = [\"${clh_path}\"]|" "${config_path}"
tomlq -i -t '.hypervisor.'"${mariner_hypervisor_name}"'.static_sandbox_resource_mgmt = true' "${config_path}" 2>/dev/null || true

# Append to valid_hypervisor_paths if not already present
local existing_paths=$(tomlq -r '.hypervisor.'"${mariner_hypervisor_name}"'.valid_hypervisor_paths // [] | .[]' "${config_path}" 2>/dev/null || echo "")
local path_exists=false
if [[ -n "${existing_paths}" ]]; then
while IFS= read -r path; do
path=$(echo "${path}" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
if [[ "${path}" == "${clh_path}" ]]; then
path_exists=true
break
fi
done <<< "${existing_paths}"
fi

if ! field_contains_value "${config_path}" "path" "${clh_path}"; then
sed -i -E "s|(path) = \".+/cloud-hypervisor\"|\1 = \"${clh_path}\"|" "${config_path}"
if [[ "${path_exists}" == "false" ]]; then
local formatted_paths=()
if [[ -n "${existing_paths}" ]]; then
while IFS= read -r path; do
path=$(echo "${path}" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
formatted_paths+=("\"${path}\"")
done <<< "${existing_paths}"
fi
formatted_paths+=("\"${clh_path}\"")
local final_paths=$(IFS=','; echo "${formatted_paths[*]}")
tomlq -i -t '.hypervisor.'"${mariner_hypervisor_name}"'.valid_hypervisor_paths = ['"${final_paths}"']' "${config_path}" 2>/dev/null || true
fi

tomlq -i -t '.hypervisor.'"${mariner_hypervisor_name}"'.path = "'"${clh_path}"'"' "${config_path}" 2>/dev/null || true
fi

local expand_runtime_classes_for_nfd=false

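Because the jq program handed to tomlq is assembled as a shell string, array elements are pre-quoted in bash and joined with commas before being spliced into the filter, as in the valid_hypervisor_paths update above. The quoting step in isolation (the paths are hypothetical):

    formatted_paths=('"/opt/cloud-hypervisor"' '"/usr/bin/cloud-hypervisor-glibc"')
    final_paths=$(IFS=','; echo "${formatted_paths[*]}")
    echo "${final_paths}"
    # "/opt/cloud-hypervisor","/usr/bin/cloud-hypervisor-glibc"
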
@@ -108,15 +108,14 @@ case "${RUNTIME_CHOICE}" in
esac

for vmm in ${VMM_CONFIGS}; do
config_file="${DESTDIR}/${PREFIX}/share/defaults/kata-containers/configuration-${vmm}.toml"
if [ -f ${config_file} ]; then
if [ ${ARCH} == "ppc64le" ]; then
sed -i -e '/^image =/d' ${config_file}
sed -i 's/^# \(initrd =.*\)/\1/g' ${config_file}
else
sed -i -e '/^initrd =/d' ${config_file}
fi
fi
for config_file in "${DESTDIR}/${PREFIX}/share/defaults/kata-containers/configuration-${vmm}"*.toml; do
if [ -f "${config_file}" ]; then
if [ ${ARCH} == "ppc64le" ]; then
# On ppc64le, replace image line with initrd line
sed -i -e 's|^image = .*|initrd = "'${PREFIX}'/share/kata-containers/kata-containers-initrd.img"|' "${config_file}"
fi
fi
done
done

pushd "${DESTDIR}/${PREFIX}/share/defaults/kata-containers"

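On ppc64le the new loop rewrites the image assignment into an initrd assignment instead of deleting one line and un-commenting another. The substitution applied to a sample line, with a hypothetical PREFIX:

    PREFIX=/opt/kata   # hypothetical install prefix
    echo 'image = "/opt/kata/share/kata-containers/kata-containers.img"' \
        | sed -e 's|^image = .*|initrd = "'${PREFIX}'/share/kata-containers/kata-containers-initrd.img"|'
    # initrd = "/opt/kata/share/kata-containers/kata-containers-initrd.img"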