# Copyright (c) 2019 Ericsson Eurolab Deutschland GmbH
# Copyright (c) 2021 Adobe Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# XXX: WARNING: this file is auto-generated.
# XXX:
# XXX: Source file: "@CONFIG_CLH_IN@"
# XXX: Project:
# XXX: Name: @PROJECT_NAME@
# XXX: Type: @PROJECT_TYPE@

[hypervisor.clh]
path = "@CLHPATH@"
kernel = "@KERNELPATH_CLH@"
image = "@IMAGEPATH@"

# rootfs filesystem type:
# - ext4 (default)
# - xfs
# - erofs
rootfs_type=@DEFROOTFSTYPE@

# Enable confidential guest support.
# Toggling that setting may trigger different hardware features, ranging
# from memory encryption to both memory and CPU-state encryption and integrity.
# The Kata Containers runtime dynamically detects the available feature set and
# aims at enabling the largest possible one, returning an error if none is
# available, or none is supported by the hypervisor.
#
# Known limitations:
# * Does not work by design:
# - CPU Hotplug
# - Memory Hotplug
# - NVDIMM devices
#
# Supported TEEs:
# * Intel TDX
#
# Default false
# confidential_guest = true

# Enable running clh VMM as a non-root user.
# By default the clh VMM runs as root. When this is set to true, the clh VMM
# process runs as a non-root random user. See the documentation for the
# limitations of this mode.
# rootless = true

# disable applying SELinux on the VMM process (default false)
disable_selinux=@DEFDISABLESELINUX@

# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be a CentOS image created
# and built with `SELINUX=yes`.
# (default: true)
disable_guest_selinux=@DEFDISABLEGUESTSELINUX@

# Path to the firmware.
# If you want Cloud Hypervisor to use a specific firmware, set its path below.
# This option is only used when confidential_guest is enabled.
#
# For more information about the firmware that can be used with specific TEEs,
# please refer to:
# * Intel TDX:
# - td-shim: https://github.com/confidential-containers/td-shim
#
# firmware = "@FIRMWAREPATH@"

# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for "io.katacontainers.config.hypervisor.path"
enable_annotations = @DEFENABLEANNOTATIONS@
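
# Example (illustrative values): allow the "kernel_params" and "default_vcpus"
# options defined in this file to be overridden via annotations:
# enable_annotations = ["kernel_params", "default_vcpus"]
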
# List of valid annotation values for the hypervisor
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected.)
# Your distribution recommends: @CLHVALIDHYPERVISORPATHS@
valid_hypervisor_paths = @CLHVALIDHYPERVISORPATHS@
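
# Example (illustrative path):
# valid_hypervisor_paths = ["/usr/bin/cloud-hypervisor"]
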
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
# trouble running pre-2.15 glibc.
#
# WARNING: - any parameter specified here will take priority over the default
# parameter value of the same name used to start the virtual machine.
# Do not set values here unless you understand the impact of doing so as you
# may stop the virtual machine from booting.
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
kernel_params = "@KERNELPARAMS@"
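
# Example (illustrative): pass several space-separated options at once:
# kernel_params = "vsyscall=emulate agent.log=debug"
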
# Default number of vCPUs per SB/VM:
# unspecified or 0 --> will be set to @DEFVCPUS@
# < 0 --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores --> will be set to the actual number of physical cores
default_vcpus = 1
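
# Example: on a host with 8 physical cores, `default_vcpus = -1` yields 8
# vCPUs, `default_vcpus = 4` yields 4, and `default_vcpus = 16` is capped at 8.
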
# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number
# of vCPUs supported by KVM if that number is exceeded
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number
# of vCPUs supported by KVM if that number is exceeded
# WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
# the actual number of physical cores is greater than it.
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
# can be added to a SB/VM, but the memory footprint will be big. Another example, with
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
# unless you know what you are doing.
default_maxvcpus = @DEFMAXVCPUS@
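
# Example: on a host with 16 physical cores (and a higher KVM vCPU limit),
# `default_maxvcpus = 0` yields 16, while `default_maxvcpus = 8` caps the
# SB/VM at 8 vCPUs.
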
# Default memory size in MiB for SB/VM.
# If unspecified then it will be set to @DEFMEMSZ@ MiB.
default_memory = @DEFMEMSZ@

# Default memory slots per SB/VM.
# If unspecified then it will be set to @DEFMEMSLOTS@.
# This determines how many times memory can be hot-added to the sandbox/VM.
#memory_slots = @DEFMEMSLOTS@

# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
# > 0 <= amount of physical RAM --> will be set to the specified number
# > amount of physical RAM --> will be set to the actual amount of physical RAM
default_maxmemory = @DEFMAXMEMSZ@

# Shared file system type:
# - virtio-fs (default)
# - virtio-fs-nydus
# - none
shared_fs = "@DEFSHAREDFS_CLH_VIRTIOFS@"

# Path to vhost-user-fs daemon.
virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"

# List of valid annotation values for the virtiofs daemon
# The default if not set is empty (all annotations rejected.)
# Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@
valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@

# Default size of DAX cache in MiB
virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@

# Default size of virtqueues
virtio_fs_queue_size = @DEFVIRTIOFSQUEUESIZE@

# Extra args for virtiofsd daemon
#
# Format example:
# ["--arg1=xxx", "--arg2=yyy"]
# Examples:
# Set virtiofsd log level to debug: ["--log-level=debug"]
# See `virtiofsd -h` for possible options.
virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@

# Cache mode:
#
# - never
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close
# to open consistency).
#
# - always
# Metadata, data, and pathname lookup are cached in guest and never expire.
virtio_fs_cache = "@DEFVIRTIOFSCACHE@"

# Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. Only virtio-blk is supported.
block_device_driver = "virtio-blk"

# Specifies whether cache-related options will be set for block devices.
# Default false
#block_device_cache_set = true

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
#enable_hugepages = true

# Disable the 'seccomp' feature from Cloud Hypervisor, default false
# disable_seccomp = true

# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: iommu=pt
#enable_iommu = true

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available.
#
# Default false
#enable_debug = true

# This option specifies the loglevel of the hypervisor
#
# Default 1
#hypervisor_loglevel = 1

# Enable hot-plugging of VFIO devices to a root-port.
# The default setting is "no-port"
#hot_plug_vfio = "root-port"

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but they will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
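#
# Example layout (illustrative): an executable prestart hook stored in the
# guest rootfs at /usr/share/oci/hooks/prestart/10-setup.sh would be run by
# the agent before the container workload starts.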
#
# These options are related to the network rate limiter at the VMM level, and
# are based on the Cloud Hypervisor I/O throttling. They are disabled by
# default and we strongly advise users to refer to the official Cloud
# Hypervisor documentation for a better understanding of its internals:
# https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md
#
# Bandwidth rate limiter options
#
# net_rate_limiter_bw_max_rate controls network I/O bandwidth (size in bits/sec
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#net_rate_limiter_bw_max_rate = 0
#
# net_rate_limiter_bw_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if net_rate_limiter_bw_max_rate is
# set to a non zero value.
#net_rate_limiter_bw_one_time_burst = 0
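#
# Example (illustrative): cap network bandwidth at roughly 100 Mbit/s with a
# one-time burst of 10 Mbit:
# net_rate_limiter_bw_max_rate = 100000000
# net_rate_limiter_bw_one_time_burst = 10000000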
#
# Operation rate limiter options
#
# net_rate_limiter_ops_max_rate controls network I/O operations (in ops/sec
# for SB/VM).
# The same value is used for inbound and outbound traffic.
# Default 0-sized value means unlimited rate.
#net_rate_limiter_ops_max_rate = 0
#
# net_rate_limiter_ops_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if net_rate_limiter_ops_max_rate is
# set to a non zero value.
#net_rate_limiter_ops_one_time_burst = 0
#
# These options are related to the disk rate limiter at the VMM level, and
# are based on the Cloud Hypervisor I/O throttling. They are disabled by
# default and we strongly advise users to refer to the official Cloud
# Hypervisor documentation for a better understanding of its internals:
# https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md
#
# Bandwidth rate limiter options
#
# disk_rate_limiter_bw_max_rate controls disk I/O bandwidth (size in bits/sec
# for SB/VM).
# The same value is used for inbound and outbound bandwidth.
# Default 0-sized value means unlimited rate.
#disk_rate_limiter_bw_max_rate = 0
#
# disk_rate_limiter_bw_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
# set to a non zero value.
#disk_rate_limiter_bw_one_time_burst = 0
#
# Operation rate limiter options
#
# disk_rate_limiter_ops_max_rate controls disk I/O operations (in ops/sec
# for SB/VM).
# The same value is used for inbound and outbound traffic.
# Default 0-sized value means unlimited rate.
#disk_rate_limiter_ops_max_rate = 0
#
# disk_rate_limiter_ops_one_time_burst increases the initial max rate and this
# initial extra credit does *NOT* affect the overall limit and can be used for
# an *initial* burst of data.
# This is *optional* and only takes effect if disk_rate_limiter_ops_max_rate is
# set to a non zero value.
#disk_rate_limiter_ops_one_time_burst = 0
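#
# Example (illustrative): cap disk I/O at 1000 operations per second:
# disk_rate_limiter_ops_max_rate = 1000
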
[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true

# Enable agent tracing.
#
# If enabled, the agent will generate OpenTelemetry trace spans.
#
# Notes:
#
# - If the runtime also has tracing enabled, the agent spans will be
# associated with the appropriate runtime parent span.
# - If enabled, the runtime will wait for the container to shutdown,
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true

# Enable debug console.

# If enabled, the user can connect to the guest OS running inside the hypervisor
# through the "kata-runtime exec <sandbox-id>" command.

#debug_console_enabled = true

# Agent connection dialing timeout value in seconds
# (default: 45)
dial_timeout = 45

# Confidential Data Hub API timeout value in seconds
# (default: 50)
#cdh_api_timeout = 50

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
#
# Internetworking model
# Determines how the VM should be connected to
# the container network interface
# Options:
#
# - macvtap
# Used when the Container network interface can be bridged using
# macvtap.
#
# - none
# Used for a customized network. Only creates a tap device. No veth pair.
#
# - tcfilter
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model="@DEFNETWORKMODEL_CLH@"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@

# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
# so general users should not uncomment and apply it.
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label="@DEFGUESTSELINUXLABEL@"
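
# Example (illustrative label in the "user:role:type" format, using the
# default `container_t` type mentioned above):
# guest_selinux_label="system_u:system_r:container_t"
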
# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true

# Set the full URL to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""

# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""

# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts on your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true

# If enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@

# If enabled, the runtime will attempt to determine the appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
# when a hardware architecture or hypervisor solution is utilized which does not support CPU and/or memory hotplug.
# Compatibility for determining appropriate sandbox (VM) size:
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT_CLH@

# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
# This is only valid if filesystem sharing is utilized. The provided path(s) will be bind-mounted into the shared fs directory.
# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
# These will not be exposed to the container workloads, and are only provided for potential guest services.
sandbox_bind_mounts=@DEFBINDMOUNTS@
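
# Example (illustrative host path):
# sandbox_bind_mounts=["/path/to/host/directory"]
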
# VFIO Mode
# Determines how VFIO devices should be presented to the container.
# Options:
#
# - vfio
# Matches behaviour of OCI runtimes (e.g. runc) as much as
# possible. VFIO devices will appear in the container as VFIO
# character devices under /dev/vfio. The exact names may differ
# from the host (they need to match the VM's IOMMU group numbers
# rather than the host's)
#
# - guest-kernel
# This is a Kata-specific behaviour that's useful in certain cases.
# The VFIO device is managed by whatever driver in the VM kernel
# claims it. This means it will appear as one or more device nodes
# or network interfaces depending on the nature of the device.
# Using this mode requires specially built workloads that know how
# to locate the relevant device interfaces within the VM.
#
vfio_mode="@DEFVFIOMODE@"

# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=@DEFAULTEXPFEATURES@

# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
# enable_pprof = true

# Indicates the CreateContainer request timeout needed for the workload(s)
# If using guest_pull, this includes the time to pull the image inside the guest
# Defaults to @DEFCREATECONTAINERTIMEOUT@ second(s)
# Note: The effective timeout is determined by the lesser of two values: runtime-request-timeout from kubelet config
# (https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#:~:text=runtime%2Drequest%2Dtimeout) and create_container_timeout.
# In essence, the timeout used for guest pull is min(runtime-request-timeout, create_container_timeout).
create_container_timeout = @DEFCREATECONTAINERTIMEOUT@
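
# Example: with kubelet's runtime-request-timeout set to 120 seconds and
# create_container_timeout = 300, the effective guest-pull timeout is 120 seconds.
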
# Base directory of directly attachable network config.
# Network devices for VM-based containers are allowed to be placed in the
# host netns to eliminate as many hops as possible, which is what we
# call a "Directly Attachable Network". The config, set by special CNI
# plugins, is used to tell Kata Containers what devices are attached
# to the hypervisor.
# (default: /run/kata-containers/dans)
dan_conf = "@DEFDANCONF@"