diff --git a/src/runtime/Makefile b/src/runtime/Makefile
index 1874020846..daed6086d6 100644
--- a/src/runtime/Makefile
+++ b/src/runtime/Makefile
@@ -96,6 +96,7 @@ GENERATED_VARS = \
     CONFIG_QEMU_IN \
     CONFIG_CLH_IN \
     CONFIG_FC_IN \
+    CONFIG_CLH_TDX_IN \
     CONFIG_QEMU_TDX_IN \
     $(USER_VARS)
 SCRIPTS += $(COLLECT_SCRIPT)
@@ -115,6 +116,7 @@ FIRMWAREPATH :=
 FIRMWAREVOLUMEPATH :=
 TDVFFIRMWAREPATH := $(PREFIXDEPS)/share/tdvf/OVMF_CODE.fd
 TDVFFIRMWAREVOLUMEPATH := $(PREFIXDEPS)/share/tdvf/OVMF_VARS.fd
+TDSHIMFIRMWAREPATH := ${PREFIXDEPS}/share/td-shim/td-shim.bin
 TDXKERNELPARAMS := tdx_disable_filter
 
 # Name of default configuration file the runtime will use.
@@ -305,12 +307,28 @@ ifneq (,$(CLHCMD))
     CONFIGS += $(CONFIG_CLH)
 
+    CONFIG_FILE_CLH_TDX = configuration-clh-tdx.toml
+    CONFIG_CLH_TDX = config/$(CONFIG_FILE_CLH_TDX)
+    CONFIG_CLH_TDX_IN = $(CONFIG_CLH_TDX).in
+
+    CONFIG_PATH_CLH_TDX = $(abspath $(CONFDIR)/$(CONFIG_FILE_CLH_TDX))
+    CONFIG_PATHS += $(CONFIG_PATH_CLH_TDX)
+
+    SYSCONFIG_CLH_TDX = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_CLH_TDX))
+    SYSCONFIG_PATHS += $(SYSCONFIG_CLH_TDX)
+
+    CONFIGS += $(CONFIG_CLH_TDX)
+
     # CLH-specific options (all should be suffixed by "_CLH")
     # currently, huge pages are required for virtiofsd support
     DEFNETWORKMODEL_CLH := tcfilter
     KERNELTYPE_CLH = uncompressed
     KERNEL_NAME_CLH = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_CLH))
     KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH)
+
+    KERNELTDXTYPE_CLH = compressed
+    KERNELTDXNAME_CLH = $(call MAKE_KERNEL_TDX_NAME,$(KERNELTDXTYPE_CLH))
+    KERNELTDXPATH_CLH = $(KERNELDIR)/$(KERNELTDXNAME_CLH)
 endif
 
 ifneq (,$(FCCMD))
@@ -441,10 +459,12 @@ USER_VARS += KERNELPATH_ACRN
 USER_VARS += KERNELPATH
 USER_VARS += KERNELTDXPATH
 USER_VARS += KERNELPATH_CLH
+USER_VARS += KERNELTDXPATH_CLH
 USER_VARS += KERNELPATH_FC
 USER_VARS += KERNELVIRTIOFSPATH
 USER_VARS += FIRMWAREPATH
 USER_VARS += FIRMWAREVOLUMEPATH
+USER_VARS += TDSHIMFIRMWAREPATH
 USER_VARS += TDVFFIRMWAREPATH
 USER_VARS += TDVFFIRMWAREVOLUMEPATH
 USER_VARS += MACHINEACCELERATORS
diff --git a/src/runtime/config/configuration-clh-tdx.toml.in b/src/runtime/config/configuration-clh-tdx.toml.in
new file mode 100644
index 0000000000..798098afb1
--- /dev/null
+++ b/src/runtime/config/configuration-clh-tdx.toml.in
@@ -0,0 +1,428 @@
+# Copyright (c) 2019 Ericsson Eurolab Deutschland GmbH
+# Copyright (c) 2021 Adobe Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# XXX: WARNING: this file is auto-generated.
+# XXX:
+# XXX: Source file: "@CONFIG_CLH_TDX_IN@"
+# XXX: Project:
+# XXX:   Name: @PROJECT_NAME@
+# XXX:   Type: @PROJECT_TYPE@
+
+[hypervisor.clh]
+path = "@CLHPATH@"
+kernel = "@KERNELTDXPATH_CLH@"
+image = "@IMAGEPATH@"
+
+# Enable confidential guest support.
+# Toggling that setting may trigger different hardware features, ranging
+# from memory encryption to both memory and CPU-state encryption and integrity.
+# The Kata Containers runtime dynamically detects the available feature set and
+# aims at enabling the largest possible one, returning an error if none is
+# available, or none is supported by the hypervisor.
+#
+# Known limitations:
+# * Does not work by design:
+#   - CPU Hotplug
+#   - Memory Hotplug
+#   - NVDIMM devices
+#
+# Supported TEEs:
+# * Intel TDX
+#
+# Default false
+confidential_guest = true
+
+# disable applying SELinux on the VMM process (default false)
+disable_selinux=@DEFDISABLESELINUX@
+
+# Path to the firmware.
+# If you want Cloud Hypervisor to use a specific firmware, set its path below.
+# This option is only used when confidential_guest is enabled.
+#
+# For more information about the firmware that can be used with specific TEEs,
+# please refer to:
+# * Intel TDX:
+#   - td-shim: https://github.com/confidential-containers/td-shim
+#
+firmware = "@TDSHIMFIRMWAREPATH@"
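+
+# Illustrative example only: the @...@ placeholders above are filled in at
+# build time. On a typical installation the confidential-guest entries end up
+# looking roughly like the commented values below (both paths are placeholders
+# and depend on how Kata Containers and td-shim were installed):
+#kernel = "/usr/share/kata-containers/vmlinux-tdx.container"
+#firmware = "/usr/share/td-shim/td-shim.bin"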
+
+# List of valid annotation names for the hypervisor
+# Each member of the list is a regular expression, which is the base name
+# of the annotation, e.g. "path" for "io.katacontainers.config.hypervisor.path"
+enable_annotations = @DEFENABLEANNOTATIONS@
+
+# List of valid annotation values for the hypervisor
+# Each member of the list is a path pattern as described by glob(3).
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: @CLHVALIDHYPERVISORPATHS@
+valid_hypervisor_paths = @CLHVALIDHYPERVISORPATHS@
+
+# Optional space-separated list of options to pass to the guest kernel.
+# For example, use `kernel_params = "vsyscall=emulate"` if you are having
+# trouble running pre-2.15 glibc.
+#
+# WARNING: - any parameter specified here will take priority over the default
+# parameter value of the same name used to start the virtual machine.
+# Do not set values here unless you understand the impact of doing so as you
+# may stop the virtual machine from booting.
+# To see the list of default parameters, enable hypervisor debug, create a
+# container and look for 'default-kernel-parameters' log entries.
+kernel_params = "@TDXKERNELPARAMS@"
+
+# Default number of vCPUs per SB/VM:
+# unspecified or 0                --> will be set to @DEFVCPUS@
+# < 0                             --> will be set to the actual number of physical cores
+# > 0 <= number of physical cores --> will be set to the specified number
+# > number of physical cores      --> will be set to the actual number of physical cores
+default_vcpus = 1
+
+# Default maximum number of vCPUs per SB/VM:
+# unspecified or == 0             --> will be set to the actual number of physical cores or to the maximum number
+#                                     of vCPUs supported by KVM if that number is exceeded
+# > 0 <= number of physical cores --> will be set to the specified number
+# > number of physical cores      --> will be set to the actual number of physical cores or to the maximum number
+#                                     of vCPUs supported by KVM if that number is exceeded
+# WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
+# the actual number of physical cores is greater than it.
+# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
+# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
+# can be added to a SB/VM, but the memory footprint will be big. Another example, with
+# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
+# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
+# unless you know what you are doing.
+default_maxvcpus = @DEFMAXVCPUS@
+
+# Default memory size in MiB for SB/VM.
+# If unspecified then it will be set to @DEFMEMSZ@ MiB.
+default_memory = @DEFMEMSZ@
+
+# Default memory slots per SB/VM.
+# If unspecified then it will be set to @DEFMEMSLOTS@.
+# This determines how many times memory can be hot-added to the sandbox/VM.
+#memory_slots = @DEFMEMSLOTS@
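+
+# Illustrative sizing example (not a default; pick values that fit your
+# workload and hardware): a TD that boots with 4 vCPUs and 4 GiB of RAM
+# would be configured as:
+#default_vcpus = 4
+#default_memory = 4096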
+
+# Default maximum memory in MiB per SB / VM
+# unspecified or == 0           --> will be set to the actual amount of physical RAM
+# > 0 <= amount of physical RAM --> will be set to the specified number
+# > amount of physical RAM      --> will be set to the actual amount of physical RAM
+default_maxmemory = @DEFMAXMEMSZ@
+
+# Shared file system type:
+#   - virtio-fs (default)
+#   - virtio-fs-nydus
+shared_fs = "@DEFSHAREDFS_CLH_VIRTIOFS@"
+
+# Path to vhost-user-fs daemon.
+virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
+
+# List of valid annotation values for the virtiofs daemon
+# The default if not set is empty (all annotations rejected.)
+# Your distribution recommends: @DEFVALIDVIRTIOFSDAEMONPATHS@
+valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@
+
+# Default size of DAX cache in MiB
+virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
+
+# Extra args for virtiofsd daemon
+#
+# Format example:
+#   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
+# Examples:
+#   Set virtiofsd log level to debug : ["-o", "log_level=debug"] or ["-d"]
+# see `virtiofsd -h` for possible options.
+virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
+
+# Cache mode:
+#
+#  - none
+#    Metadata, data, and pathname lookup are not cached in guest. They are
+#    always fetched from host and any changes are immediately pushed to host.
+#
+#  - auto
+#    Metadata and pathname lookup cache expires after a configured amount of
+#    time (default is 1 second). Data is cached while the file is open (close
+#    to open consistency).
+#
+#  - always
+#    Metadata, data, and pathname lookup are cached in guest and never expire.
+virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
+
+# Block storage driver to be used for the hypervisor in case the container
+# rootfs is backed by a block device. This is virtio-blk.
+block_device_driver = "virtio-blk"
+
+# Enable huge pages for VM RAM, default false
+# Enabling this will result in the VM memory
+# being allocated using huge pages.
+#enable_hugepages = true
+
+# Disable the 'seccomp' feature from Cloud Hypervisor, default false
+# disable_seccomp = true
+
+# This option changes the default hypervisor and kernel parameters
+# to enable debug output where available.
+#
+# Default false
+#enable_debug = true
+
+# Path to OCI hook binaries in the *guest rootfs*.
+# This does not affect host-side hooks which must instead be added to
+# the OCI spec passed to the runtime.
+#
+# You can create a rootfs with hooks by customizing the osbuilder scripts:
+# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
+#
+# Hooks must be stored in a subdirectory of guest_hook_path according to their
+# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
+# The agent will scan these directories for executable files and add them, in
+# lexicographical order, to the lifecycle of the guest container.
+# Hooks are executed in the runtime namespace of the guest. See the official documentation:
+# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
+# Warnings will be logged if any error is encountered while scanning for hooks,
+# but they will not abort container execution.
+#guest_hook_path = "/usr/share/oci/hooks"
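+
+# Illustrative example (the hook name is a placeholder): with the
+# guest_hook_path above, a prestart hook baked into the guest rootfs would be
+# picked up from:
+#   /usr/share/oci/hooks/prestart/my-hook.sh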
+#
+# These options are related to the network rate limiter at the VMM level, and
+# are based on the Cloud Hypervisor I/O throttling. Those are disabled by
+# default and we strongly advise users to refer to the Cloud Hypervisor
+# official documentation for a better understanding of its internals:
+# https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md
+#
+# Bandwidth rate limiter options
+#
+# net_rate_limiter_bw_max_rate controls network I/O bandwidth (size in bits/sec
+# for SB/VM).
+# The same value is used for inbound and outbound bandwidth.
+# Default 0-sized value means unlimited rate.
+#net_rate_limiter_bw_max_rate = 0
+#
+# net_rate_limiter_bw_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if net_rate_limiter_bw_max_rate is
+# set to a non zero value.
+#net_rate_limiter_bw_one_time_burst = 0
+#
+# Operation rate limiter options
+#
+# net_rate_limiter_ops_max_rate controls network I/O operations (in ops/sec
+# for SB/VM).
+# The same value is used for inbound and outbound operations.
+# Default 0-sized value means unlimited rate.
+#net_rate_limiter_ops_max_rate = 0
+#
+# net_rate_limiter_ops_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if net_rate_limiter_ops_max_rate is
+# set to a non zero value.
+#net_rate_limiter_ops_one_time_burst = 0
+#
+# These options are related to the disk rate limiter at the VMM level, and are
+# based on the Cloud Hypervisor I/O throttling. Those are disabled by default
+# and we strongly advise users to refer to the Cloud Hypervisor official
+# documentation for a better understanding of its internals:
+# https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md
+#
+# Bandwidth rate limiter options
+#
+# disk_rate_limiter_bw_max_rate controls disk I/O bandwidth (size in bits/sec
+# for SB/VM).
+# The same value is used for inbound and outbound bandwidth.
+# Default 0-sized value means unlimited rate.
+#disk_rate_limiter_bw_max_rate = 0
+#
+# disk_rate_limiter_bw_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
+# set to a non zero value.
+#disk_rate_limiter_bw_one_time_burst = 0
+#
+# Operation rate limiter options
+#
+# disk_rate_limiter_ops_max_rate controls disk I/O operations (in ops/sec
+# for SB/VM).
+# The same value is used for inbound and outbound operations.
+# Default 0-sized value means unlimited rate.
+#disk_rate_limiter_ops_max_rate = 0
+#
+# disk_rate_limiter_ops_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if disk_rate_limiter_ops_max_rate is
+# set to a non zero value.
+#disk_rate_limiter_ops_one_time_burst = 0
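+
+# Illustrative example (not a default): cap the sandbox to roughly 100 Mbit/s
+# of network bandwidth with a small one-time burst on top; the numbers below
+# are placeholders expressed in bits/sec:
+#net_rate_limiter_bw_max_rate = 100000000
+#net_rate_limiter_bw_one_time_burst = 10000000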
+
+[agent.@PROJECT_TYPE@]
+# If enabled, make the agent display debug-level messages.
+# (default: disabled)
+#enable_debug = true
+
+# Enable agent tracing.
+#
+# If enabled, the agent will generate OpenTelemetry trace spans.
+#
+# Notes:
+#
+# - If the runtime also has tracing enabled, the agent spans will be
+#   associated with the appropriate runtime parent span.
+# - If enabled, the runtime will wait for the container to shut down,
+#   increasing the container shutdown time slightly.
+#
+# (default: disabled)
+#enable_tracing = true
+
+# Enable debug console.
+# If enabled, the user can connect to the guest OS running inside the
+# hypervisor through the "kata-runtime exec <sandbox-id>" command.
+#debug_console_enabled = true
+
+# Agent connection dialing timeout value in seconds
+# (default: 30)
+#dial_timeout = 30
+
+[runtime]
+# If enabled, the runtime will log additional debug messages to the
+# system log
+# (default: disabled)
+#enable_debug = true
+#
+# Internetworking model
+# Determines how the VM should be connected to the container network interface
+# Options:
+#
+#   - macvtap
+#     Used when the container network interface can be bridged using
+#     macvtap.
+#
+#   - none
+#     Used with a custom network. Only creates a tap device. No veth pair.
+#
+#   - tcfilter
+#     Uses tc filter rules to redirect traffic from the network interface
+#     provided by plugin to a tap interface connected to the VM.
+#
+internetworking_model="@DEFNETWORKMODEL_CLH@"
+
+# disable guest seccomp
+# Determines whether container seccomp profiles are passed to the virtual
+# machine and applied by the kata agent. If set to true, seccomp is not applied
+# within the guest
+# (default: true)
+disable_guest_seccomp=@DEFDISABLEGUESTSECCOMP@
+
+# If enabled, the runtime will create opentracing.io traces and spans.
+# (See https://www.jaegertracing.io/docs/getting-started).
+# (default: disabled)
+#enable_tracing = true
+
+# Set the full url to the Jaeger HTTP Thrift collector.
+# The default if not set will be "http://localhost:14268/api/traces"
+#jaeger_endpoint = ""
+
+# Sets the username to be used if basic auth is required for Jaeger.
+#jaeger_user = ""
+
+# Sets the password to be used if basic auth is required for Jaeger.
+#jaeger_password = ""
+
+# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
+# This option may have some potential impact on your host. It should only be used when you know what you're doing.
+# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
+# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
+# (like OVS) directly.
+# (default: false)
+#disable_new_netns = true
+
+# If enabled, the runtime will add all the kata processes inside one dedicated cgroup.
+# The container cgroups in the host are not created, just one single cgroup per sandbox.
+# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
+# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
+# The sandbox cgroup is constrained if there is no container type annotation.
+# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
+sandbox_cgroup_only=@DEFSANDBOXCGROUPONLY@
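+
+# Illustrative example (not a default): if you manage the tap device yourself
+# from the host network namespace, pair the two settings described above:
+#internetworking_model="none"
+#disable_new_netns = true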
+
+# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
+# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
+# when a hardware architecture or hypervisor solution is utilized which does not support CPU and/or memory hotplug.
+# Compatibility for determining appropriate sandbox (VM) size:
+# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
+#   does not yet support sandbox sizing annotations.
+# - When running single containers using a tool like ctr, container sizing information will be available.
+static_sandbox_resource_mgmt=@DEFSTATICRESOURCEMGMT@
+
+# If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
+# This is only valid if filesystem sharing is utilized. The provided path(s) will be bind-mounted into the shared fs directory.
+# If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`.
+# These will not be exposed to the container workloads, and are only provided for potential guest services.
+sandbox_bind_mounts=@DEFBINDMOUNTS@
+
+# VFIO Mode
+# Determines how VFIO devices should be presented to the container.
+# Options:
+#
+#  - vfio
+#    Matches behaviour of OCI runtimes (e.g. runc) as much as
+#    possible. VFIO devices will appear in the container as VFIO
+#    character devices under /dev/vfio. The exact names may differ
+#    from the host (they need to match the VM's IOMMU group numbers
+#    rather than the host's)
+#
+#  - guest-kernel
+#    This is a Kata-specific behaviour that's useful in certain cases.
+#    The VFIO device is managed by whatever driver in the VM kernel
+#    claims it. This means it will appear as one or more device nodes
+#    or network interfaces depending on the nature of the device.
+#    Using this mode requires specially built workloads that know how
+#    to locate the relevant device interfaces within the VM.
+#
+vfio_mode="@DEFVFIOMODE@"
+
+# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
+# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
+disable_guest_empty_dir=@DEFDISABLEGUESTEMPTYDIR@
+
+# Enabled experimental feature list, format: ["a", "b"].
+# Experimental features are features not stable enough for production,
+# they may break compatibility, and are prepared for a big version bump.
+# Supported experimental features:
+# (default: [])
+experimental=@DEFAULTEXPFEATURES@
+
+# If enabled, the user can run pprof tools on the shim v2 process through kata-monitor.
+# (default: false)
+# enable_pprof = true
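+
+# Illustrative example (not a default; the host path is a placeholder): expose
+# a read-only host directory to guest services through the sandbox_bind_mounts
+# option documented above:
+#sandbox_bind_mounts=["/usr/share/my-host-data"]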
+
+# WARNING: All the options in the following section have not been implemented yet.
+# This section was added as a placeholder. DO NOT USE IT!
+[image]
+# Container image service.
+#
+# Offload the CRI image management service to the Kata agent.
+# (default: false)
+service_offload = @DEFSERVICEOFFLOAD@
+
+# Container image decryption keys provisioning.
+# Applies only if service_offload is true.
+# Keys can be provisioned locally (e.g. through a special command or
+# a local file) or remotely (usually after the guest is remotely attested).
+# The provision setting is a complete URL that lets the Kata agent decide
+# which method to use in order to fetch the keys.
+#
+# Keys can be stored in a local file, in a measured and attested initrd:
+#provision=data:///local/key/file
+#
+# Keys could be fetched through a special command or binary from the
+# initrd (guest) image, e.g. a firmware call:
+#provision=file:///path/to/bin/fetcher/in/guest
+#
+# Keys can be remotely provisioned. The Kata agent fetches them from e.g.
+# an HTTPS URL:
+#provision=https://my-key-broker.foo/tenant/