Merge pull request #7796 from WenyuanLau/7794/StratoVirt_VMM_support

StratoVirt: add support for a lightweight VMM StratoVirt in Kata
Fabiano Fidêncio 2023-11-17 10:53:17 +01:00 committed by GitHub
commit f8322ffad2
27 changed files with 2660 additions and 7 deletions

View File

@ -22,7 +22,7 @@ jobs:
fail-fast: false
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu']
vmm: ['clh', 'qemu', 'stratovirt']
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
@ -60,7 +60,7 @@ jobs:
fail-fast: false
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu']
vmm: ['clh', 'qemu', 'stratovirt']
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}
@ -101,7 +101,7 @@ jobs:
fail-fast: false
matrix:
containerd_version: ['lts', 'active']
vmm: ['clh', 'qemu', 'dragonball']
vmm: ['clh', 'qemu', 'dragonball', 'stratovirt']
runs-on: garm-ubuntu-2204-smaller
env:
CONTAINERD_VERSION: ${{ matrix.containerd_version }}

View File

@ -48,6 +48,7 @@ jobs:
- qemu
- qemu-snp-experimental
- qemu-tdx-experimental
- stratovirt
- rootfs-image
- rootfs-image-tdx
- rootfs-initrd

View File

@ -33,6 +33,7 @@ jobs:
- kernel-dragonball-experimental
- nydus
- qemu
- stratovirt
- rootfs-image
- rootfs-initrd
- shim-v2

View File

@ -33,6 +33,7 @@ jobs:
- clh
- dragonball
- qemu
- stratovirt
instance-type:
- small
- normal

View File

@ -48,7 +48,7 @@ jobs:
# all the tests due to a single flaky instance.
fail-fast: false
matrix:
vmm: ['clh', 'qemu']
vmm: ['clh', 'qemu', 'stratovirt']
max-parallel: 1
runs-on: metrics
env:

View File

@ -80,6 +80,7 @@ QEMUBINDIR := $(PREFIXDEPS)/bin
CLHBINDIR := $(PREFIXDEPS)/bin
FCBINDIR := $(PREFIXDEPS)/bin
ACRNBINDIR := $(PREFIXDEPS)/bin
STRATOVIRTBINDIR := $(PREFIXDEPS)/bin
SYSCONFDIR := /etc
LOCALSTATEDIR := /var
@ -103,6 +104,7 @@ GENERATED_VARS = \
CONFIG_QEMU_SNP_IN \
CONFIG_CLH_IN \
CONFIG_FC_IN \
CONFIG_STRATOVIRT_IN \
$(USER_VARS)
SCRIPTS += $(COLLECT_SCRIPT)
SCRIPTS_DIR := $(BINDIR)
@ -146,12 +148,13 @@ HYPERVISOR_ACRN = acrn
HYPERVISOR_FC = firecracker
HYPERVISOR_QEMU = qemu
HYPERVISOR_CLH = cloud-hypervisor
HYPERVISOR_STRATOVIRT = stratovirt
# Determines which hypervisor is specified in $(CONFIG_FILE).
DEFAULT_HYPERVISOR ?= $(HYPERVISOR_QEMU)
# List of hypervisors this build system can generate configuration for.
HYPERVISORS := $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVISOR_CLH)
HYPERVISORS := $(HYPERVISOR_ACRN) $(HYPERVISOR_FC) $(HYPERVISOR_QEMU) $(HYPERVISOR_CLH) $(HYPERVISOR_STRATOVIRT)
QEMUPATH := $(QEMUBINDIR)/$(QEMUCMD)
QEMUVALIDHYPERVISORPATHS := [\"$(QEMUPATH)\"]
@ -177,6 +180,9 @@ ACRNVALIDHYPERVISORPATHS := [\"$(ACRNPATH)\"]
ACRNCTLPATH := $(ACRNBINDIR)/$(ACRNCTLCMD)
ACRNVALIDCTLPATHS := [\"$(ACRNCTLPATH)\"]
STRATOVIRTPATH = $(STRATOVIRTBINDIR)/$(STRATOVIRTCMD)
STRATOVIRTVALIDHYPERVISORPATHS := [\"$(STRATOVIRTPATH)\"]
# Default number of vCPUs
DEFVCPUS := 1
# Default maximum number of vCPUs
@ -219,6 +225,7 @@ DEFVALIDENTROPYSOURCES := [\"/dev/urandom\",\"/dev/random\",\"\"]
DEFDISABLEBLOCK := false
DEFSHAREDFS_CLH_VIRTIOFS := virtio-fs
DEFSHAREDFS_QEMU_VIRTIOFS := virtio-fs
DEFSHAREDFS_STRATOVIRT_VIRTIOFS := virtio-fs
DEFSHAREDFS_QEMU_TDX_VIRTIOFS := virtio-9p
DEFSHAREDFS_QEMU_SEV_VIRTIOFS := virtio-9p
DEFSHAREDFS_QEMU_SNP_VIRTIOFS := virtio-9p
@ -381,6 +388,36 @@ ifneq (,$(CLHCMD))
KERNELPATH_CLH = $(KERNELDIR)/$(KERNEL_NAME_CLH)
endif
ifneq (,$(STRATOVIRTCMD))
KNOWN_HYPERVISORS += $(HYPERVISOR_STRATOVIRT)
CONFIG_FILE_STRATOVIRT = configuration-stratovirt.toml
CONFIG_STRATOVIRT = config/$(CONFIG_FILE_STRATOVIRT)
CONFIG_STRATOVIRT_IN = $(CONFIG_STRATOVIRT).in
CONFIG_PATH_STRATOVIRT = $(abspath $(CONFDIR)/$(CONFIG_FILE_STRATOVIRT))
CONFIG_PATHS += $(CONFIG_PATH_STRATOVIRT)
SYSCONFIG_STRATOVIRT = $(abspath $(SYSCONFDIR)/$(CONFIG_FILE_STRATOVIRT))
SYSCONFIG_PATHS += $(SYSCONFIG_STRATOVIRT)
CONFIGS += $(CONFIG_STRATOVIRT)
# stratovirt-specific options (all should be suffixed by "_STRATOVIRT")
DEFMACHINETYPE_STRATOVIRT := microvm
DEFBLOCKSTORAGEDRIVER_STRATOVIRT := virtio-mmio
DEFNETWORKMODEL_STRATOVIRT := tcfilter
DEFSTATICRESOURCEMGMT_STRATOVIRT = true
ifeq ($(ARCH),amd64)
KERNELTYPE_STRATOVIRT = compressed
endif
ifeq ($(ARCH),arm64)
KERNELTYPE_STRATOVIRT = uncompressed
endif
KERNEL_NAME_STRATOVIRT = $(call MAKE_KERNEL_NAME,$(KERNELTYPE_STRATOVIRT))
KERNELPATH_STRATOVIRT = $(KERNELDIR)/$(KERNEL_NAME_STRATOVIRT)
endif
ifneq (,$(FCCMD))
KNOWN_HYPERVISORS += $(HYPERVISOR_FC)
@ -479,6 +516,7 @@ USER_VARS += BINDIR
USER_VARS += CONFIG_ACRN_IN
USER_VARS += CONFIG_CLH_IN
USER_VARS += CONFIG_FC_IN
USER_VARS += CONFIG_STRATOVIRT_IN
USER_VARS += CONFIG_PATH
USER_VARS += CONFIG_QEMU_IN
USER_VARS += DESTDIR
@ -497,6 +535,8 @@ USER_VARS += FCPATH
USER_VARS += FCVALIDHYPERVISORPATHS
USER_VARS += FCJAILERPATH
USER_VARS += FCVALIDJAILERPATHS
USER_VARS += STRATOVIRTPATH
USER_VARS += STRATOVIRTVALIDHYPERVISORPATHS
USER_VARS += SYSCONFIG
USER_VARS += IMAGENAME
USER_VARS += IMAGETDXNAME
@ -520,6 +560,7 @@ USER_VARS += KERNELTDXPATH
USER_VARS += KERNELSNPPATH
USER_VARS += KERNELPATH_CLH
USER_VARS += KERNELPATH_FC
USER_VARS += KERNELPATH_STRATOVIRT
USER_VARS += KERNELVIRTIOFSPATH
USER_VARS += FIRMWAREPATH
USER_VARS += FIRMWARESEVPATH
@ -531,6 +572,7 @@ USER_VARS += MACHINEACCELERATORS
USER_VARS += CPUFEATURES
USER_VARS += TDXCPUFEATURES
USER_VARS += DEFMACHINETYPE_CLH
USER_VARS += DEFMACHINETYPE_STRATOVIRT
USER_VARS += KERNELPARAMS
USER_VARS += KERNELTDXPARAMS
USER_VARS += LIBEXECDIR
@ -572,6 +614,7 @@ USER_VARS += DEFNETWORKMODEL_ACRN
USER_VARS += DEFNETWORKMODEL_CLH
USER_VARS += DEFNETWORKMODEL_FC
USER_VARS += DEFNETWORKMODEL_QEMU
USER_VARS += DEFNETWORKMODEL_STRATOVIRT
USER_VARS += DEFDISABLEGUESTEMPTYDIR
USER_VARS += DEFDISABLEGUESTSECCOMP
USER_VARS += DEFDISABLESELINUX
@ -582,9 +625,11 @@ USER_VARS += DEFDISABLEBLOCK
USER_VARS += DEFBLOCKSTORAGEDRIVER_ACRN
USER_VARS += DEFBLOCKSTORAGEDRIVER_FC
USER_VARS += DEFBLOCKSTORAGEDRIVER_QEMU
USER_VARS += DEFBLOCKSTORAGEDRIVER_STRATOVIRT
USER_VARS += DEFBLOCKDEVICEAIO_QEMU
USER_VARS += DEFSHAREDFS_CLH_VIRTIOFS
USER_VARS += DEFSHAREDFS_QEMU_VIRTIOFS
USER_VARS += DEFSHAREDFS_STRATOVIRT_VIRTIOFS
USER_VARS += DEFSHAREDFS_QEMU_TDX_VIRTIOFS
USER_VARS += DEFSHAREDFS_QEMU_SEV_VIRTIOFS
USER_VARS += DEFSHAREDFS_QEMU_SNP_VIRTIOFS
@ -609,6 +654,7 @@ USER_VARS += DEFSANDBOXCGROUPONLY
USER_VARS += DEFSTATICRESOURCEMGMT
USER_VARS += DEFSTATICRESOURCEMGMT_CLH
USER_VARS += DEFSTATICRESOURCEMGMT_FC
USER_VARS += DEFSTATICRESOURCEMGMT_STRATOVIRT
USER_VARS += DEFSTATICRESOURCEMGMT_TEE
USER_VARS += DEFBINDMOUNTS
USER_VARS += DEFSERVICEOFFLOAD
@ -940,6 +986,9 @@ ifneq (,$(findstring $(HYPERVISOR_FC),$(KNOWN_HYPERVISORS)))
endif
ifneq (,$(findstring $(HYPERVISOR_ACRN),$(KNOWN_HYPERVISORS)))
@printf "\t$(HYPERVISOR_ACRN) hypervisor path (ACRNPATH) : %s\n" $(abspath $(ACRNPATH))
endif
ifneq (,$(findstring $(HYPERVISOR_STRATOVIRT),$(KNOWN_HYPERVISORS)))
@printf "\t$(HYPERVISOR_STRATOVIRT) hypervisor path (STRATOVIRTPATH) : %s\n" $(abspath $(STRATOVIRTPATH))
endif
@printf "\tassets path (PKGDATADIR) : %s\n" $(abspath $(PKGDATADIR))
@printf "\tshim path (PKGLIBEXECDIR) : %s\n" $(abspath $(PKGLIBEXECDIR))

View File

@ -28,3 +28,6 @@ ACRNCTLCMD := acrnctl
CLHCMD := cloud-hypervisor
DEFSTATICRESOURCEMGMT_CLH := false
# stratovirt binary name
STRATOVIRTCMD := stratovirt

View File

@ -21,3 +21,6 @@ FCJAILERCMD := jailer
CLHCMD := cloud-hypervisor
DEFSTATICRESOURCEMGMT_CLH := true
# stratovirt binary name
STRATOVIRTCMD := stratovirt

View File

@ -115,6 +115,8 @@ func setCPUtype(hypervisorType vc.HypervisorType) error {
}
switch hypervisorType {
case vc.StratovirtHypervisor:
fallthrough
case vc.FirecrackerHypervisor:
fallthrough
case vc.ClhHypervisor:
@ -315,6 +317,8 @@ func archHostCanCreateVMContainer(hypervisorType vc.HypervisorType) error {
fallthrough
case vc.ClhHypervisor:
fallthrough
case vc.StratovirtHypervisor:
fallthrough
case vc.FirecrackerHypervisor:
return kvmIsUsable()
case vc.AcrnHypervisor:

View File

@ -0,0 +1,394 @@
# Copyright (c) 2023 Huawei Technologies Co.,Ltd.
#
# SPDX-License-Identifier: Apache-2.0
#
# XXX: WARNING: this file is auto-generated.
# XXX:
# XXX: Source file: "@CONFIG_STRATOVIRT_IN@"
# XXX: Project:
# XXX: Name: @PROJECT_NAME@
# XXX: Type: @PROJECT_TYPE@
[hypervisor.stratovirt]
path = "@STRATOVIRTPATH@"
kernel = "@KERNELPATH_STRATOVIRT@"
#image = "@IMAGEPATH@"
initrd = "@INITRDPATH@"
machine_type = "@DEFMACHINETYPE_STRATOVIRT@"
# rootfs filesystem type:
# - ext4 (default)
# - xfs
# - erofs
rootfs_type = @DEFROOTFSTYPE@
# List of valid annotation names for the hypervisor
# Each member of the list is a regular expression, which is the base name
# of the annotation, e.g. "path" for "io.katacontainers.config.hypervisor.path"
enable_annotations = @DEFENABLEANNOTATIONS@
# List of valid annotation values for the hypervisor
# Each member of the list is a path pattern as described by glob(3).
# The default if not set is empty (all annotations rejected).
# Your distribution recommends: @STRATOVIRTVALIDHYPERVISORPATHS@
valid_hypervisor_paths = @STRATOVIRTVALIDHYPERVISORPATHS@
# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
# trouble running pre-2.15 glibc.
#
# WARNING: - any parameter specified here will take priority over the default
# parameter value of the same name used to start the virtual machine.
# Do not set values here unless you understand the impact of doing so as you
# may stop the virtual machine from booting.
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
kernel_params = "@KERNELPARAMS@"
# Default number of vCPUs per SB/VM:
# unspecified or 0 --> will be set to @DEFVCPUS@
# < 0 --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores --> will be set to the actual number of physical cores
default_vcpus = 1
# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number
# of vCPUs supported by KVM if that number is exceeded
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores --> will be set to the actual number of physical cores or to the maximum number
# of vCPUs supported by KVM if that number is exceeded
# WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
# the actual number of physical cores is greater than it.
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
# can be added to a SB/VM, but the memory footprint will be big. Another example, with
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
# unless you know what you are doing.
# NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
default_maxvcpus = @DEFMAXVCPUS@
# Bridges can be used to hot plug devices.
# Limitations:
# * Currently only PCI bridges are supported
# * Up to 30 devices per bridge can be hot plugged.
# * Up to 5 PCI bridges can be cold plugged per VM.
# This limitation could be a bug in the kernel
# Default number of bridges per SB/VM:
# unspecified or 0 --> will be set to @DEFBRIDGES@
# > 1 <= 5 --> will be set to the specified number
# > 5 --> will be set to 5
default_bridges = @DEFBRIDGES@
# Default memory size in MiB for SB/VM.
# If unspecified then it will be set to @DEFMEMSZ@ MiB.
default_memory = @DEFMEMSZ@
#
# Default memory slots per SB/VM.
# If unspecified then it will be set to @DEFMEMSLOTS@.
# This will determine how many times memory can be hot-added to the sandbox/VM.
#memory_slots = @DEFMEMSLOTS@
# Default maximum memory in MiB per SB / VM
# unspecified or == 0 --> will be set to the actual amount of physical RAM
# > 0 <= amount of physical RAM --> will be set to the specified number
# > amount of physical RAM --> will be set to the actual amount of physical RAM
default_maxmemory = @DEFMAXMEMSZ@
# The size in MiB that will be added to the hypervisor's maximum memory.
# It is the memory address space for the NVDIMM device.
# If the block storage driver (block_device_driver) is set to "nvdimm",
# memory_offset should be set to the size of the block device.
# Default 0
#memory_offset = 0
# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
# root file system is backed by a block device, the block device is passed
# directly to the hypervisor for performance reasons.
# This flag prevents the block device from being passed to the hypervisor;
# virtio-fs is used instead to pass the rootfs.
disable_block_device_use = @DEFDISABLEBLOCK@
# Shared file system type:
# - virtio-fs (default)
# - virtio-fs-nydus
# - none
shared_fs = "@DEFSHAREDFS_STRATOVIRT_VIRTIOFS@"
# Path to vhost-user-fs daemon.
virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
# List of valid annotation values for the virtiofs daemon
# The default if not set is empty (all annotations rejected).
valid_virtio_fs_daemon_paths = @DEFVALIDVIRTIOFSDAEMONPATHS@
# Default size of DAX cache in MiB
virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
# Extra args for virtiofsd daemon
#
# Format example:
# ["--arg1=xxx", "--arg2=yyy"]
# Examples:
# Set virtiofsd log level to debug : ["--log-level=debug"]
#
# see `virtiofsd -h` for possible options.
virtio_fs_extra_args = @DEFVIRTIOFSEXTRAARGS@
# Cache mode:
#
# - never
# Metadata, data, and pathname lookup are not cached in guest. They are
# always fetched from host and any changes are immediately pushed to host.
#
# - auto
# Metadata and pathname lookup cache expires after a configured amount of
# time (default is 1 second). Data is cached while the file is open (close
# to open consistency).
#
# - always
# Metadata, data, and pathname lookup are cached in guest and never expire.
virtio_fs_cache = "@DEFVIRTIOFSCACHE@"
# Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
# or nvdimm.
block_device_driver = "@DEFBLOCKSTORAGEDRIVER_STRATOVIRT@"
# Specifies whether cache-related options will be set for block devices.
# Default false
#block_device_cache_set = true
# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true
# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true
# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory preallocation.
#enable_hugepages = true
# Enable vIOMMU, default false
# Enabling this will result in the VM having a vIOMMU device
# This will also add the following options to the kernel's
# command line: intel_iommu=on,iommu=pt
#enable_iommu = true
# This option changes the default hypervisor and kernel parameters
# to enable debug output where available.
#
# Default false
#enable_debug = true
# Disable the customizations done in the runtime when it detects
# that it is running on top of a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true
#
# Default entropy source.
# The path to a host source of entropy (including a real hardware RNG)
# /dev/urandom and /dev/random are two main options.
# Be aware that /dev/random is a blocking source of entropy. If the host
# runs out of entropy, the VM's boot time will increase, possibly leading to
# startup timeouts.
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
entropy_source = "@DEFENTROPYSOURCE@"
# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/kata-containers/tree/main/tools/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,poststart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"
# disable applying SELinux on the VMM process (default false)
disable_selinux = @DEFDISABLESELINUX@
# disable applying SELinux on the container process
# If set to false, the type `container_t` is applied to the container process by default.
# Note: To enable guest SELinux, the guest rootfs must be CentOS that is created and built
# with `SELINUX=yes`.
# (default: true)
disable_guest_selinux = @DEFDISABLEGUESTSELINUX@
[factory]
# VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it readonly. It helps speeding up new container
# creation and saves a lot of memory if there are many kata containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true
[agent.@PROJECT_TYPE@]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
#enable_debug = true
# Enable agent tracing.
#
# If enabled, the agent will generate OpenTelemetry trace spans.
#
# Notes:
#
# - If the runtime also has tracing enabled, the agent spans will be
# associated with the appropriate runtime parent span.
# - If enabled, the runtime will wait for the container to shutdown,
# increasing the container shutdown time slightly.
#
# (default: disabled)
#enable_tracing = true
# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
# The following example can be used to load two kernel modules with parameters
# - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
# The first word is considered as the module name and the rest as its parameters.
# Container will not be started when:
# * A kernel module is specified and the modprobe command is not installed in the guest
# or it fails loading the module.
# * The module is not available in the guest or it doesn't meet the guest kernel
# requirements, like architecture and version.
#
kernel_modules = []
# Enable debug console.
# If enabled, the user can connect to the guest OS running inside the hypervisor
# through the "kata-runtime exec <sandbox-id>" command
#debug_console_enabled = true
# Agent connection dialing timeout value in seconds
# (default: 45)
dial_timeout = 45
[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
#enable_debug = true
#
# Internetworking model
# Determines how the VM should be connected to the
# container network interface
# Options:
#
# - macvtap
# Used when the Container network interface can be bridged using
# macvtap.
#
# - none
# Used with a customized network. Only creates a tap device. No veth pair.
#
# - tcfilter
# Uses tc filter rules to redirect traffic from the network interface
# provided by plugin to a tap interface connected to the VM.
#
internetworking_model = "@DEFNETWORKMODEL_STRATOVIRT@"
# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp = @DEFDISABLEGUESTSECCOMP@
# vCPUs pinning settings
# if enabled, each vCPU thread will be scheduled to a fixed CPU
# qualified condition: num(vCPU threads) == num(CPUs in sandbox's CPUSet)
#enable_vcpus_pinning = false
# Apply a custom SELinux security policy to the container process inside the VM.
# This is used when you want to apply a type other than the default `container_t`,
# so general users should not uncomment and apply it.
# (format: "user:role:type")
# Note: You cannot specify MCS policy with the label because the sensitivity levels and
# categories are determined automatically by high-level container runtimes such as containerd.
#guest_selinux_label = "@DEFGUESTSELINUXLABEL@"
# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true
# Set the full url to the Jaeger HTTP Thrift collector.
# The default if not set will be "http://localhost:14268/api/traces"
#jaeger_endpoint = ""
# Sets the username to be used if basic auth is required for Jaeger.
#jaeger_user = ""
# Sets the password to be used if basic auth is required for Jaeger.
#jaeger_password = ""
# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impact on your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# (default: false)
#disable_new_netns = true
# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://pkg.go.dev/github.com/kata-containers/kata-containers/src/runtime/virtcontainers#ContainerType
sandbox_cgroup_only = @DEFSANDBOXCGROUPONLY@
# If enabled, the runtime will attempt to determine appropriate sandbox size (memory, CPU) before booting the virtual machine. In
# this case, the runtime will not dynamically update the amount of memory and CPU in the virtual machine. This is generally helpful
# when a hardware architecture or hypervisor solution is used which does not support CPU and/or memory hotplug.
# Compatibility for determining appropriate sandbox (VM) size:
# - When running with pods, sandbox sizing information will only be available if using Kubernetes >= 1.23 and containerd >= 1.6. CRI-O
# does not yet support sandbox sizing annotations.
# - When running single containers using a tool like ctr, container sizing information will be available.
static_sandbox_resource_mgmt = @DEFSTATICRESOURCEMGMT_STRATOVIRT@
# If enabled, the runtime will not create Kubernetes emptyDir mounts on the guest filesystem. Instead, emptyDir mounts will
# be created on the host and shared via virtio-fs. This is potentially slower, but allows sharing of files from host to guest.
disable_guest_empty_dir = @DEFDISABLEGUESTEMPTYDIR@
# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental = @DEFAULTEXPFEATURES@
# If enabled, user can run pprof tools with shim v2 process through kata-monitor.
# (default: false)
#enable_pprof = true
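Once installed, this generated file can be selected explicitly when inspecting a host. A minimal sketch, assuming the standard kata-runtime CLI and the KATA_CONF_FILE override honored by the runtime (neither is introduced by this diff), with a kata-deploy style install path:
# point the tooling at the StratoVirt configuration and sanity-check the host
export KATA_CONF_FILE=/opt/kata/share/defaults/kata-containers/configuration-stratovirt.toml
kata-runtime check
kata-runtime env | grep -i -A 3 hypervisor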

View File

@ -52,6 +52,7 @@ const (
qemuHypervisorTableType = "qemu"
acrnHypervisorTableType = "acrn"
dragonballHypervisorTableType = "dragonball"
stratovirtHypervisorTableType = "stratovirt"
// the maximum amount of PCI bridges that can be cold plugged in a VM
maxPCIBridges uint32 = 5
@ -1141,6 +1142,106 @@ func newDragonballHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
}, nil
}
func newStratovirtHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
hypervisor, err := h.path()
if err != nil {
return vc.HypervisorConfig{}, err
}
kernel, err := h.kernel()
if err != nil {
return vc.HypervisorConfig{}, err
}
initrd, err := h.initrd()
if err != nil {
return vc.HypervisorConfig{}, err
}
image, err := h.image()
if err != nil {
return vc.HypervisorConfig{}, err
}
if image != "" && initrd != "" {
return vc.HypervisorConfig{},
errors.New("having both an image and an initrd defined in the configuration file is not supported")
}
if image == "" && initrd == "" {
return vc.HypervisorConfig{},
errors.New("image or initrd must be defined in the configuration file")
}
rootfsType, err := h.rootfsType()
if err != nil {
return vc.HypervisorConfig{}, err
}
kernelParams := h.kernelParams()
machineType := h.machineType()
blockDriver, err := h.blockDeviceDriver()
if err != nil {
return vc.HypervisorConfig{}, err
}
if vSock, err := utils.SupportsVsocks(); !vSock {
return vc.HypervisorConfig{}, err
}
sharedFS, err := h.sharedFS()
if err != nil {
return vc.HypervisorConfig{}, err
}
if sharedFS != config.VirtioFS && sharedFS != config.VirtioFSNydus && sharedFS != config.NoSharedFS {
return vc.HypervisorConfig{},
fmt.Errorf("Stratovirt Hypervisor does not support %s shared filesystem option", sharedFS)
}
if (sharedFS == config.VirtioFS || sharedFS == config.VirtioFSNydus) && h.VirtioFSDaemon == "" {
return vc.HypervisorConfig{},
fmt.Errorf("cannot enable %s without daemon path in configuration file", sharedFS)
}
return vc.HypervisorConfig{
HypervisorPath: hypervisor,
HypervisorPathList: h.HypervisorPathList,
KernelPath: kernel,
InitrdPath: initrd,
ImagePath: image,
RootfsType: rootfsType,
KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)),
HypervisorMachineType: machineType,
NumVCPUsF: h.defaultVCPUs(),
DefaultMaxVCPUs: h.defaultMaxVCPUs(),
MemorySize: h.defaultMemSz(),
MemSlots: h.defaultMemSlots(),
MemOffset: h.defaultMemOffset(),
DefaultMaxMemorySize: h.defaultMaxMemSz(),
EntropySource: h.GetEntropySource(),
DefaultBridges: h.defaultBridges(),
DisableBlockDeviceUse: h.DisableBlockDeviceUse,
SharedFS: sharedFS,
VirtioFSDaemon: h.VirtioFSDaemon,
VirtioFSDaemonList: h.VirtioFSDaemonList,
VirtioFSCacheSize: h.VirtioFSCacheSize,
VirtioFSCache: h.defaultVirtioFSCache(),
VirtioFSExtraArgs: h.VirtioFSExtraArgs,
HugePages: h.HugePages,
Debug: h.Debug,
DisableNestingChecks: h.DisableNestingChecks,
BlockDeviceDriver: blockDriver,
DisableVhostNet: true,
GuestHookPath: h.guestHookPath(),
EnableAnnotations: h.EnableAnnotations,
DisableSeccomp: h.DisableSeccomp,
DisableSeLinux: h.DisableSeLinux,
DisableGuestSeLinux: h.DisableGuestSeLinux,
}, nil
}
func newFactoryConfig(f factory) (oci.FactoryConfig, error) {
if f.TemplatePath == "" {
f.TemplatePath = defaultTemplatePath
@ -1177,6 +1278,9 @@ func updateRuntimeConfigHypervisor(configPath string, tomlConf tomlConfig, confi
case dragonballHypervisorTableType:
config.HypervisorType = vc.DragonballHypervisor
hConfig, err = newDragonballHypervisorConfig(hypervisor)
case stratovirtHypervisorTableType:
config.HypervisorType = vc.StratovirtHypervisor
hConfig, err = newStratovirtHypervisorConfig(hypervisor)
default:
err = fmt.Errorf("%s: %+q", errInvalidHypervisorPrefix, k)
}

View File

@ -48,6 +48,9 @@ const (
// ClhHypervisor is the ICH hypervisor.
ClhHypervisor HypervisorType = "clh"
// StratovirtHypervisor is the StratoVirt hypervisor.
StratovirtHypervisor HypervisorType = "stratovirt"
// DragonballHypervisor is the Dragonball hypervisor.
DragonballHypervisor HypervisorType = "dragonball"
@ -256,6 +259,8 @@ func (hType *HypervisorType) String() string {
return string(AcrnHypervisor)
case ClhHypervisor:
return string(ClhHypervisor)
case StratovirtHypervisor:
return string(StratovirtHypervisor)
case MockHypervisor:
return string(MockHypervisor)
default:

View File

@ -36,6 +36,8 @@ func NewHypervisor(hType HypervisorType) (Hypervisor, error) {
return &Acrn{}, nil
case ClhHypervisor:
return &cloudHypervisor{}, nil
case StratovirtHypervisor:
return &stratovirt{}, nil
case DragonballHypervisor:
return &mockHypervisor{}, nil
case MockHypervisor:

File diff suppressed because it is too large.

View File

@ -0,0 +1,432 @@
//go:build linux
// Copyright (c) 2023 Huawei Technologies Co.,Ltd.
//
// SPDX-License-Identifier: Apache-2.0
//
package virtcontainers
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/kata-containers/kata-containers/src/runtime/pkg/device/config"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func newStratovirtConfig() (HypervisorConfig, error) {
setupStratovirt()
if testStratovirtPath == "" {
return HypervisorConfig{}, errors.New("hypervisor fake path is empty")
}
if testVirtiofsdPath == "" {
return HypervisorConfig{}, errors.New("virtiofsd fake path is empty")
}
if _, err := os.Stat(testStratovirtPath); os.IsNotExist(err) {
return HypervisorConfig{}, err
}
if _, err := os.Stat(testVirtiofsdPath); os.IsNotExist(err) {
return HypervisorConfig{}, err
}
return HypervisorConfig{
HypervisorPath: testStratovirtPath,
KernelPath: testStratovirtKernelPath,
InitrdPath: testStratovirtInitrdPath,
RootfsType: string(EXT4),
NumVCPUsF: defaultVCPUs,
BlockDeviceDriver: config.VirtioBlock,
MemorySize: defaultMemSzMiB,
DefaultMaxVCPUs: uint32(64),
SharedFS: config.VirtioFS,
VirtioFSCache: typeVirtioFSCacheModeAlways,
VirtioFSDaemon: testVirtiofsdPath,
}, nil
}
func TestStratovirtCreateVM(t *testing.T) {
assert := assert.New(t)
store, err := persist.GetDriver()
assert.NoError(err)
network, err := NewNetwork()
assert.NoError(err)
sv := stratovirt{
config: HypervisorConfig{
VMStorePath: store.RunVMStoragePath(),
RunStorePath: store.RunStoragePath(),
},
}
config0, err := newStratovirtConfig()
assert.NoError(err)
config1, err := newStratovirtConfig()
assert.NoError(err)
config1.ImagePath = testStratovirtImagePath
config1.InitrdPath = ""
config2, err := newStratovirtConfig()
assert.NoError(err)
config2.Debug = true
config3, err := newStratovirtConfig()
assert.NoError(err)
config3.SharedFS = config.VirtioFS
config4, err := newStratovirtConfig()
assert.NoError(err)
config4.SharedFS = config.VirtioFSNydus
type testData struct {
config HypervisorConfig
expectError bool
configMatch bool
}
data := []testData{
{config0, false, true},
{config1, false, true},
{config2, false, true},
{config3, false, true},
{config4, false, true},
}
for i, d := range data {
msg := fmt.Sprintf("test[%d]", i)
err = sv.CreateVM(context.Background(), "testSandbox", network, &d.config)
if d.expectError {
assert.Error(err, msg)
continue
}
assert.NoError(err, msg)
if d.configMatch {
assert.Exactly(d.config, sv.config, msg)
}
}
}
func TestStratovirtStartSandbox(t *testing.T) {
assert := assert.New(t)
sConfig, err := newStratovirtConfig()
assert.NoError(err)
sConfig.Debug = true
network, err := NewNetwork()
assert.NoError(err)
store, err := persist.GetDriver()
assert.NoError(err)
sConfig.VMStorePath = store.RunVMStoragePath()
sConfig.RunStorePath = store.RunStoragePath()
sv := &stratovirt{
config: sConfig,
virtiofsDaemon: &virtiofsdMock{},
}
assert.Exactly(sv.stopped.Load(), false)
err = sv.CreateVM(context.Background(), "testSandbox", network, &sConfig)
assert.NoError(err)
mem := sv.GetTotalMemoryMB(context.Background())
assert.True(mem > 0)
err = sv.StartVM(context.Background(), 10)
assert.Error(err)
}
func TestStratovirtCleanupVM(t *testing.T) {
assert := assert.New(t)
store, err := persist.GetDriver()
assert.NoError(err, "persist.GetDriver() unexpected error")
sv := &stratovirt{
id: "cleanVM",
config: HypervisorConfig{
VMStorePath: store.RunVMStoragePath(),
RunStorePath: store.RunStoragePath(),
},
}
sv.svConfig.vmPath = filepath.Join(sv.config.VMStorePath, sv.id)
sv.config.VMid = "cleanVM"
err = sv.cleanupVM(true)
assert.NoError(err, "persist.GetDriver() unexpected error")
dir := filepath.Join(store.RunVMStoragePath(), sv.id)
os.MkdirAll(dir, os.ModePerm)
err = sv.cleanupVM(false)
assert.NoError(err, "persist.GetDriver() unexpected error")
_, err = os.Stat(dir)
assert.Error(err, "dir should not exist %s", dir)
assert.True(os.IsNotExist(err), "persist.GetDriver() unexpected error")
}
func TestStratovirtAddFsDevice(t *testing.T) {
assert := assert.New(t)
sConfig, err := newStratovirtConfig()
assert.NoError(err)
sConfig.SharedFS = config.VirtioFS
mountTag := "testMountTag"
sv := &stratovirt{
ctx: context.Background(),
config: sConfig,
}
volume := types.Volume{
MountTag: mountTag,
}
expected := []VirtioDev{
virtioFs{
backend: "socket",
charID: "virtio_fs",
charDev: "virtio_fs",
tag: volume.MountTag,
deviceID: "virtio-fs0",
driver: mmioBus,
},
}
err = sv.AddDevice(context.Background(), volume, FsDev)
assert.NoError(err)
assert.Exactly(sv.svConfig.devices, expected)
}
func TestStratovirtAddBlockDevice(t *testing.T) {
assert := assert.New(t)
sConfig, err := newStratovirtConfig()
assert.NoError(err)
sv := &stratovirt{
ctx: context.Background(),
config: sConfig,
}
blockDrive := config.BlockDrive{}
expected := []VirtioDev{
blkDevice{
id: "rootfs",
filePath: sv.svConfig.rootfsPath,
deviceID: "virtio-blk0",
driver: mmioBus,
},
}
err = sv.AddDevice(context.Background(), blockDrive, BlockDev)
assert.NoError(err)
assert.Exactly(sv.svConfig.devices, expected)
}
func TestStratovirtAddVsockDevice(t *testing.T) {
assert := assert.New(t)
sConfig, err := newStratovirtConfig()
assert.NoError(err)
dir := t.TempDir()
vsockFilename := filepath.Join(dir, "vsock")
contextID := uint64(3)
port := uint32(1024)
vsockFile, fileErr := os.Create(vsockFilename)
assert.NoError(fileErr)
defer vsockFile.Close()
sv := &stratovirt{
ctx: context.Background(),
config: sConfig,
}
vsock := types.VSock{
ContextID: contextID,
Port: port,
VhostFd: vsockFile,
}
expected := []VirtioDev{
vhostVsock{
id: "vsock-id",
guestID: fmt.Sprintf("%d", contextID),
VHostFD: vsockFile,
driver: mmioBus,
},
}
err = sv.AddDevice(context.Background(), vsock, VSockPCIDev)
assert.NoError(err)
assert.Exactly(sv.svConfig.devices, expected)
}
func TestStratovirtAddConsole(t *testing.T) {
assert := assert.New(t)
sConfig, err := newStratovirtConfig()
assert.NoError(err)
sv := &stratovirt{
ctx: context.Background(),
config: sConfig,
}
sock := types.Socket{}
expected := []VirtioDev{
consoleDevice{
id: "virtio-serial0",
backend: "socket",
charID: "charconsole0",
devType: "virtconsole",
charDev: "charconsole0",
deviceID: "virtio-console0",
driver: mmioBus,
},
}
err = sv.AddDevice(context.Background(), sock, SerialPortDev)
assert.NoError(err)
assert.Exactly(sv.svConfig.devices, expected)
}
func TestStratovirtGetSandboxConsole(t *testing.T) {
assert := assert.New(t)
store, err := persist.GetDriver()
assert.NoError(err)
sandboxID := "testSandboxID"
sv := &stratovirt{
id: sandboxID,
ctx: context.Background(),
config: HypervisorConfig{
VMStorePath: store.RunVMStoragePath(),
RunStorePath: store.RunStoragePath(),
},
}
expected := filepath.Join(store.RunVMStoragePath(), sandboxID, debugSocket)
proto, result, err := sv.GetVMConsole(sv.ctx, sandboxID)
assert.NoError(err)
assert.Equal(result, expected)
assert.Equal(proto, consoleProtoUnix)
}
func TestStratovirtCapabilities(t *testing.T) {
assert := assert.New(t)
sConfig, err := newStratovirtConfig()
assert.NoError(err)
sv := stratovirt{}
assert.Equal(sv.config, HypervisorConfig{})
sConfig.SharedFS = config.VirtioFS
err = sv.setConfig(&sConfig)
assert.NoError(err)
var ctx context.Context
c := sv.Capabilities(ctx)
assert.True(c.IsFsSharingSupported())
sConfig.SharedFS = config.NoSharedFS
err = sv.setConfig(&sConfig)
assert.NoError(err)
c = sv.Capabilities(ctx)
assert.False(c.IsFsSharingSupported())
}
func TestStratovirtSetConfig(t *testing.T) {
assert := assert.New(t)
config, err := newStratovirtConfig()
assert.NoError(err)
sv := stratovirt{}
assert.Equal(sv.config, HypervisorConfig{})
err = sv.setConfig(&config)
assert.NoError(err)
assert.Equal(sv.config, config)
}
func TestStratovirtCleanup(t *testing.T) {
assert := assert.New(t)
sConfig, err := newStratovirtConfig()
assert.NoError(err)
sv := &stratovirt{
ctx: context.Background(),
config: sConfig,
}
err = sv.Cleanup(sv.ctx)
assert.Nil(err)
}
func TestStratovirtGetpids(t *testing.T) {
assert := assert.New(t)
sv := &stratovirt{}
pids := sv.GetPids()
assert.NotNil(pids)
assert.True(len(pids) == 1)
assert.True(pids[0] == 0)
}
func TestStratovirtBinPath(t *testing.T) {
assert := assert.New(t)
f, err := os.CreateTemp("", "stratovirt")
assert.NoError(err)
defer func() { _ = f.Close() }()
defer func() { _ = os.Remove(f.Name()) }()
expectedPath := f.Name()
sConfig, err := newStratovirtConfig()
assert.NoError(err)
sConfig.HypervisorPath = expectedPath
sv := &stratovirt{
config: sConfig,
}
// get config hypervisor path
path, err := sv.binPath()
assert.NoError(err)
assert.Equal(path, expectedPath)
// config hypervisor path does not exist
sv.config.HypervisorPath = "/abc/xyz/123"
path, err = sv.binPath()
assert.Error(err)
assert.Equal(path, "")
// get default stratovirt hypervisor path
sv.config.HypervisorPath = ""
path, err = sv.binPath()
if _, errStat := os.Stat(path); os.IsNotExist(errStat) {
assert.Error(err)
assert.Equal(path, "")
} else {
assert.NoError(err)
assert.Equal(path, defaultStratoVirt)
}
}

View File

@ -48,6 +48,10 @@ var testAcrnKernelPath = ""
var testAcrnImagePath = ""
var testAcrnPath = ""
var testAcrnCtlPath = ""
var testStratovirtKernelPath = ""
var testStratovirtImagePath = ""
var testStratovirtInitrdPath = ""
var testStratovirtPath = ""
var testVirtiofsdPath = ""
var testHyperstartCtlSocket = ""
@ -89,6 +93,18 @@ func setupClh() {
}
}
func setupStratovirt() {
os.Mkdir(filepath.Join(testDir, testBundle), DirMode)
for _, filename := range []string{testStratovirtKernelPath, testStratovirtInitrdPath, testStratovirtPath, testVirtiofsdPath} {
_, err := os.Create(filename)
if err != nil {
fmt.Printf("Could not recreate %s:%v", filename, err)
os.Exit(1)
}
}
}
// TestMain is the common main function used by ALL the test functions
// for this package.
func TestMain(m *testing.M) {
@ -149,6 +165,13 @@ func TestMain(m *testing.M) {
setupClh()
testStratovirtKernelPath = filepath.Join(testDir, testBundle, testKernel)
testStratovirtImagePath = filepath.Join(testDir, testBundle, testInitrd)
testStratovirtInitrdPath = filepath.Join(testDir, testBundle, testInitrd)
testStratovirtPath = filepath.Join(testDir, testBundle, testHypervisor)
setupStratovirt()
// set now that configStoragePath has been overridden.
sandboxDirState = filepath.Join(fs.MockRunStoragePath(), testSandboxID)

View File

@ -0,0 +1,162 @@
# Copyright (c) 2023 Huawei Technologies Co.,Ltd.
#
# SPDX-License-Identifier: Apache-2.0
#
# This file contains baseline expectations
# for checked results by checkmetrics tool.
[[metric]]
name = "boot-times"
type = "json"
description = "measure container lifecycle timings"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"boot-times\".Results | .[] | .\"to-workload\".Result"
checktype = "mean"
midval = 0.62
minpercent = 40.0
maxpercent = 40.0
[[metric]]
name = "memory-footprint"
type = "json"
description = "measure memory usage"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"memory-footprint\".Results | .[] | .average.Result"
checktype = "mean"
midval = 129842.10
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "memory-footprint-inside-container"
type = "json"
description = "measure memory inside the container"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"memory-footprint-inside-container\".Results | .[] | .memtotal.Result"
checktype = "mean"
midval = 2040568.0
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "blogbench"
type = "json"
description = "measure container average of blogbench write"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"blogbench\".Results | .[] | .write.Result"
checktype = "mean"
midval = 603.0
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "blogbench"
type = "json"
description = "measure container average of blogbench read"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"blogbench\".Results | .[] | .read.Result"
checktype = "mean"
midval = 37669.0
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "tensorflow_nhwc"
type = "json"
description = "tensorflow resnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow_nhwc\".Results | .[] | .resnet.Result"
checktype = "mean"
midval = 2025.0
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "tensorflow_nhwc"
type = "json"
description = "tensorflow alexnet model"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"tensorflow_nhwc\".Results | .[] | .alexnet.Result"
checktype = "mean"
midval = 75.0
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "latency"
type = "json"
description = "measure container latency"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"latency\".Results | .[] | .latency.Result"
checktype = "mean"
midval = 0.78
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "measure container cpu utilization using iperf3"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .cpu.Result"
checktype = "mean"
midval = 60.10
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "measure container bandwidth using iperf3"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .bandwidth.Result"
checktype = "mean"
midval = 19959440840.94
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "measure container parallel bandwidth using iperf3"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .parallel.Result"
checktype = "mean"
midval = 25487333685.04
minpercent = 30.0
maxpercent = 30.0
[[metric]]
name = "network-iperf3"
type = "json"
description = "iperf"
# Min and Max values to set a 'range' that
# the median of the CSV Results data must fall
# within (inclusive)
checkvar = ".\"network-iperf3\".Results | .[] | .jitter.Result"
checktype = "mean"
midval = 0.038
minpercent = 40.0
maxpercent = 40.0
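Each entry defines a pass window around midval. Assuming checkmetrics treats minpercent/maxpercent as percentage deviations from midval (an interpretation, not shown in this diff), the boot-times baseline above works out as follows:
# accepted range = [midval * (1 - minpercent/100), midval * (1 + maxpercent/100)]
echo "0.62 * (1 - 40/100)" | bc -l   # lower bound -> 0.372
echo "0.62 * (1 + 40/100)" | bc -l   # upper bound -> 0.868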

View File

@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment-stratovirt
spec:
selector:
matchLabels:
app: nginx
replicas: 2
template:
metadata:
labels:
app: nginx
spec:
runtimeClassName: kata-stratovirt
containers:
- name: nginx
image: nginx:1.14
ports:
- containerPort: 80
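This manifest exercises the new runtime class directly. A minimal sketch of applying it by hand, assuming the kata-stratovirt RuntimeClass and the StratoVirt shim are already deployed on the cluster (the file name is an assumption):
kubectl apply -f nginx-deployment-stratovirt.yaml
kubectl get pods -l app=nginx -o wide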

View File

@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: php-apache-kata-stratovirt
name: php-apache-kata-stratovirt
spec:
replicas: 1
selector:
matchLabels:
run: php-apache-kata-stratovirt
template:
metadata:
labels:
run: php-apache-kata-stratovirt
spec:
runtimeClassName: kata-stratovirt
containers:
- image: k8s.gcr.io/hpa-example
imagePullPolicy: Always
name: php-apache
ports:
- containerPort: 80
protocol: TCP
resources:
requests:
cpu: 200m
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: php-apache-kata-stratovirt
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
run: php-apache-kata-stratovirt
sessionAffinity: None
type: ClusterIP

View File

@ -30,7 +30,7 @@ spec:
- name: DEBUG
value: "false"
- name: SHIMS
value: "clh dragonball fc qemu-nvidia-gpu qemu-sev qemu-snp qemu-tdx qemu"
value: "clh dragonball fc qemu-nvidia-gpu qemu-sev qemu-snp qemu-tdx qemu stratovirt"
- name: DEFAULT_SHIM
value: "qemu"
- name: CREATE_RUNTIMECLASSES

View File

@ -32,7 +32,7 @@ spec:
- name: DEBUG
value: "false"
- name: SHIMS
value: "clh dragonball fc qemu qemu-nvidia-gpu qemu-sev qemu-snp qemu-tdx"
value: "clh dragonball fc qemu qemu-nvidia-gpu qemu-sev qemu-snp qemu-tdx stratovirt"
- name: DEFAULT_SHIM
value: "qemu"
- name: CREATE_RUNTIMECLASSES

View File

@ -35,6 +35,7 @@ all: serial-targets \
qemu-snp-experimental-tarball \
qemu-tarball \
qemu-tdx-experimental-tarball \
stratovirt-tarball \
shim-v2-tarball \
tdvf-tarball \
virtiofsd-tarball
@ -115,6 +116,9 @@ qemu-tarball:
qemu-tdx-experimental-tarball:
${MAKE} $@-build
stratovirt-tarball:
${MAKE} $@-build
rootfs-image-tarball:
${MAKE} $@-build
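The new target follows the existing per-component tarball pattern, so the StratoVirt asset can be built in isolation. A minimal sketch, assuming it is run from the local-build directory that owns this Makefile:
# delegates to kata-deploy-binaries.sh (see the next file) and produces the static StratoVirt tarball
make stratovirt-tarball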

View File

@ -30,6 +30,7 @@ readonly kernel_builder="${static_build_dir}/kernel/build.sh"
readonly ovmf_builder="${static_build_dir}/ovmf/build.sh"
readonly qemu_builder="${static_build_dir}/qemu/build-static-qemu.sh"
readonly qemu_experimental_builder="${static_build_dir}/qemu/build-static-qemu-experimental.sh"
readonly stratovirt_builder="${static_build_dir}/stratovirt/build-static-stratovirt.sh"
readonly shimv2_builder="${static_build_dir}/shim-v2/build.sh"
readonly virtiofsd_builder="${static_build_dir}/virtiofsd/build.sh"
readonly nydus_builder="${static_build_dir}/nydus/build.sh"
@ -104,6 +105,7 @@ options:
qemu
qemu-snp-experimental
qemu-tdx-experimental
stratovirt
rootfs-image
rootfs-image-tdx
rootfs-initrd
@ -515,6 +517,28 @@ install_clh_glibc() {
install_clh_helper "gnu" "${features}" "-glibc"
}
# Install static stratovirt asset
install_stratovirt() {
local stratovirt_version=$(get_from_kata_deps "assets.hypervisor.stratovirt.version")
latest_artefact="${stratovirt_version}"
latest_builder_image=""
install_cached_tarball_component \
"stratovirt" \
"${latest_artefact}" \
"${latest_builder_image}" \
"${final_tarball_name}" \
"${final_tarball_path}" \
&& return 0
info "build static stratovirt"
"${stratovirt_builder}"
info "Install static stratovirt"
mkdir -p "${destdir}/opt/kata/bin/"
sudo install -D --owner root --group root --mode 0744 static-stratovirt/stratovirt "${destdir}/opt/kata/bin/stratovirt"
}
# Install static virtiofsd asset
install_virtiofsd() {
latest_artefact="$(get_from_kata_deps "externals.virtiofsd.version")-$(get_from_kata_deps "externals.virtiofsd.toolchain")"
@ -742,6 +766,7 @@ handle_build() {
install_qemu
install_qemu_snp_experimental
install_qemu_tdx_experimental
install_stratovirt
install_runk
install_shimv2
install_tdvf
@ -791,6 +816,8 @@ handle_build() {
qemu-tdx-experimental) install_qemu_tdx_experimental ;;
stratovirt) install_stratovirt ;;
rootfs-image) install_image ;;
rootfs-image-tdx) install_image_tdx ;;
@ -871,6 +898,7 @@ main() {
log-parser-rs
nydus
qemu
stratovirt
rootfs-image
rootfs-initrd
rootfs-initrd-mariner

View File

@ -102,3 +102,16 @@ overhead:
scheduling:
nodeSelector:
katacontainers.io/kata-runtime: "true"
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: kata-stratovirt
handler: kata-stratovirt
overhead:
podFixed:
memory: "130Mi"
cpu: "250m"
scheduling:
nodeSelector:
katacontainers.io/kata-runtime: "true"

View File

@ -0,0 +1,13 @@
---
kind: RuntimeClass
apiVersion: node.k8s.io/v1
metadata:
name: kata-stratovirt
handler: kata-stratovirt
overhead:
podFixed:
memory: "130Mi"
cpu: "250m"
scheduling:
nodeSelector:
katacontainers.io/kata-runtime: "true"
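After this manifest (or kata-deploy with CREATE_RUNTIMECLASSES enabled) is applied, the class and its fixed pod overhead should be visible on the cluster. A minimal sketch for verifying it:
kubectl get runtimeclass kata-stratovirt -o yaml | grep -A 3 overhead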

View File

@ -0,0 +1,38 @@
#!/usr/bin/env bash
#
# Copyright (c) 2023 Huawei Technologies Co.,Ltd.
#
# SPDX-License-Identifier: Apache-2.0
set -o errexit
set -o nounset
set -o pipefail
ARCH=$(uname -m)
# Currently, StratoVirt only supports x86_64 and aarch64.
[ "${ARCH}" != "x86_64" ] && [ "${ARCH}" != "aarch64" ] && exit
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${script_dir}/../../scripts/lib.sh"
info "Get stratovirt information from runtime versions.yaml"
stratovirt_url="${stratovirt_url:-}"
[ -n "$stratovirt_url" ] || stratovirt_url=$(get_from_kata_deps "assets.hypervisor.stratovirt.url")
[ -n "$stratovirt_url" ] || die "failed to get stratovirt url"
stratovirt_version="${stratovirt_version:-}"
[ -n "$stratovirt_version" ] || stratovirt_version=$(get_from_kata_deps "assets.hypervisor.stratovirt.version")
[ -n "$stratovirt_version" ] || die "failed to get stratovirt version"
pull_stratovirt_released_binary() {
file_name="stratovirt-static-${stratovirt_version##*v}-${ARCH}"
download_url="${stratovirt_url}/releases/download/${stratovirt_version}/${file_name}.tar.gz"
curl -L ${download_url} -o ${file_name}.tar.gz
mkdir -p static-stratovirt
tar zxvf ${file_name}.tar.gz -C static-stratovirt
}
pull_stratovirt_released_binary
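The script only fetches and unpacks the prebuilt release archive, so it can also be run on its own. A minimal sketch, assuming the path implied by stratovirt_builder in kata-deploy-binaries.sh and that the resulting binary accepts -version (an assumption, not shown in this diff):
# run the builder directly; the binary lands under ./static-stratovirt/
tools/packaging/static-build/stratovirt/build-static-stratovirt.sh
./static-stratovirt/stratovirt -version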

View File

@ -115,6 +115,11 @@ assets:
url: "https://github.com/AMDESE/qemu"
tag: "3b6a2b6b7466f6dea53243900b7516c3f29027b7"
stratovirt:
description: "StratoVirt is an lightweight opensource VMM"
url: "https://github.com/openeuler-mirror/stratovirt"
version: "v2.3.0"
image:
description: |
Root filesystem disk image used to boot the guest virtual