config: Protect virtio_fs_daemon annotation
Sending the virtio_fs_daemon annotation can be used to execute arbitrary code on the host. To prevent this, restrict the values accepted for this annotation to a list provided by the configuration file.

Fixes: #901

Signed-off-by: Christophe de Dinechin <dinechin@redhat.com>
commit bf13ff0a3a (parent 8c75de1966)
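Illustrative sketch of the mechanism (not the commit's code; the function name here is made up): the annotated daemon path is accepted only if it matches one of the regular expressions configured in virtio_fs_daemon_list, so with the default empty list every value supplied through the annotation is refused. The actual implementation is in the regexpContains helper and addHypervisporVirtioFsOverrides in the diff below.

    package main

    import (
        "fmt"
        "regexp"
    )

    // checkVirtioFSDaemon mirrors the commit's validation logic: accept the
    // annotated path only if it matches a configured pattern.
    func checkVirtioFSDaemon(allowedPatterns []string, value string) error {
        for _, pattern := range allowedPatterns {
            if matched, _ := regexp.MatchString(pattern, value); matched {
                return nil
            }
        }
        return fmt.Errorf("virtiofs daemon %v required from annotation is not valid", value)
    }

    func main() {
        // Default configuration: empty list, any annotated value is rejected.
        fmt.Println(checkVirtioFSDaemon(nil, "/usr/libexec/virtiofsd")) // error

        // With an explicit allow-list, a listed path is accepted.
        allowed := []string{"/opt/kata/bin/virtiofsd"}
        fmt.Println(checkVirtioFSDaemon(allowed, "/opt/kata/bin/virtiofsd")) // <nil>
    }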
@@ -62,6 +62,9 @@ default_memory = @DEFMEMSZ@
 # Path to vhost-user-fs daemon.
 virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
 
+# List of valid annotations values for the virtiofs daemon (default: empty)
+# virtio_fs_daemon_list = [ "/opt/kata/bin/virtiofsd", "/usr/.*/virtiofsd" ]
+
 # Default size of DAX cache in MiB
 virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
 
@@ -110,6 +110,9 @@ shared_fs = "@DEFSHAREDFS_QEMU_VIRTIOFS@"
 # Path to vhost-user-fs daemon.
 virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
 
+# List of valid annotations values for the virtiofs daemon (default: empty)
+# virtio_fs_daemon_list = [ "/opt/kata/bin/virtiofsd", "/usr/.*/virtiofsd" ]
+
 # Default size of DAX cache in MiB
 virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
 
@@ -238,7 +241,7 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
 #hotplug_vfio_on_root_bus = true
 
 # If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
-# security (vhost-net runs ring0) for network I/O performance.
+# security (vhost-net runs ring0) for network I/O performance.
 #disable_vhost_net = true
 
 #
@@ -101,10 +101,10 @@ default_memory = @DEFMEMSZ@
 #enable_virtio_mem = true
 
 # Disable block device from being used for a container's rootfs.
-# In case of a storage driver like devicemapper where a container's
+# In case of a storage driver like devicemapper where a container's
 # root file system is backed by a block device, the block device is passed
-# directly to the hypervisor for performance reasons.
-# This flag prevents the block device from being passed to the hypervisor,
+# directly to the hypervisor for performance reasons.
+# This flag prevents the block device from being passed to the hypervisor,
 # 9pfs is used instead to pass the rootfs.
 disable_block_device_use = @DEFDISABLEBLOCK@
 
@@ -116,6 +116,9 @@ shared_fs = "@DEFSHAREDFS@"
 # Path to vhost-user-fs daemon.
 virtio_fs_daemon = "@DEFVIRTIOFSDAEMON@"
 
+# List of valid annotations values for the virtiofs daemon (default: empty)
+# virtio_fs_daemon_list = [ "/opt/kata/bin/virtiofsd", "/usr/.*/virtiofsd" ]
+
 # Default size of DAX cache in MiB
 virtio_fs_cache_size = @DEFVIRTIOFSCACHESIZE@
 
@@ -180,7 +183,7 @@ enable_iothreads = @DEFENABLEIOTHREADS@
 # Enabling this will result in the VM memory
 # being allocated using huge pages.
 # This is useful when you want to use vhost-user network
-# stacks within the container. This will automatically
+# stacks within the container. This will automatically
 # result in memory pre allocation
 #enable_hugepages = true
 
@@ -236,9 +239,9 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
 # Default is false
 #disable_image_nvdimm = true
 
-# VFIO devices are hotplugged on a bridge by default.
+# VFIO devices are hotplugged on a bridge by default.
 # Enable hotplugging on root bus. This may be required for devices with
-# a large PCI bar, as this is a current limitation with hotplugging on
+# a large PCI bar, as this is a current limitation with hotplugging on
 # a bridge. This value is valid for "pc" machine type.
 # Default false
 #hotplug_vfio_on_root_bus = true
@@ -251,7 +254,7 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
 #pcie_root_port = 2
 
 # If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
-# security (vhost-net runs ring0) for network I/O performance.
+# security (vhost-net runs ring0) for network I/O performance.
 #disable_vhost_net = true
 
 #
@@ -71,7 +71,7 @@ type factory struct {
 
 type hypervisor struct {
     Path               string   `toml:"path"`
-    PathList           []string `toml:"path_list"`
+    HypervisorPathList []string `toml:"path_list"`
     JailerPath         string   `toml:"jailer_path"`
     JailerPathList     []string `toml:"jailer_path_list"`
     Kernel             string   `toml:"kernel"`
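For context, the *_list keys added to the configuration templates above decode into []string fields like these through their toml tags. A minimal sketch, assuming the BurntSushi/toml decoder used by the runtime's configuration loader; the reduced struct here is illustrative, not the full hypervisor section:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    // Reduced, illustrative stand-in for the runtime's hypervisor section:
    // the *_list keys decode into []string fields through their toml tags.
    type hypervisorSection struct {
        VirtioFSDaemon     string   `toml:"virtio_fs_daemon"`
        VirtioFSDaemonList []string `toml:"virtio_fs_daemon_list"`
    }

    func main() {
        data := `
    virtio_fs_daemon = "/usr/libexec/virtiofsd"
    virtio_fs_daemon_list = [ "/opt/kata/bin/virtiofsd", "/usr/.*/virtiofsd" ]
    `
        var h hypervisorSection
        if _, err := toml.Decode(data, &h); err != nil {
            panic(err)
        }
        fmt.Println(h.VirtioFSDaemon, h.VirtioFSDaemonList)
    }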
@@ -533,6 +533,7 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 
     return vc.HypervisorConfig{
         HypervisorPath:     hypervisor,
+        HypervisorPathList: h.HypervisorPathList,
         JailerPath:         jailer,
         KernelPath:         kernel,
         InitrdPath:         initrd,
@@ -634,6 +635,7 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 
     return vc.HypervisorConfig{
         HypervisorPath:     hypervisor,
+        HypervisorPathList: h.HypervisorPathList,
         KernelPath:         kernel,
         InitrdPath:         initrd,
         ImagePath:          image,
@@ -723,6 +725,7 @@ func newAcrnHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 
     return vc.HypervisorConfig{
         HypervisorPath:     hypervisor,
+        HypervisorPathList: h.HypervisorPathList,
         KernelPath:         kernel,
         ImagePath:          image,
         HypervisorCtlPath:  hypervisorctl,
@@ -793,6 +796,7 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 
     return vc.HypervisorConfig{
         HypervisorPath:     hypervisor,
+        HypervisorPathList: h.HypervisorPathList,
         KernelPath:         kernel,
         InitrdPath:         initrd,
         ImagePath:          image,
@@ -811,6 +815,7 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
         DisableBlockDeviceUse: h.DisableBlockDeviceUse,
         SharedFS:              sharedFS,
         VirtioFSDaemon:        h.VirtioFSDaemon,
+        VirtioFSDaemonList:    h.VirtioFSDaemonList,
         VirtioFSCacheSize:     h.VirtioFSCacheSize,
         VirtioFSCache:         h.VirtioFSCache,
         MemPrealloc:           h.MemPrealloc,
@@ -275,6 +275,9 @@ type HypervisorConfig struct {
     // HypervisorPath is the hypervisor executable host path.
     HypervisorPath string
 
+    // HypervisorPathList is the list of hypervisor paths names allowed in annotations
+    HypervisorPathList []string
+
     // HypervisorCtlPath is the hypervisor ctl executable host path.
     HypervisorCtlPath string
 
@@ -309,6 +312,9 @@ type HypervisorConfig struct {
     // VirtioFSDaemon is the virtio-fs vhost-user daemon path
     VirtioFSDaemon string
 
+    // VirtioFSDaemonList is the list of valid virtiofs names for annotations
+    VirtioFSDaemonList []string
+
     // VirtioFSCache cache mode for fs version cache or "none"
     VirtioFSCache string
 
@@ -212,6 +212,7 @@ func (s *Sandbox) dumpConfig(ss *persistapi.SandboxState) {
         MachineAccelerators: sconfig.HypervisorConfig.MachineAccelerators,
         CPUFeatures:         sconfig.HypervisorConfig.CPUFeatures,
         HypervisorPath:      sconfig.HypervisorConfig.HypervisorPath,
+        HypervisorPathList:  sconfig.HypervisorConfig.HypervisorPathList,
         HypervisorCtlPath:   sconfig.HypervisorConfig.HypervisorCtlPath,
         JailerPath:          sconfig.HypervisorConfig.JailerPath,
         BlockDeviceDriver:   sconfig.HypervisorConfig.BlockDeviceDriver,
@@ -221,6 +222,7 @@ func (s *Sandbox) dumpConfig(ss *persistapi.SandboxState) {
         EntropySource:       sconfig.HypervisorConfig.EntropySource,
         SharedFS:            sconfig.HypervisorConfig.SharedFS,
         VirtioFSDaemon:      sconfig.HypervisorConfig.VirtioFSDaemon,
+        VirtioFSDaemonList:  sconfig.HypervisorConfig.VirtioFSDaemonList,
         VirtioFSCache:       sconfig.HypervisorConfig.VirtioFSCache,
         VirtioFSExtraArgs:   sconfig.HypervisorConfig.VirtioFSExtraArgs[:],
         BlockDeviceCacheSet: sconfig.HypervisorConfig.BlockDeviceCacheSet,
@@ -474,6 +476,7 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) {
         MachineAccelerators: hconf.MachineAccelerators,
         CPUFeatures:         hconf.CPUFeatures,
         HypervisorPath:      hconf.HypervisorPath,
+        HypervisorPathList:  hconf.HypervisorPathList,
         HypervisorCtlPath:   hconf.HypervisorCtlPath,
         JailerPath:          hconf.JailerPath,
         BlockDeviceDriver:   hconf.BlockDeviceDriver,
@@ -483,6 +486,7 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) {
         EntropySource:       hconf.EntropySource,
         SharedFS:            hconf.SharedFS,
         VirtioFSDaemon:      hconf.VirtioFSDaemon,
+        VirtioFSDaemonList:  hconf.VirtioFSDaemonList,
         VirtioFSCache:       hconf.VirtioFSCache,
         VirtioFSExtraArgs:   hconf.VirtioFSExtraArgs[:],
         BlockDeviceCacheSet: hconf.BlockDeviceCacheSet,
@@ -60,6 +60,9 @@ type HypervisorConfig struct {
     // HypervisorPath is the hypervisor executable host path.
     HypervisorPath string
 
+    // HypervisorPathList is the list of hypervisor paths names allowed in annotations
+    HypervisorPathList []string
+
     // HypervisorCtlPath is the hypervisor ctl executable host path.
     HypervisorCtlPath string
 
@@ -94,6 +97,9 @@ type HypervisorConfig struct {
     // VirtioFSDaemon is the virtio-fs vhost-user daemon path
     VirtioFSDaemon string
 
+    // VirtioFSDaemonList is the list of valid virtiofs names for annotations
+    VirtioFSDaemonList []string
+
     // VirtioFSCache cache mode for fs version cache or "none"
     VirtioFSCache string
 
@@ -10,6 +10,7 @@ import (
     "errors"
     "fmt"
     "path/filepath"
+    "regexp"
     goruntime "runtime"
     "strconv"
     "strings"
@@ -191,6 +192,15 @@ func contains(s []string, e string) bool {
     return false
 }
 
+func regexpContains(s []string, e string) bool {
+    for _, a := range s {
+        if matched, _ := regexp.MatchString(a, e); matched {
+            return true
+        }
+    }
+    return false
+}
+
 func newLinuxDeviceInfo(d specs.LinuxDevice) (*config.DeviceInfo, error) {
     allowedDeviceTypes := []string{"c", "b", "u", "p"}
 
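Usage sketch (not part of the commit) showing how regexpContains evaluates a daemon path against the example patterns from the configuration comment. Note that regexp.MatchString performs an unanchored match, so a pattern matches anywhere in the value unless explicitly anchored with ^ and $.

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same logic as the regexpContains helper added above.
    func regexpContains(patterns []string, value string) bool {
        for _, p := range patterns {
            if matched, _ := regexp.MatchString(p, value); matched {
                return true
            }
        }
        return false
    }

    func main() {
        // Example patterns taken from the configuration comment.
        allowed := []string{"/opt/kata/bin/virtiofsd", "/usr/.*/virtiofsd"}

        fmt.Println(regexpContains(allowed, "/opt/kata/bin/virtiofsd")) // true: literal match
        fmt.Println(regexpContains(allowed, "/usr/libexec/virtiofsd"))  // true: matches "/usr/.*/virtiofsd"
        fmt.Println(regexpContains(allowed, "/tmp/evil-virtiofsd"))     // false: no pattern matches
    }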
@@ -323,13 +333,13 @@ func SandboxID(spec specs.Spec) (string, error) {
     return "", fmt.Errorf("Could not find sandbox ID")
 }
 
-func addAnnotations(ocispec specs.Spec, config *vc.SandboxConfig) error {
+func addAnnotations(ocispec specs.Spec, config *vc.SandboxConfig, runtime RuntimeConfig) error {
     addAssetAnnotations(ocispec, config)
-    if err := addHypervisorConfigOverrides(ocispec, config); err != nil {
+    if err := addHypervisorConfigOverrides(ocispec, config, runtime); err != nil {
         return err
     }
 
-    if err := addRuntimeConfigOverrides(ocispec, config); err != nil {
+    if err := addRuntimeConfigOverrides(ocispec, config, runtime); err != nil {
         return err
     }
 
@@ -362,7 +372,7 @@ func addAssetAnnotations(ocispec specs.Spec, config *vc.SandboxConfig) {
     }
 }
 
-func addHypervisorConfigOverrides(ocispec specs.Spec, config *vc.SandboxConfig) error {
+func addHypervisorConfigOverrides(ocispec specs.Spec, config *vc.SandboxConfig, runtime RuntimeConfig) error {
     if err := addHypervisorCPUOverrides(ocispec, config); err != nil {
         return err
     }
@@ -375,7 +385,7 @@ func addHypervisorConfigOverrides(ocispec specs.Spec, config *vc.SandboxConfig)
         return err
     }
 
-    if err := addHypervisporVirtioFsOverrides(ocispec, config); err != nil {
+    if err := addHypervisporVirtioFsOverrides(ocispec, config, runtime); err != nil {
         return err
     }
 
@@ -661,7 +671,7 @@ func addHypervisorBlockOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig)
     return nil
 }
 
-func addHypervisporVirtioFsOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig) error {
+func addHypervisporVirtioFsOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig, runtime RuntimeConfig) error {
     if value, ok := ocispec.Annotations[vcAnnotations.SharedFS]; ok {
         supportedSharedFS := []string{config.Virtio9P, config.VirtioFS}
         valid := false
@@ -678,6 +688,9 @@ func addHypervisporVirtioFsOverrides(ocispec specs.Spec, sbConfig *vc.SandboxCon
     }
 
     if value, ok := ocispec.Annotations[vcAnnotations.VirtioFSDaemon]; ok {
+        if !regexpContains(runtime.HypervisorConfig.VirtioFSDaemonList, value) {
+            return fmt.Errorf("virtiofs daemon %v required from annotation is not valid", value)
+        }
         sbConfig.HypervisorConfig.VirtioFSDaemon = value
     }
 
@@ -745,7 +758,7 @@ func addHypervisporNetworkOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConf
     return nil
 }
 
-func addRuntimeConfigOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig) error {
+func addRuntimeConfigOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig, runtime RuntimeConfig) error {
     if value, ok := ocispec.Annotations[vcAnnotations.DisableGuestSeccomp]; ok {
         disableGuestSeccomp, err := strconv.ParseBool(value)
         if err != nil {
@@ -885,7 +898,7 @@ func SandboxConfig(ocispec specs.Spec, runtime RuntimeConfig, bundlePath, cid, c
         Experimental: runtime.Experimental,
     }
 
-    if err := addAnnotations(ocispec, &sandboxConfig); err != nil {
+    if err := addAnnotations(ocispec, &sandboxConfig, runtime); err != nil {
         return vc.SandboxConfig{}, err
     }
 
@@ -676,7 +676,12 @@ func TestAddAssetAnnotations(t *testing.T) {
         Annotations: expectedAnnotations,
     }
 
-    addAnnotations(ocispec, &config)
+    runtimeConfig := RuntimeConfig{
+        HypervisorType: vc.QemuHypervisor,
+        Console:        consolePath,
+    }
+
+    addAnnotations(ocispec, &config, runtimeConfig)
     assert.Exactly(expectedAnnotations, config.Annotations)
 }
 
@@ -700,9 +705,14 @@ func TestAddAgentAnnotations(t *testing.T) {
         ContainerPipeSize: 1024,
     }
 
+    runtimeConfig := RuntimeConfig{
+        HypervisorType: vc.QemuHypervisor,
+        Console:        consolePath,
+    }
+
     ocispec.Annotations[vcAnnotations.KernelModules] = strings.Join(expectedAgentConfig.KernelModules, KernelModulesSeparator)
     ocispec.Annotations[vcAnnotations.AgentContainerPipeSize] = "1024"
-    addAnnotations(ocispec, &config)
+    addAnnotations(ocispec, &config, runtimeConfig)
     assert.Exactly(expectedAgentConfig, config.AgentConfig)
 }
 
@@ -722,8 +732,13 @@ func TestContainerPipeSizeAnnotation(t *testing.T) {
         ContainerPipeSize: 0,
     }
 
+    runtimeConfig := RuntimeConfig{
+        HypervisorType: vc.QemuHypervisor,
+        Console:        consolePath,
+    }
+
     ocispec.Annotations[vcAnnotations.AgentContainerPipeSize] = "foo"
-    err := addAnnotations(ocispec, &config)
+    err := addAnnotations(ocispec, &config, runtimeConfig)
     assert.Error(err)
     assert.Exactly(expectedAgentConfig, config.AgentConfig)
 }
@@ -752,8 +767,13 @@ func TestAddHypervisorAnnotations(t *testing.T) {
         },
     }
 
+    runtimeConfig := RuntimeConfig{
+        HypervisorType: vc.QemuHypervisor,
+        Console:        consolePath,
+    }
+
     ocispec.Annotations[vcAnnotations.KernelParams] = "vsyscall=emulate iommu=on"
-    addHypervisorConfigOverrides(ocispec, &config)
+    addHypervisorConfigOverrides(ocispec, &config, runtimeConfig)
     assert.Exactly(expectedHyperConfig, config.HypervisorConfig)
 
     ocispec.Annotations[vcAnnotations.DefaultVCPUs] = "1"
@@ -792,7 +812,7 @@ func TestAddHypervisorAnnotations(t *testing.T) {
     ocispec.Annotations[vcAnnotations.RxRateLimiterMaxRate] = "10000000"
     ocispec.Annotations[vcAnnotations.TxRateLimiterMaxRate] = "10000000"
 
-    addAnnotations(ocispec, &config)
+    addAnnotations(ocispec, &config, runtimeConfig)
     assert.Equal(config.HypervisorConfig.NumVCPUs, uint32(1))
     assert.Equal(config.HypervisorConfig.DefaultMaxVCPUs, uint32(1))
     assert.Equal(config.HypervisorConfig.MemorySize, uint32(1024))
@@ -830,16 +850,16 @@ func TestAddHypervisorAnnotations(t *testing.T) {
 
     // In case an absurd large value is provided, the config value if not over-ridden
     ocispec.Annotations[vcAnnotations.DefaultVCPUs] = "655536"
-    err := addAnnotations(ocispec, &config)
+    err := addAnnotations(ocispec, &config, runtimeConfig)
     assert.Error(err)
 
     ocispec.Annotations[vcAnnotations.DefaultVCPUs] = "-1"
-    err = addAnnotations(ocispec, &config)
+    err = addAnnotations(ocispec, &config, runtimeConfig)
     assert.Error(err)
 
     ocispec.Annotations[vcAnnotations.DefaultVCPUs] = "1"
     ocispec.Annotations[vcAnnotations.DefaultMaxVCPUs] = "-1"
-    err = addAnnotations(ocispec, &config)
+    err = addAnnotations(ocispec, &config, runtimeConfig)
     assert.Error(err)
 
     ocispec.Annotations[vcAnnotations.DefaultMaxVCPUs] = "1"
@@ -858,12 +878,17 @@ func TestAddRuntimeAnnotations(t *testing.T) {
         Annotations: make(map[string]string),
     }
 
+    runtimeConfig := RuntimeConfig{
+        HypervisorType: vc.QemuHypervisor,
+        Console:        consolePath,
+    }
+
     ocispec.Annotations[vcAnnotations.DisableGuestSeccomp] = "true"
     ocispec.Annotations[vcAnnotations.SandboxCgroupOnly] = "true"
     ocispec.Annotations[vcAnnotations.DisableNewNetNs] = "true"
     ocispec.Annotations[vcAnnotations.InterNetworkModel] = "macvtap"
 
-    addAnnotations(ocispec, &config)
+    addAnnotations(ocispec, &config, runtimeConfig)
     assert.Equal(config.DisableGuestSeccomp, true)
     assert.Equal(config.SandboxCgroupOnly, true)
     assert.Equal(config.NetworkConfig.DisableNewNetNs, true)