From 2d35e6066d3fb309952635e46b61bd451eef857c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 21 Apr 2022 01:06:10 +0200 Subject: [PATCH 01/11] hypervisor: Add network bandwidth and operations rate limiters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In a similar way to what's already exposed as RxRateLimiterMaxRate and TxRateLimiterMaxRate, let's add four new fields to the Hypervisor's configuration. The values added are related to bandwidth and operations rate limiters, which have to be added so we can expose I/O throttling configurations to users using Cloud Hypervisor as their preferred VMM. The reason we cannot simply re-use {Rx,Tx}RateLimiterMaxRate is because Cloud Hypervisor exposes a single MaxRate to be used for both inbound and outbound queues. The newly added fields are: * NetRateLimiterBwMaxRate, defined in bits per second, which is used to control the network I/O bandwidth at the VM level. * NetRateLimiterBwOneTimeBurst, also defined in bits per second, which is used to define an *initial* max rate, which doesn't replenish. * NetRateLimiterOpsMaxRate, the operations per second equivalent of the NetRateLimiterBwMaxRate. * NetRateLimiterOpsOneTimeBurst, the operations per second equivalent of the NetRateLimiterBwOneTimeBurst. For now those extra fields have only been added to the hypervisor's configuration and they'll be used in the coming patches of this very same series. Signed-off-by: Fabiano Fidêncio --- src/runtime/pkg/katautils/config.go | 160 ++++++++++++++--------- src/runtime/virtcontainers/hypervisor.go | 18 +++ 2 files changed, 114 insertions(+), 64 deletions(-) diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go index 3525bc236e..3e15929ff3 100644 --- a/src/runtime/pkg/katautils/config.go +++ b/src/runtime/pkg/katautils/config.go @@ -74,70 +74,74 @@ type factory struct { } type hypervisor struct { - Path string `toml:"path"` - JailerPath string `toml:"jailer_path"` - Kernel string `toml:"kernel"` - CtlPath string `toml:"ctlpath"` - Initrd string `toml:"initrd"` - Image string `toml:"image"` - Firmware string `toml:"firmware"` - FirmwareVolume string `toml:"firmware_volume"` - MachineAccelerators string `toml:"machine_accelerators"` - CPUFeatures string `toml:"cpu_features"` - KernelParams string `toml:"kernel_params"` - MachineType string `toml:"machine_type"` - BlockDeviceDriver string `toml:"block_device_driver"` - EntropySource string `toml:"entropy_source"` - SharedFS string `toml:"shared_fs"` - VirtioFSDaemon string `toml:"virtio_fs_daemon"` - VirtioFSCache string `toml:"virtio_fs_cache"` - VhostUserStorePath string `toml:"vhost_user_store_path"` - FileBackedMemRootDir string `toml:"file_mem_backend"` - GuestHookPath string `toml:"guest_hook_path"` - GuestMemoryDumpPath string `toml:"guest_memory_dump_path"` - HypervisorPathList []string `toml:"valid_hypervisor_paths"` - JailerPathList []string `toml:"valid_jailer_paths"` - CtlPathList []string `toml:"valid_ctlpaths"` - VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"` - VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"` - PFlashList []string `toml:"pflashes"` - VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"` - FileBackedMemRootList []string `toml:"valid_file_mem_backends"` - EntropySourceList []string `toml:"valid_entropy_sources"` - EnableAnnotations []string `toml:"enable_annotations"` - RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"` - 
TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"` - MemOffset uint64 `toml:"memory_offset"` - VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"` - DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"` - MemorySize uint32 `toml:"default_memory"` - MemSlots uint32 `toml:"memory_slots"` - DefaultBridges uint32 `toml:"default_bridges"` - Msize9p uint32 `toml:"msize_9p"` - PCIeRootPort uint32 `toml:"pcie_root_port"` - NumVCPUs int32 `toml:"default_vcpus"` - BlockDeviceCacheSet bool `toml:"block_device_cache_set"` - BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"` - BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"` - EnableVhostUserStore bool `toml:"enable_vhost_user_store"` - DisableBlockDeviceUse bool `toml:"disable_block_device_use"` - MemPrealloc bool `toml:"enable_mem_prealloc"` - HugePages bool `toml:"enable_hugepages"` - VirtioMem bool `toml:"enable_virtio_mem"` - IOMMU bool `toml:"enable_iommu"` - IOMMUPlatform bool `toml:"enable_iommu_platform"` - Debug bool `toml:"enable_debug"` - DisableNestingChecks bool `toml:"disable_nesting_checks"` - EnableIOThreads bool `toml:"enable_iothreads"` - DisableImageNvdimm bool `toml:"disable_image_nvdimm"` - HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"` - DisableVhostNet bool `toml:"disable_vhost_net"` - GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"` - ConfidentialGuest bool `toml:"confidential_guest"` - GuestSwap bool `toml:"enable_guest_swap"` - Rootless bool `toml:"rootless"` - DisableSeccomp bool `toml:"disable_seccomp"` - DisableSeLinux bool `toml:"disable_selinux"` + Path string `toml:"path"` + JailerPath string `toml:"jailer_path"` + Kernel string `toml:"kernel"` + CtlPath string `toml:"ctlpath"` + Initrd string `toml:"initrd"` + Image string `toml:"image"` + Firmware string `toml:"firmware"` + FirmwareVolume string `toml:"firmware_volume"` + MachineAccelerators string `toml:"machine_accelerators"` + CPUFeatures string `toml:"cpu_features"` + KernelParams string `toml:"kernel_params"` + MachineType string `toml:"machine_type"` + BlockDeviceDriver string `toml:"block_device_driver"` + EntropySource string `toml:"entropy_source"` + SharedFS string `toml:"shared_fs"` + VirtioFSDaemon string `toml:"virtio_fs_daemon"` + VirtioFSCache string `toml:"virtio_fs_cache"` + VhostUserStorePath string `toml:"vhost_user_store_path"` + FileBackedMemRootDir string `toml:"file_mem_backend"` + GuestHookPath string `toml:"guest_hook_path"` + GuestMemoryDumpPath string `toml:"guest_memory_dump_path"` + HypervisorPathList []string `toml:"valid_hypervisor_paths"` + JailerPathList []string `toml:"valid_jailer_paths"` + CtlPathList []string `toml:"valid_ctlpaths"` + VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"` + VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"` + PFlashList []string `toml:"pflashes"` + VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"` + FileBackedMemRootList []string `toml:"valid_file_mem_backends"` + EntropySourceList []string `toml:"valid_entropy_sources"` + EnableAnnotations []string `toml:"enable_annotations"` + RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"` + TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"` + MemOffset uint64 `toml:"memory_offset"` + NetRateLimiterBwMaxRate int64 `toml:"net_rate_limiter_bw_max_rate"` + NetRateLimiterBwOneTimeBurst int64 `toml:"net_rate_limiter_bw_one_time_burst"` + NetRateLimiterOpsMaxRate int64 `toml:"net_rate_limiter_ops_max_rate"` + NetRateLimiterOpsOneTimeBurst int64 
`toml:"net_rate_limiter_ops_one_time_burst"` + VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"` + DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"` + MemorySize uint32 `toml:"default_memory"` + MemSlots uint32 `toml:"memory_slots"` + DefaultBridges uint32 `toml:"default_bridges"` + Msize9p uint32 `toml:"msize_9p"` + PCIeRootPort uint32 `toml:"pcie_root_port"` + NumVCPUs int32 `toml:"default_vcpus"` + BlockDeviceCacheSet bool `toml:"block_device_cache_set"` + BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"` + BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"` + EnableVhostUserStore bool `toml:"enable_vhost_user_store"` + DisableBlockDeviceUse bool `toml:"disable_block_device_use"` + MemPrealloc bool `toml:"enable_mem_prealloc"` + HugePages bool `toml:"enable_hugepages"` + VirtioMem bool `toml:"enable_virtio_mem"` + IOMMU bool `toml:"enable_iommu"` + IOMMUPlatform bool `toml:"enable_iommu_platform"` + Debug bool `toml:"enable_debug"` + DisableNestingChecks bool `toml:"disable_nesting_checks"` + EnableIOThreads bool `toml:"enable_iothreads"` + DisableImageNvdimm bool `toml:"disable_image_nvdimm"` + HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"` + DisableVhostNet bool `toml:"disable_vhost_net"` + GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"` + ConfidentialGuest bool `toml:"confidential_guest"` + GuestSwap bool `toml:"enable_guest_swap"` + Rootless bool `toml:"rootless"` + DisableSeccomp bool `toml:"disable_seccomp"` + DisableSeLinux bool `toml:"disable_selinux"` } type runtime struct { @@ -490,6 +494,34 @@ func (h hypervisor) getTxRateLimiterCfg() uint64 { return h.TxRateLimiterMaxRate } +func (h hypervisor) getNetRateLimiterBwMaxRate() int64 { + return h.NetRateLimiterBwMaxRate +} + +func (h hypervisor) getNetRateLimiterBwOneTimeBurst() int64 { + if h.NetRateLimiterBwOneTimeBurst != 0 && h.getNetRateLimiterBwMaxRate() == 0 { + kataUtilsLogger.Warn("The NetRateLimiterBwOneTimeBurst is set but NetRateLimiterBwMaxRate is not set, this option will be ignored.") + + h.NetRateLimiterBwOneTimeBurst = 0 + } + + return h.NetRateLimiterBwOneTimeBurst +} + +func (h hypervisor) getNetRateLimiterOpsMaxRate() int64 { + return h.NetRateLimiterOpsMaxRate +} + +func (h hypervisor) getNetRateLimiterOpsOneTimeBurst() int64 { + if h.NetRateLimiterOpsOneTimeBurst != 0 && h.getNetRateLimiterOpsMaxRate() == 0 { + kataUtilsLogger.Warn("The NetRateLimiterOpsOneTimeBurst is set but NetRateLimiterOpsMaxRate is not set, this option will be ignored.") + + h.NetRateLimiterOpsOneTimeBurst = 0 + } + + return h.NetRateLimiterOpsOneTimeBurst +} + func (h hypervisor) getIOMMUPlatform() bool { if h.IOMMUPlatform { kataUtilsLogger.Info("IOMMUPlatform is enabled by default.") diff --git a/src/runtime/virtcontainers/hypervisor.go b/src/runtime/virtcontainers/hypervisor.go index 12725160b8..fa9b668673 100644 --- a/src/runtime/virtcontainers/hypervisor.go +++ b/src/runtime/virtcontainers/hypervisor.go @@ -386,6 +386,24 @@ type HypervisorConfig struct { // TxRateLimiterMaxRate is used to control network I/O outbound bandwidth on VM level. TxRateLimiterMaxRate uint64 + // NetRateLimiterBwRate is used to control network I/O bandwidth on VM level. + // The same value, defined in bits per second, is used for inbound and outbound bandwidth. + NetRateLimiterBwMaxRate int64 + + // NetRateLimiterBwOneTimeBurst is used to control network I/O bandwidth on VM level. 
+ // This increases the initial max rate and this initial extra credit does *NOT* replenish + // and can be used for an *initial* burst of data. + NetRateLimiterBwOneTimeBurst int64 + + // NetRateLimiterOpsRate is used to control network I/O operations on VM level. + // The same value, defined in operations per second, is used for inbound and outbound bandwidth. + NetRateLimiterOpsMaxRate int64 + + // NetRateLimiterOpsOneTimeBurst is used to control network I/O operations on VM level. + // This increases the initial max rate and this initial extra credit does *NOT* replenish + // and can be used for an *initial* burst of data. + NetRateLimiterOpsOneTimeBurst int64 + // MemOffset specifies memory space for nvdimm device MemOffset uint64 From c9f6496d6dfa2478a6e80b5c2d0782502ea0235c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 21 Apr 2022 13:02:34 +0200 Subject: [PATCH 02/11] config: Add NetRateLimiter* to Cloud Hypervisor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's add the newly added network rate limiter configurations to the Cloud Hypervisor's hypervisor configuration. Right now those are not used anywhere, and there's absolutely no way the users can set those up. That's coming later in this very same series. Signed-off-by: Fabiano Fidêncio --- src/runtime/pkg/katautils/config.go | 96 ++++++++++++------------ src/runtime/pkg/katautils/config_test.go | 34 +++++++-- 2 files changed, 79 insertions(+), 51 deletions(-) diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go index 3e15929ff3..3cb37780db 100644 --- a/src/runtime/pkg/katautils/config.go +++ b/src/runtime/pkg/katautils/config.go @@ -860,52 +860,56 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { } return vc.HypervisorConfig{ - HypervisorPath: hypervisor, - HypervisorPathList: h.HypervisorPathList, - KernelPath: kernel, - InitrdPath: initrd, - ImagePath: image, - FirmwarePath: firmware, - MachineAccelerators: machineAccelerators, - KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)), - HypervisorMachineType: machineType, - NumVCPUs: h.defaultVCPUs(), - DefaultMaxVCPUs: h.defaultMaxVCPUs(), - MemorySize: h.defaultMemSz(), - MemSlots: h.defaultMemSlots(), - MemOffset: h.defaultMemOffset(), - VirtioMem: h.VirtioMem, - EntropySource: h.GetEntropySource(), - EntropySourceList: h.EntropySourceList, - DefaultBridges: h.defaultBridges(), - DisableBlockDeviceUse: h.DisableBlockDeviceUse, - SharedFS: sharedFS, - VirtioFSDaemon: h.VirtioFSDaemon, - VirtioFSDaemonList: h.VirtioFSDaemonList, - VirtioFSCacheSize: h.VirtioFSCacheSize, - VirtioFSCache: h.VirtioFSCache, - MemPrealloc: h.MemPrealloc, - HugePages: h.HugePages, - FileBackedMemRootDir: h.FileBackedMemRootDir, - FileBackedMemRootList: h.FileBackedMemRootList, - Debug: h.Debug, - DisableNestingChecks: h.DisableNestingChecks, - BlockDeviceDriver: blockDriver, - BlockDeviceCacheSet: h.BlockDeviceCacheSet, - BlockDeviceCacheDirect: h.BlockDeviceCacheDirect, - BlockDeviceCacheNoflush: h.BlockDeviceCacheNoflush, - EnableIOThreads: h.EnableIOThreads, - Msize9p: h.msize9p(), - HotplugVFIOOnRootBus: h.HotplugVFIOOnRootBus, - PCIeRootPort: h.PCIeRootPort, - DisableVhostNet: true, - GuestHookPath: h.guestHookPath(), - VirtioFSExtraArgs: h.VirtioFSExtraArgs, - SGXEPCSize: defaultSGXEPCSize, - EnableAnnotations: h.EnableAnnotations, - DisableSeccomp: h.DisableSeccomp, - ConfidentialGuest: h.ConfidentialGuest, - DisableSeLinux: h.DisableSeLinux, + 
HypervisorPath: hypervisor, + HypervisorPathList: h.HypervisorPathList, + KernelPath: kernel, + InitrdPath: initrd, + ImagePath: image, + FirmwarePath: firmware, + MachineAccelerators: machineAccelerators, + KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)), + HypervisorMachineType: machineType, + NumVCPUs: h.defaultVCPUs(), + DefaultMaxVCPUs: h.defaultMaxVCPUs(), + MemorySize: h.defaultMemSz(), + MemSlots: h.defaultMemSlots(), + MemOffset: h.defaultMemOffset(), + VirtioMem: h.VirtioMem, + EntropySource: h.GetEntropySource(), + EntropySourceList: h.EntropySourceList, + DefaultBridges: h.defaultBridges(), + DisableBlockDeviceUse: h.DisableBlockDeviceUse, + SharedFS: sharedFS, + VirtioFSDaemon: h.VirtioFSDaemon, + VirtioFSDaemonList: h.VirtioFSDaemonList, + VirtioFSCacheSize: h.VirtioFSCacheSize, + VirtioFSCache: h.VirtioFSCache, + MemPrealloc: h.MemPrealloc, + HugePages: h.HugePages, + FileBackedMemRootDir: h.FileBackedMemRootDir, + FileBackedMemRootList: h.FileBackedMemRootList, + Debug: h.Debug, + DisableNestingChecks: h.DisableNestingChecks, + BlockDeviceDriver: blockDriver, + BlockDeviceCacheSet: h.BlockDeviceCacheSet, + BlockDeviceCacheDirect: h.BlockDeviceCacheDirect, + BlockDeviceCacheNoflush: h.BlockDeviceCacheNoflush, + EnableIOThreads: h.EnableIOThreads, + Msize9p: h.msize9p(), + HotplugVFIOOnRootBus: h.HotplugVFIOOnRootBus, + PCIeRootPort: h.PCIeRootPort, + DisableVhostNet: true, + GuestHookPath: h.guestHookPath(), + VirtioFSExtraArgs: h.VirtioFSExtraArgs, + SGXEPCSize: defaultSGXEPCSize, + EnableAnnotations: h.EnableAnnotations, + DisableSeccomp: h.DisableSeccomp, + ConfidentialGuest: h.ConfidentialGuest, + DisableSeLinux: h.DisableSeLinux, + NetRateLimiterBwMaxRate: h.getNetRateLimiterBwMaxRate(), + NetRateLimiterBwOneTimeBurst: h.getNetRateLimiterBwOneTimeBurst(), + NetRateLimiterOpsMaxRate: h.getNetRateLimiterOpsMaxRate(), + NetRateLimiterOpsOneTimeBurst: h.getNetRateLimiterOpsOneTimeBurst(), }, nil } diff --git a/src/runtime/pkg/katautils/config_test.go b/src/runtime/pkg/katautils/config_test.go index 2f85adcb3b..778f6ec6de 100644 --- a/src/runtime/pkg/katautils/config_test.go +++ b/src/runtime/pkg/katautils/config_test.go @@ -810,6 +810,10 @@ func TestNewClhHypervisorConfig(t *testing.T) { kernelPath := path.Join(tmpdir, "kernel") imagePath := path.Join(tmpdir, "image") virtioFsDaemon := path.Join(tmpdir, "virtiofsd") + netRateLimiterBwMaxRate := int64(1000) + netRateLimiterBwOneTimeBurst := int64(1000) + netRateLimiterOpsMaxRate := int64(0) + netRateLimiterOpsOneTimeBurst := int64(1000) for _, file := range []string{imagePath, hypervisorPath, kernelPath, virtioFsDaemon} { err := createEmptyFile(file) @@ -817,11 +821,15 @@ func TestNewClhHypervisorConfig(t *testing.T) { } hypervisor := hypervisor{ - Path: hypervisorPath, - Kernel: kernelPath, - Image: imagePath, - VirtioFSDaemon: virtioFsDaemon, - VirtioFSCache: "always", + Path: hypervisorPath, + Kernel: kernelPath, + Image: imagePath, + VirtioFSDaemon: virtioFsDaemon, + VirtioFSCache: "always", + NetRateLimiterBwMaxRate: netRateLimiterBwMaxRate, + NetRateLimiterBwOneTimeBurst: netRateLimiterBwOneTimeBurst, + NetRateLimiterOpsMaxRate: netRateLimiterOpsMaxRate, + NetRateLimiterOpsOneTimeBurst: netRateLimiterOpsOneTimeBurst, } config, err := newClhHypervisorConfig(hypervisor) if err != nil { @@ -852,6 +860,22 @@ func TestNewClhHypervisorConfig(t *testing.T) { t.Errorf("Expected VirtioFSCache %v, got %v", true, config.VirtioFSCache) } + if config.NetRateLimiterBwMaxRate != netRateLimiterBwMaxRate { + 
t.Errorf("Expected value for network bandwidth rate limiter %v, got %v", netRateLimiterBwMaxRate, config.NetRateLimiterBwMaxRate) + } + + if config.NetRateLimiterBwOneTimeBurst != netRateLimiterBwOneTimeBurst { + t.Errorf("Expected value for network bandwidth one time burst %v, got %v", netRateLimiterBwOneTimeBurst, config.NetRateLimiterBwOneTimeBurst) + } + + if config.NetRateLimiterOpsMaxRate != netRateLimiterOpsMaxRate { + t.Errorf("Expected value for network operations rate limiter %v, got %v", netRateLimiterOpsMaxRate, config.NetRateLimiterOpsMaxRate) + } + + // We expect 0 (zero) here as netRateLimiterOpsMaxRate is not set (set to zero). + if config.NetRateLimiterOpsOneTimeBurst != 0 { + t.Errorf("Expected value for network operations one time burst %v, got %v", netRateLimiterOpsOneTimeBurst, config.NetRateLimiterOpsOneTimeBurst) + } } func TestHypervisorDefaults(t *testing.T) { From be1bb7e39f876f693a1d224ce90678be6a856ecb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Thu, 21 Apr 2022 03:21:17 +0200 Subject: [PATCH 03/11] utils: Move FC's function to revert bytes to utils MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Firecracker's revertBytes function, now called "RevertBytes", can be exposed as part of the virtcontainers' utils file, as this function will be reused by Cloud Hypervisor, when adding the rate limiter logic there. Signed-off-by: Fabiano Fidêncio --- src/runtime/virtcontainers/fc.go | 16 ++-------------- src/runtime/virtcontainers/fc_test.go | 11 ----------- src/runtime/virtcontainers/utils/utils.go | 16 ++++++++++++++++ src/runtime/virtcontainers/utils/utils_test.go | 11 +++++++++++ 4 files changed, 29 insertions(+), 25 deletions(-) diff --git a/src/runtime/virtcontainers/fc.go b/src/runtime/virtcontainers/fc.go index 4630a810c8..cb6e3a2601 100644 --- a/src/runtime/virtcontainers/fc.go +++ b/src/runtime/virtcontainers/fc.go @@ -938,7 +938,7 @@ func (fc *firecracker) fcAddNetDevice(ctx context.Context, endpoint Endpoint) { // kata-defined rxSize is in bits with scaling factors of 1000, but firecracker-defined // rxSize is in bytes with scaling factors of 1024, need reversion. - rxSize = revertBytes(rxSize / 8) + rxSize = utils.RevertBytes(rxSize / 8) rxTokenBucket := models.TokenBucket{ RefillTime: &refillTime, Size: &rxSize, @@ -955,7 +955,7 @@ func (fc *firecracker) fcAddNetDevice(ctx context.Context, endpoint Endpoint) { // kata-defined txSize is in bits with scaling factors of 1000, but firecracker-defined // txSize is in bytes with scaling factors of 1024, need reversion. - txSize = revertBytes(txSize / 8) + txSize = utils.RevertBytes(txSize / 8) txTokenBucket := models.TokenBucket{ RefillTime: &refillTime, Size: &txSize, @@ -1266,15 +1266,3 @@ func (fc *firecracker) GenerateSocket(id string) (interface{}, error) { func (fc *firecracker) IsRateLimiterBuiltin() bool { return true } - -// In firecracker, it accepts the size of rate limiter in scaling factors of 2^10(1024) -// But in kata-defined rate limiter, for better Human-readability, we prefer scaling factors of 10^3(1000). -// func revertByte reverts num from scaling factors of 1000 to 1024, e.g. 10000000(10MB) to 10485760. 
-func revertBytes(num uint64) uint64 {
-	a := num / 1000
-	b := num % 1000
-	if a == 0 {
-		return num
-	}
-	return 1024*revertBytes(a) + b
-}
diff --git a/src/runtime/virtcontainers/fc_test.go b/src/runtime/virtcontainers/fc_test.go
index f2b099f1db..78ab704eea 100644
--- a/src/runtime/virtcontainers/fc_test.go
+++ b/src/runtime/virtcontainers/fc_test.go
@@ -50,17 +50,6 @@ func TestFCTruncateID(t *testing.T) {
 	assert.Equal(expectedID, id)
 }
 
-func TestRevertBytes(t *testing.T) {
-	assert := assert.New(t)
-
-	//10MB
-	testNum := uint64(10000000)
-	expectedNum := uint64(10485760)
-
-	num := revertBytes(testNum)
-	assert.Equal(expectedNum, num)
-}
-
 func TestFCParseVersion(t *testing.T) {
 	assert := assert.New(t)
 
diff --git a/src/runtime/virtcontainers/utils/utils.go b/src/runtime/virtcontainers/utils/utils.go
index 88c29cec5a..048446fd76 100644
--- a/src/runtime/virtcontainers/utils/utils.go
+++ b/src/runtime/virtcontainers/utils/utils.go
@@ -458,3 +458,19 @@ func getAllParentPaths(path string) []string {
 	// remove the "/" or "." from the return result
 	return paths[1:]
 }
+
+// In Cloud Hypervisor, as well as in Firecracker, the crate used by the VMMs
+// accepts the size of rate limiter in scaling factors of 2^10(1024).
+// But in kata-defined rate limiter, for better human-readability, we prefer
+// scaling factors of 10^3(1000).
+//
+// func RevertBytes reverts num from scaling factors of 1000 to 1024, e.g.
+// 10000000(10MB) to 10485760.
+func RevertBytes(num uint64) uint64 {
+	a := num / 1000
+	b := num % 1000
+	if a == 0 {
+		return num
+	}
+	return 1024*RevertBytes(a) + b
+}
diff --git a/src/runtime/virtcontainers/utils/utils_test.go b/src/runtime/virtcontainers/utils/utils_test.go
index aac6fef909..8f3d0eed26 100644
--- a/src/runtime/virtcontainers/utils/utils_test.go
+++ b/src/runtime/virtcontainers/utils/utils_test.go
@@ -569,3 +569,14 @@ func TestGetAllParentPaths(t *testing.T) {
 		assert.Equal(tc.parents, getAllParentPaths(tc.targetPath))
 	}
 }
+
+func TestRevertBytes(t *testing.T) {
+	assert := assert.New(t)
+
+	//10MB
+	testNum := uint64(10000000)
+	expectedNum := uint64(10485760)
+
+	num := RevertBytes(testNum)
+	assert.Equal(expectedNum, num)
+}

From 00a5b1bda92f11e6ba9f5373260fc6afb873845c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 21 Apr 2022 03:29:07 +0200
Subject: [PATCH 04/11] utils: Define DefaultRateLimiterRefillTimeMilliSecs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Firecracker's driver doesn't expose the RefillTime option of the rate
limiter to the user. Instead, it uses a constant value of 1000
milliseconds (1 second).

As we're following Firecracker's driver implementation, let's create and
expose a new constant, use it as part of the Firecracker driver, and
later on re-use it as part of the Cloud Hypervisor driver.

Signed-off-by: Fabiano Fidêncio
---
 src/runtime/virtcontainers/fc.go          | 2 +-
 src/runtime/virtcontainers/utils/utils.go | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/runtime/virtcontainers/fc.go b/src/runtime/virtcontainers/fc.go
index cb6e3a2601..384ce15a12 100644
--- a/src/runtime/virtcontainers/fc.go
+++ b/src/runtime/virtcontainers/fc.go
@@ -930,7 +930,7 @@ func (fc *firecracker) fcAddNetDevice(ctx context.Context, endpoint Endpoint) {
 	// The implementation of rate limiter is based on TBF.
 	// Rate Limiter defines a token bucket with a maximum capacity (size) to store tokens, and an interval for refilling purposes (refill_time).
 	// The refill-rate is derived from size and refill_time, and it is the constant rate at which the tokens replenish.
-	refillTime := uint64(1000)
+	refillTime := uint64(utils.DefaultRateLimiterRefillTimeMilliSecs)
 	var rxRateLimiter models.RateLimiter
 	rxSize := fc.config.RxRateLimiterMaxRate
 	if rxSize > 0 {
diff --git a/src/runtime/virtcontainers/utils/utils.go b/src/runtime/virtcontainers/utils/utils.go
index 048446fd76..5a6bb71501 100644
--- a/src/runtime/virtcontainers/utils/utils.go
+++ b/src/runtime/virtcontainers/utils/utils.go
@@ -25,6 +25,11 @@ const cpBinaryName = "cp"
 
 const fileMode0755 = os.FileMode(0755)
 
+// The DefaultRateLimiterRefillTimeMilliSecs is used for calculating the rate
+// at which a TokenBucket is replenished, in cases where a RateLimiter is
+// applied to either network or disk I/O.
+const DefaultRateLimiterRefillTimeMilliSecs = 1000
+
 // MibToBytesShift the number to shift needed to convert MiB to Bytes
 const MibToBytesShift = 20
 
From 1cf946929742a235e455387bf52bf9778b8236bf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Tue, 19 Apr 2022 13:22:27 +0200
Subject: [PATCH 05/11] clh: Implement the Network RateLimiter logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's take advantage of the newly added NetRateLimiter* options and
apply those to the network device configuration.

The logic here is quite similar to the one already present in the
Firecracker driver, with the main difference being the single
Inbound / Outbound MaxRate and the presence of both Bandwidth and
Operations rate limiters.

Signed-off-by: Fabiano Fidêncio
---
 src/runtime/virtcontainers/clh.go      |  44 ++++++
 src/runtime/virtcontainers/clh_test.go | 201 +++++++++++++++++++++++--
 2 files changed, 234 insertions(+), 11 deletions(-)

diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go
index d014150666..30dc42c470 100644
--- a/src/runtime/virtcontainers/clh.go
+++ b/src/runtime/virtcontainers/clh.go
@@ -1304,6 +1304,44 @@ func (clh *cloudHypervisor) addVSock(cid int64, path string) {
 	clh.vmconfig.Vsock = chclient.NewVsockConfig(cid, path)
 }
 
+func (clh *cloudHypervisor) getRateLimiterConfig(bwSize, bwOneTimeBurst, opsSize, opsOneTimeBurst int64) *chclient.RateLimiterConfig {
+	if bwSize == 0 && opsSize == 0 {
+		return nil
+	}
+
+	rateLimiterConfig := chclient.NewRateLimiterConfig()
+
+	if bwSize != 0 {
+		bwTokenBucket := chclient.NewTokenBucket(bwSize, int64(utils.DefaultRateLimiterRefillTimeMilliSecs))
+
+		if bwOneTimeBurst != 0 {
+			bwTokenBucket.SetOneTimeBurst(bwOneTimeBurst)
+		}
+
+		rateLimiterConfig.SetBandwidth(*bwTokenBucket)
+	}
+
+	if opsSize != 0 {
+		opsTokenBucket := chclient.NewTokenBucket(opsSize, int64(utils.DefaultRateLimiterRefillTimeMilliSecs))
+
+		if opsOneTimeBurst != 0 {
+			opsTokenBucket.SetOneTimeBurst(opsOneTimeBurst)
+		}
+
+		rateLimiterConfig.SetOps(*opsTokenBucket)
+	}
+
+	return rateLimiterConfig
+}
+
+func (clh *cloudHypervisor) getNetRateLimiterConfig() *chclient.RateLimiterConfig {
+	return clh.getRateLimiterConfig(
+		int64(utils.RevertBytes(uint64(clh.config.NetRateLimiterBwMaxRate/8))),
+		int64(utils.RevertBytes(uint64(clh.config.NetRateLimiterBwOneTimeBurst/8))),
+		clh.config.NetRateLimiterOpsMaxRate,
+		clh.config.NetRateLimiterOpsOneTimeBurst)
+}
+
 func (clh *cloudHypervisor) addNet(e Endpoint) error {
 	clh.Logger().WithField("endpoint-type", e).Debugf("Adding Endpoint of type %v", e)
 
@@ -1323,9 +1361,15 @@ func (clh *cloudHypervisor) addNet(e Endpoint) error {
 		"tap": tapPath,
}).Info("Adding Net") + netRateLimiterConfig := clh.getNetRateLimiterConfig() + net := chclient.NewNetConfig() net.Mac = &mac net.Tap = &tapPath + if netRateLimiterConfig != nil { + net.SetRateLimiterConfig(*netRateLimiterConfig) + } + if clh.vmconfig.Net != nil { *clh.vmconfig.Net = append(*clh.vmconfig.Net, *net) } else { diff --git a/src/runtime/virtcontainers/clh_test.go b/src/runtime/virtcontainers/clh_test.go index 9bfd2e62ee..a764910f49 100644 --- a/src/runtime/virtcontainers/clh_test.go +++ b/src/runtime/virtcontainers/clh_test.go @@ -52,17 +52,21 @@ func newClhConfig() (HypervisorConfig, error) { } return HypervisorConfig{ - KernelPath: testClhKernelPath, - ImagePath: testClhImagePath, - HypervisorPath: testClhPath, - NumVCPUs: defaultVCPUs, - BlockDeviceDriver: config.VirtioBlock, - MemorySize: defaultMemSzMiB, - DefaultBridges: defaultBridges, - DefaultMaxVCPUs: uint32(64), - SharedFS: config.VirtioFS, - VirtioFSCache: virtioFsCacheAlways, - VirtioFSDaemon: testVirtiofsdPath, + KernelPath: testClhKernelPath, + ImagePath: testClhImagePath, + HypervisorPath: testClhPath, + NumVCPUs: defaultVCPUs, + BlockDeviceDriver: config.VirtioBlock, + MemorySize: defaultMemSzMiB, + DefaultBridges: defaultBridges, + DefaultMaxVCPUs: uint32(64), + SharedFS: config.VirtioFS, + VirtioFSCache: virtioFsCacheAlways, + VirtioFSDaemon: testVirtiofsdPath, + NetRateLimiterBwMaxRate: int64(0), + NetRateLimiterBwOneTimeBurst: int64(0), + NetRateLimiterOpsMaxRate: int64(0), + NetRateLimiterOpsOneTimeBurst: int64(0), }, nil } @@ -191,6 +195,181 @@ func TestCloudHypervisorAddNetCheckEnpointTypes(t *testing.T) { } } +// Check AddNet properly sets up the network rate limiter +func TestCloudHypervisorNetRateLimiter(t *testing.T) { + assert := assert.New(t) + + tapPath := "/path/to/tap" + + validVeth := &VethEndpoint{} + validVeth.NetPair.TapInterface.TAPIface.Name = tapPath + + type args struct { + bwMaxRate int64 + bwOneTimeBurst int64 + opsMaxRate int64 + opsOneTimeBurst int64 + } + + //nolint: govet + tests := []struct { + name string + args args + expectsRateLimiter bool + expectsBwBucketToken bool + expectsOpsBucketToken bool + }{ + // Bandwidth + { + "Bandwidth | max rate with one time burst", + args{ + bwMaxRate: int64(1000), + bwOneTimeBurst: int64(10000), + }, + true, // expectsRateLimiter + true, // expectsBwBucketToken + false, // expectsOpsBucketToken + }, + { + "Bandwidth | max rate without one time burst", + args{ + bwMaxRate: int64(1000), + }, + true, // expectsRateLimiter + true, // expectsBwBucketToken + false, // expectsOpsBucketToken + }, + { + "Bandwidth | no max rate with one time burst", + args{ + bwOneTimeBurst: int64(10000), + }, + false, // expectsRateLimiter + false, // expectsBwBucketToken + false, // expectsOpsBucketToken + }, + { + "Bandwidth | no max rate and no one time burst", + args{}, + false, // expectsRateLimiter + false, // expectsBwBucketToken + false, // expectsOpsBucketToken + }, + + // Operations + { + "Operations | max rate with one time burst", + args{ + opsMaxRate: int64(1000), + opsOneTimeBurst: int64(10000), + }, + true, // expectsRateLimiter + false, // expectsBwBucketToken + true, // expectsOpsBucketToken + }, + { + "Operations | max rate without one time burst", + args{ + opsMaxRate: int64(1000), + }, + true, // expectsRateLimiter + false, // expectsBwBucketToken + true, // expectsOpsBucketToken + }, + { + "Operations | no max rate with one time burst", + args{ + opsOneTimeBurst: int64(10000), + }, + false, // expectsRateLimiter + false, // expectsBwBucketToken + 
false, // expectsOpsBucketToken + }, + { + "Operations | no max rate and no one time burst", + args{}, + false, // expectsRateLimiter + false, // expectsBwBucketToken + false, // expectsOpsBucketToken + }, + + // Bandwidth and Operations + { + "Bandwidth and Operations | max rate with one time burst", + args{ + bwMaxRate: int64(1000), + bwOneTimeBurst: int64(10000), + opsMaxRate: int64(1000), + opsOneTimeBurst: int64(10000), + }, + true, // expectsRateLimiter + true, // expectsBwBucketToken + true, // expectsOpsBucketToken + }, + { + "Bandwidth and Operations | max rate without one time burst", + args{ + bwMaxRate: int64(1000), + opsMaxRate: int64(1000), + }, + true, // expectsRateLimiter + true, // expectsBwBucketToken + true, // expectsOpsBucketToken + }, + { + "Bandwidth and Operations | no max rate with one time burst", + args{ + bwOneTimeBurst: int64(10000), + opsOneTimeBurst: int64(10000), + }, + false, // expectsRateLimiter + false, // expectsBwBucketToken + false, // expectsOpsBucketToken + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clhConfig, err := newClhConfig() + assert.NoError(err) + + clhConfig.NetRateLimiterBwMaxRate = tt.args.bwMaxRate + clhConfig.NetRateLimiterBwOneTimeBurst = tt.args.bwOneTimeBurst + clhConfig.NetRateLimiterOpsMaxRate = tt.args.opsMaxRate + clhConfig.NetRateLimiterOpsOneTimeBurst = tt.args.opsOneTimeBurst + + clh := &cloudHypervisor{} + clh.config = clhConfig + clh.APIClient = &clhClientMock{} + + if err := clh.addNet(validVeth); err != nil { + t.Errorf("cloudHypervisor.addNet() error = %v", err) + } else { + netConfig := (*clh.vmconfig.Net)[0] + + assert.Equal(netConfig.HasRateLimiterConfig(), tt.expectsRateLimiter) + if tt.expectsRateLimiter { + rateLimiterConfig := netConfig.GetRateLimiterConfig() + assert.Equal(rateLimiterConfig.HasBandwidth(), tt.expectsBwBucketToken) + assert.Equal(rateLimiterConfig.HasOps(), tt.expectsOpsBucketToken) + + if tt.expectsBwBucketToken { + bwBucketToken := rateLimiterConfig.GetBandwidth() + assert.Equal(bwBucketToken.GetSize(), int64(utils.RevertBytes(uint64(tt.args.bwMaxRate/8)))) + assert.Equal(bwBucketToken.GetOneTimeBurst(), int64(utils.RevertBytes(uint64(tt.args.bwOneTimeBurst/8)))) + } + + if tt.expectsOpsBucketToken { + opsBucketToken := rateLimiterConfig.GetOps() + assert.Equal(opsBucketToken.GetSize(), int64(tt.args.opsMaxRate)) + assert.Equal(opsBucketToken.GetOneTimeBurst(), int64(tt.args.opsOneTimeBurst)) + } + } + } + }) + } +} + func TestCloudHypervisorBootVM(t *testing.T) { clh := &cloudHypervisor{} clh.APIClient = &clhClientMock{} From 5b18575dfed267854ce23a022b7a922bd71c17ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Mon, 25 Apr 2022 15:30:21 +0200 Subject: [PATCH 06/11] hypervisor: Add disk bandwidth and operations rate limiters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the disk counterpart of the what was introduced for the network as part of the previous commits in this series. The newly added fields are: * DiskRateLimiterBwMaxRate, defined in bits per second, which is used to control the network I/O bandwidth at the VM level. * DiskRateLimiterBwOneTimeBurst, also defined in bits per second, which is used to define an *initial* max rate, which doesn't replenish. * DiskRateLimiterOpsMaxRate, the operations per second equivalent of the DiskRateLimiterBwMaxRate. * DiskRateLimiterOpsOneTimeBurst, the operations per second equivalent of the DiskRateLimiterBwOneTimeBurst. 
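As a rough illustration of how these knobs are meant to surface to users, the standalone sketch below decodes a minimal TOML snippet into a trimmed-down struct carrying the same toml tags this patch adds to config.go. It is only a sketch: the struct, the sample snippet and the use of the BurntSushi/toml decoder are assumptions made for illustration, not the actual kata-containers configuration loader.

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    // diskLimits mirrors only the four new disk rate limiter keys; the real
    // hypervisor struct in config.go carries many more fields.
    type diskLimits struct {
    	DiskRateLimiterBwMaxRate       int64 `toml:"disk_rate_limiter_bw_max_rate"`
    	DiskRateLimiterBwOneTimeBurst  int64 `toml:"disk_rate_limiter_bw_one_time_burst"`
    	DiskRateLimiterOpsMaxRate      int64 `toml:"disk_rate_limiter_ops_max_rate"`
    	DiskRateLimiterOpsOneTimeBurst int64 `toml:"disk_rate_limiter_ops_one_time_burst"`
    }

    func main() {
    	// A hypothetical configuration fragment with some of the new keys set.
    	data := `
    disk_rate_limiter_bw_max_rate = 100000000
    disk_rate_limiter_bw_one_time_burst = 200000000
    disk_rate_limiter_ops_max_rate = 10000
    `
    	var h diskLimits
    	if _, err := toml.Decode(data, &h); err != nil {
    		panic(err)
    	}

    	// Keys left out of the snippet stay at 0, which the runtime treats as
    	// "rate limiting disabled" for that particular knob.
    	fmt.Printf("%+v\n", h)
    }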
For now those extra fields have only been added to the hypervisor's configuration and they'll be used in the coming patches of this very same series. Signed-off-by: Fabiano Fidêncio --- src/runtime/pkg/katautils/config.go | 168 ++++++++++++++--------- src/runtime/virtcontainers/hypervisor.go | 18 +++ 2 files changed, 118 insertions(+), 68 deletions(-) diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go index 3cb37780db..0401b21802 100644 --- a/src/runtime/pkg/katautils/config.go +++ b/src/runtime/pkg/katautils/config.go @@ -74,74 +74,78 @@ type factory struct { } type hypervisor struct { - Path string `toml:"path"` - JailerPath string `toml:"jailer_path"` - Kernel string `toml:"kernel"` - CtlPath string `toml:"ctlpath"` - Initrd string `toml:"initrd"` - Image string `toml:"image"` - Firmware string `toml:"firmware"` - FirmwareVolume string `toml:"firmware_volume"` - MachineAccelerators string `toml:"machine_accelerators"` - CPUFeatures string `toml:"cpu_features"` - KernelParams string `toml:"kernel_params"` - MachineType string `toml:"machine_type"` - BlockDeviceDriver string `toml:"block_device_driver"` - EntropySource string `toml:"entropy_source"` - SharedFS string `toml:"shared_fs"` - VirtioFSDaemon string `toml:"virtio_fs_daemon"` - VirtioFSCache string `toml:"virtio_fs_cache"` - VhostUserStorePath string `toml:"vhost_user_store_path"` - FileBackedMemRootDir string `toml:"file_mem_backend"` - GuestHookPath string `toml:"guest_hook_path"` - GuestMemoryDumpPath string `toml:"guest_memory_dump_path"` - HypervisorPathList []string `toml:"valid_hypervisor_paths"` - JailerPathList []string `toml:"valid_jailer_paths"` - CtlPathList []string `toml:"valid_ctlpaths"` - VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"` - VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"` - PFlashList []string `toml:"pflashes"` - VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"` - FileBackedMemRootList []string `toml:"valid_file_mem_backends"` - EntropySourceList []string `toml:"valid_entropy_sources"` - EnableAnnotations []string `toml:"enable_annotations"` - RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"` - TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"` - MemOffset uint64 `toml:"memory_offset"` - NetRateLimiterBwMaxRate int64 `toml:"net_rate_limiter_bw_max_rate"` - NetRateLimiterBwOneTimeBurst int64 `toml:"net_rate_limiter_bw_one_time_burst"` - NetRateLimiterOpsMaxRate int64 `toml:"net_rate_limiter_ops_max_rate"` - NetRateLimiterOpsOneTimeBurst int64 `toml:"net_rate_limiter_ops_one_time_burst"` - VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"` - DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"` - MemorySize uint32 `toml:"default_memory"` - MemSlots uint32 `toml:"memory_slots"` - DefaultBridges uint32 `toml:"default_bridges"` - Msize9p uint32 `toml:"msize_9p"` - PCIeRootPort uint32 `toml:"pcie_root_port"` - NumVCPUs int32 `toml:"default_vcpus"` - BlockDeviceCacheSet bool `toml:"block_device_cache_set"` - BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"` - BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"` - EnableVhostUserStore bool `toml:"enable_vhost_user_store"` - DisableBlockDeviceUse bool `toml:"disable_block_device_use"` - MemPrealloc bool `toml:"enable_mem_prealloc"` - HugePages bool `toml:"enable_hugepages"` - VirtioMem bool `toml:"enable_virtio_mem"` - IOMMU bool `toml:"enable_iommu"` - IOMMUPlatform bool `toml:"enable_iommu_platform"` - Debug bool 
`toml:"enable_debug"` - DisableNestingChecks bool `toml:"disable_nesting_checks"` - EnableIOThreads bool `toml:"enable_iothreads"` - DisableImageNvdimm bool `toml:"disable_image_nvdimm"` - HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"` - DisableVhostNet bool `toml:"disable_vhost_net"` - GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"` - ConfidentialGuest bool `toml:"confidential_guest"` - GuestSwap bool `toml:"enable_guest_swap"` - Rootless bool `toml:"rootless"` - DisableSeccomp bool `toml:"disable_seccomp"` - DisableSeLinux bool `toml:"disable_selinux"` + Path string `toml:"path"` + JailerPath string `toml:"jailer_path"` + Kernel string `toml:"kernel"` + CtlPath string `toml:"ctlpath"` + Initrd string `toml:"initrd"` + Image string `toml:"image"` + Firmware string `toml:"firmware"` + FirmwareVolume string `toml:"firmware_volume"` + MachineAccelerators string `toml:"machine_accelerators"` + CPUFeatures string `toml:"cpu_features"` + KernelParams string `toml:"kernel_params"` + MachineType string `toml:"machine_type"` + BlockDeviceDriver string `toml:"block_device_driver"` + EntropySource string `toml:"entropy_source"` + SharedFS string `toml:"shared_fs"` + VirtioFSDaemon string `toml:"virtio_fs_daemon"` + VirtioFSCache string `toml:"virtio_fs_cache"` + VhostUserStorePath string `toml:"vhost_user_store_path"` + FileBackedMemRootDir string `toml:"file_mem_backend"` + GuestHookPath string `toml:"guest_hook_path"` + GuestMemoryDumpPath string `toml:"guest_memory_dump_path"` + HypervisorPathList []string `toml:"valid_hypervisor_paths"` + JailerPathList []string `toml:"valid_jailer_paths"` + CtlPathList []string `toml:"valid_ctlpaths"` + VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"` + VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"` + PFlashList []string `toml:"pflashes"` + VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"` + FileBackedMemRootList []string `toml:"valid_file_mem_backends"` + EntropySourceList []string `toml:"valid_entropy_sources"` + EnableAnnotations []string `toml:"enable_annotations"` + RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"` + TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"` + MemOffset uint64 `toml:"memory_offset"` + DiskRateLimiterBwMaxRate int64 `toml:"disk_rate_limiter_bw_max_rate"` + DiskRateLimiterBwOneTimeBurst int64 `toml:"disk_rate_limiter_bw_one_time_burst"` + DiskRateLimiterOpsMaxRate int64 `toml:"disk_rate_limiter_ops_max_rate"` + DiskRateLimiterOpsOneTimeBurst int64 `toml:"disk_rate_limiter_ops_one_time_burst"` + NetRateLimiterBwMaxRate int64 `toml:"net_rate_limiter_bw_max_rate"` + NetRateLimiterBwOneTimeBurst int64 `toml:"net_rate_limiter_bw_one_time_burst"` + NetRateLimiterOpsMaxRate int64 `toml:"net_rate_limiter_ops_max_rate"` + NetRateLimiterOpsOneTimeBurst int64 `toml:"net_rate_limiter_ops_one_time_burst"` + VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"` + DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"` + MemorySize uint32 `toml:"default_memory"` + MemSlots uint32 `toml:"memory_slots"` + DefaultBridges uint32 `toml:"default_bridges"` + Msize9p uint32 `toml:"msize_9p"` + PCIeRootPort uint32 `toml:"pcie_root_port"` + NumVCPUs int32 `toml:"default_vcpus"` + BlockDeviceCacheSet bool `toml:"block_device_cache_set"` + BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"` + BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"` + EnableVhostUserStore bool `toml:"enable_vhost_user_store"` + DisableBlockDeviceUse bool 
`toml:"disable_block_device_use"` + MemPrealloc bool `toml:"enable_mem_prealloc"` + HugePages bool `toml:"enable_hugepages"` + VirtioMem bool `toml:"enable_virtio_mem"` + IOMMU bool `toml:"enable_iommu"` + IOMMUPlatform bool `toml:"enable_iommu_platform"` + Debug bool `toml:"enable_debug"` + DisableNestingChecks bool `toml:"disable_nesting_checks"` + EnableIOThreads bool `toml:"enable_iothreads"` + DisableImageNvdimm bool `toml:"disable_image_nvdimm"` + HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"` + DisableVhostNet bool `toml:"disable_vhost_net"` + GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"` + ConfidentialGuest bool `toml:"confidential_guest"` + GuestSwap bool `toml:"enable_guest_swap"` + Rootless bool `toml:"rootless"` + DisableSeccomp bool `toml:"disable_seccomp"` + DisableSeLinux bool `toml:"disable_selinux"` } type runtime struct { @@ -486,6 +490,34 @@ func (h hypervisor) getInitrdAndImage() (initrd string, image string, err error) return } +func (h hypervisor) getDiskRateLimiterBwMaxRate() int64 { + return h.DiskRateLimiterBwMaxRate +} + +func (h hypervisor) getDiskRateLimiterBwOneTimeBurst() int64 { + if h.DiskRateLimiterBwOneTimeBurst != 0 && h.getDiskRateLimiterBwMaxRate() == 0 { + kataUtilsLogger.Warn("The DiskRateLimiterBwOneTimeBurst is set but DiskRateLimiterBwMaxRate is not set, this option will be ignored.") + + h.DiskRateLimiterBwOneTimeBurst = 0 + } + + return h.DiskRateLimiterBwOneTimeBurst +} + +func (h hypervisor) getDiskRateLimiterOpsMaxRate() int64 { + return h.DiskRateLimiterOpsMaxRate +} + +func (h hypervisor) getDiskRateLimiterOpsOneTimeBurst() int64 { + if h.DiskRateLimiterOpsOneTimeBurst != 0 && h.getDiskRateLimiterOpsMaxRate() == 0 { + kataUtilsLogger.Warn("The DiskRateLimiterOpsOneTimeBurst is set but DiskRateLimiterOpsMaxRate is not set, this option will be ignored.") + + h.DiskRateLimiterOpsOneTimeBurst = 0 + } + + return h.DiskRateLimiterOpsOneTimeBurst +} + func (h hypervisor) getRxRateLimiterCfg() uint64 { return h.RxRateLimiterMaxRate } diff --git a/src/runtime/virtcontainers/hypervisor.go b/src/runtime/virtcontainers/hypervisor.go index fa9b668673..c689eae3c9 100644 --- a/src/runtime/virtcontainers/hypervisor.go +++ b/src/runtime/virtcontainers/hypervisor.go @@ -380,6 +380,24 @@ type HypervisorConfig struct { // Enable SGX. Hardware-based isolation and memory encryption. SGXEPCSize int64 + // DiskRateLimiterBwRate is used to control disk I/O bandwidth on VM level. + // The same value, defined in bits per second, is used for inbound and outbound bandwidth. + DiskRateLimiterBwMaxRate int64 + + // DiskRateLimiterBwOneTimeBurst is used to control disk I/O bandwidth on VM level. + // This increases the initial max rate and this initial extra credit does *NOT* replenish + // and can be used for an *initial* burst of data. + DiskRateLimiterBwOneTimeBurst int64 + + // DiskRateLimiterOpsRate is used to control disk I/O operations on VM level. + // The same value, defined in operations per second, is used for inbound and outbound bandwidth. + DiskRateLimiterOpsMaxRate int64 + + // DiskRateLimiterOpsOneTimeBurst is used to control disk I/O operations on VM level. + // This increases the initial max rate and this initial extra credit does *NOT* replenish + // and can be used for an *initial* burst of data. + DiskRateLimiterOpsOneTimeBurst int64 + // RxRateLimiterMaxRate is used to control network I/O inbound bandwidth on VM level. 
RxRateLimiterMaxRate uint64 From 511f7f822d28843ee64571129cbd24f4816ab048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?= Date: Mon, 25 Apr 2022 15:35:15 +0200 Subject: [PATCH 07/11] config: Add DiskRateLimiter* to Cloud Hypervisor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's add the newly added disk rate limiter configurations to the Cloud Hypervisor's hypervisor configuration. Right now those are not used anywhere, and there's absolutely no way the users can set those up. That's coming later in this very same series. Signed-off-by: Fabiano Fidêncio --- src/runtime/pkg/katautils/config.go | 104 ++++++++++++----------- src/runtime/pkg/katautils/config_test.go | 43 ++++++++-- 2 files changed, 88 insertions(+), 59 deletions(-) diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go index 0401b21802..ec02b4bc26 100644 --- a/src/runtime/pkg/katautils/config.go +++ b/src/runtime/pkg/katautils/config.go @@ -892,56 +892,60 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { } return vc.HypervisorConfig{ - HypervisorPath: hypervisor, - HypervisorPathList: h.HypervisorPathList, - KernelPath: kernel, - InitrdPath: initrd, - ImagePath: image, - FirmwarePath: firmware, - MachineAccelerators: machineAccelerators, - KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)), - HypervisorMachineType: machineType, - NumVCPUs: h.defaultVCPUs(), - DefaultMaxVCPUs: h.defaultMaxVCPUs(), - MemorySize: h.defaultMemSz(), - MemSlots: h.defaultMemSlots(), - MemOffset: h.defaultMemOffset(), - VirtioMem: h.VirtioMem, - EntropySource: h.GetEntropySource(), - EntropySourceList: h.EntropySourceList, - DefaultBridges: h.defaultBridges(), - DisableBlockDeviceUse: h.DisableBlockDeviceUse, - SharedFS: sharedFS, - VirtioFSDaemon: h.VirtioFSDaemon, - VirtioFSDaemonList: h.VirtioFSDaemonList, - VirtioFSCacheSize: h.VirtioFSCacheSize, - VirtioFSCache: h.VirtioFSCache, - MemPrealloc: h.MemPrealloc, - HugePages: h.HugePages, - FileBackedMemRootDir: h.FileBackedMemRootDir, - FileBackedMemRootList: h.FileBackedMemRootList, - Debug: h.Debug, - DisableNestingChecks: h.DisableNestingChecks, - BlockDeviceDriver: blockDriver, - BlockDeviceCacheSet: h.BlockDeviceCacheSet, - BlockDeviceCacheDirect: h.BlockDeviceCacheDirect, - BlockDeviceCacheNoflush: h.BlockDeviceCacheNoflush, - EnableIOThreads: h.EnableIOThreads, - Msize9p: h.msize9p(), - HotplugVFIOOnRootBus: h.HotplugVFIOOnRootBus, - PCIeRootPort: h.PCIeRootPort, - DisableVhostNet: true, - GuestHookPath: h.guestHookPath(), - VirtioFSExtraArgs: h.VirtioFSExtraArgs, - SGXEPCSize: defaultSGXEPCSize, - EnableAnnotations: h.EnableAnnotations, - DisableSeccomp: h.DisableSeccomp, - ConfidentialGuest: h.ConfidentialGuest, - DisableSeLinux: h.DisableSeLinux, - NetRateLimiterBwMaxRate: h.getNetRateLimiterBwMaxRate(), - NetRateLimiterBwOneTimeBurst: h.getNetRateLimiterBwOneTimeBurst(), - NetRateLimiterOpsMaxRate: h.getNetRateLimiterOpsMaxRate(), - NetRateLimiterOpsOneTimeBurst: h.getNetRateLimiterOpsOneTimeBurst(), + HypervisorPath: hypervisor, + HypervisorPathList: h.HypervisorPathList, + KernelPath: kernel, + InitrdPath: initrd, + ImagePath: image, + FirmwarePath: firmware, + MachineAccelerators: machineAccelerators, + KernelParams: vc.DeserializeParams(strings.Fields(kernelParams)), + HypervisorMachineType: machineType, + NumVCPUs: h.defaultVCPUs(), + DefaultMaxVCPUs: h.defaultMaxVCPUs(), + MemorySize: h.defaultMemSz(), + MemSlots: h.defaultMemSlots(), + 
MemOffset: h.defaultMemOffset(), + VirtioMem: h.VirtioMem, + EntropySource: h.GetEntropySource(), + EntropySourceList: h.EntropySourceList, + DefaultBridges: h.defaultBridges(), + DisableBlockDeviceUse: h.DisableBlockDeviceUse, + SharedFS: sharedFS, + VirtioFSDaemon: h.VirtioFSDaemon, + VirtioFSDaemonList: h.VirtioFSDaemonList, + VirtioFSCacheSize: h.VirtioFSCacheSize, + VirtioFSCache: h.VirtioFSCache, + MemPrealloc: h.MemPrealloc, + HugePages: h.HugePages, + FileBackedMemRootDir: h.FileBackedMemRootDir, + FileBackedMemRootList: h.FileBackedMemRootList, + Debug: h.Debug, + DisableNestingChecks: h.DisableNestingChecks, + BlockDeviceDriver: blockDriver, + BlockDeviceCacheSet: h.BlockDeviceCacheSet, + BlockDeviceCacheDirect: h.BlockDeviceCacheDirect, + BlockDeviceCacheNoflush: h.BlockDeviceCacheNoflush, + EnableIOThreads: h.EnableIOThreads, + Msize9p: h.msize9p(), + HotplugVFIOOnRootBus: h.HotplugVFIOOnRootBus, + PCIeRootPort: h.PCIeRootPort, + DisableVhostNet: true, + GuestHookPath: h.guestHookPath(), + VirtioFSExtraArgs: h.VirtioFSExtraArgs, + SGXEPCSize: defaultSGXEPCSize, + EnableAnnotations: h.EnableAnnotations, + DisableSeccomp: h.DisableSeccomp, + ConfidentialGuest: h.ConfidentialGuest, + DisableSeLinux: h.DisableSeLinux, + NetRateLimiterBwMaxRate: h.getNetRateLimiterBwMaxRate(), + NetRateLimiterBwOneTimeBurst: h.getNetRateLimiterBwOneTimeBurst(), + NetRateLimiterOpsMaxRate: h.getNetRateLimiterOpsMaxRate(), + NetRateLimiterOpsOneTimeBurst: h.getNetRateLimiterOpsOneTimeBurst(), + DiskRateLimiterBwMaxRate: h.getDiskRateLimiterBwMaxRate(), + DiskRateLimiterBwOneTimeBurst: h.getDiskRateLimiterBwOneTimeBurst(), + DiskRateLimiterOpsMaxRate: h.getDiskRateLimiterOpsMaxRate(), + DiskRateLimiterOpsOneTimeBurst: h.getDiskRateLimiterOpsOneTimeBurst(), }, nil } diff --git a/src/runtime/pkg/katautils/config_test.go b/src/runtime/pkg/katautils/config_test.go index 778f6ec6de..0bcac2137f 100644 --- a/src/runtime/pkg/katautils/config_test.go +++ b/src/runtime/pkg/katautils/config_test.go @@ -814,6 +814,10 @@ func TestNewClhHypervisorConfig(t *testing.T) { netRateLimiterBwOneTimeBurst := int64(1000) netRateLimiterOpsMaxRate := int64(0) netRateLimiterOpsOneTimeBurst := int64(1000) + diskRateLimiterBwMaxRate := int64(1000) + diskRateLimiterBwOneTimeBurst := int64(1000) + diskRateLimiterOpsMaxRate := int64(0) + diskRateLimiterOpsOneTimeBurst := int64(1000) for _, file := range []string{imagePath, hypervisorPath, kernelPath, virtioFsDaemon} { err := createEmptyFile(file) @@ -821,15 +825,19 @@ func TestNewClhHypervisorConfig(t *testing.T) { } hypervisor := hypervisor{ - Path: hypervisorPath, - Kernel: kernelPath, - Image: imagePath, - VirtioFSDaemon: virtioFsDaemon, - VirtioFSCache: "always", - NetRateLimiterBwMaxRate: netRateLimiterBwMaxRate, - NetRateLimiterBwOneTimeBurst: netRateLimiterBwOneTimeBurst, - NetRateLimiterOpsMaxRate: netRateLimiterOpsMaxRate, - NetRateLimiterOpsOneTimeBurst: netRateLimiterOpsOneTimeBurst, + Path: hypervisorPath, + Kernel: kernelPath, + Image: imagePath, + VirtioFSDaemon: virtioFsDaemon, + VirtioFSCache: "always", + NetRateLimiterBwMaxRate: netRateLimiterBwMaxRate, + NetRateLimiterBwOneTimeBurst: netRateLimiterBwOneTimeBurst, + NetRateLimiterOpsMaxRate: netRateLimiterOpsMaxRate, + NetRateLimiterOpsOneTimeBurst: netRateLimiterOpsOneTimeBurst, + DiskRateLimiterBwMaxRate: diskRateLimiterBwMaxRate, + DiskRateLimiterBwOneTimeBurst: diskRateLimiterBwOneTimeBurst, + DiskRateLimiterOpsMaxRate: diskRateLimiterOpsMaxRate, + DiskRateLimiterOpsOneTimeBurst: diskRateLimiterOpsOneTimeBurst, 
 	}
 
 	config, err := newClhHypervisorConfig(hypervisor)
 	if err != nil {
@@ -876,6 +884,23 @@ func TestNewClhHypervisorConfig(t *testing.T) {
 	if config.NetRateLimiterOpsOneTimeBurst != 0 {
 		t.Errorf("Expected value for network operations one time burst %v, got %v", netRateLimiterOpsOneTimeBurst, config.NetRateLimiterOpsOneTimeBurst)
 	}
+
+	if config.DiskRateLimiterBwMaxRate != diskRateLimiterBwMaxRate {
+		t.Errorf("Expected value for disk bandwidth rate limiter %v, got %v", diskRateLimiterBwMaxRate, config.DiskRateLimiterBwMaxRate)
+	}
+
+	if config.DiskRateLimiterBwOneTimeBurst != diskRateLimiterBwOneTimeBurst {
+		t.Errorf("Expected value for disk bandwidth one time burst %v, got %v", diskRateLimiterBwOneTimeBurst, config.DiskRateLimiterBwOneTimeBurst)
+	}
+
+	if config.DiskRateLimiterOpsMaxRate != diskRateLimiterOpsMaxRate {
+		t.Errorf("Expected value for disk operations rate limiter %v, got %v", diskRateLimiterOpsMaxRate, config.DiskRateLimiterOpsMaxRate)
+	}
+
+	// We expect 0 (zero) here as diskRateLimiterOpsMaxRate is not set (set to zero).
+	if config.DiskRateLimiterOpsOneTimeBurst != 0 {
+		t.Errorf("Expected value for disk operations one time burst %v, got %v", diskRateLimiterOpsOneTimeBurst, config.DiskRateLimiterOpsOneTimeBurst)
+	}
 }
 
 func TestHypervisorDefaults(t *testing.T) {

From 63c4da03a925003e313d39f5e649ff0151479ae0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Mon, 25 Apr 2022 16:04:57 +0200
Subject: [PATCH 08/11] clh: Implement the Disk RateLimiter logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's take advantage of the newly added DiskRateLimiter* options and
apply those to the block device configuration.

The logic here is identical to the one already present in the Network
part of Cloud Hypervisor's driver.
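To make the shared logic easier to follow, here is a minimal standalone sketch of what the common rate-limiter-building helper boils down to. It uses plain local structs in place of the generated chclient types, so the type and function names below are illustrative assumptions, not the real Cloud Hypervisor client API.

    package main

    import "fmt"

    // tokenBucket and rateLimiter stand in for the generated chclient types.
    type tokenBucket struct {
    	Size         int64
    	OneTimeBurst int64
    	RefillTime   int64 // milliseconds
    }

    type rateLimiter struct {
    	Bandwidth *tokenBucket
    	Ops       *tokenBucket
    }

    const refillTimeMilliSecs = 1000

    // buildRateLimiter mirrors the driver's behaviour: no limiter at all when
    // both sizes are zero, and a one-time burst only on a bucket that has a
    // max rate configured.
    func buildRateLimiter(bwSize, bwBurst, opsSize, opsBurst int64) *rateLimiter {
    	if bwSize == 0 && opsSize == 0 {
    		return nil
    	}

    	rl := &rateLimiter{}
    	if bwSize != 0 {
    		b := &tokenBucket{Size: bwSize, RefillTime: refillTimeMilliSecs}
    		if bwBurst != 0 {
    			b.OneTimeBurst = bwBurst
    		}
    		rl.Bandwidth = b
    	}
    	if opsSize != 0 {
    		b := &tokenBucket{Size: opsSize, RefillTime: refillTimeMilliSecs}
    		if opsBurst != 0 {
    			b.OneTimeBurst = opsBurst
    		}
    		rl.Ops = b
    	}
    	return rl
    }

    func main() {
    	// Disk and network devices both go through the same helper; only the
    	// four input values differ.
    	rl := buildRateLimiter(1310720, 0, 10000, 20000)
    	fmt.Printf("bandwidth: %+v\n", *rl.Bandwidth)
    	fmt.Printf("ops:       %+v\n", *rl.Ops)

    	fmt.Println(buildRateLimiter(0, 0, 0, 0)) // <nil>: throttling disabled
    }

In the actual driver, the bandwidth inputs fed into this helper are already converted from bits per second into 1024-based bytes via RevertBytes, as the clh.go hunks in this patch show.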
Signed-off-by: Fabiano Fidêncio
---
 src/runtime/virtcontainers/clh.go | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go
index 30dc42c470..b18276a85b 100644
--- a/src/runtime/virtcontainers/clh.go
+++ b/src/runtime/virtcontainers/clh.go
@@ -442,6 +442,11 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
 	disk := chclient.NewDiskConfig(imagePath)
 	disk.SetReadonly(true)
 
+	diskRateLimiterConfig := clh.getDiskRateLimiterConfig()
+	if diskRateLimiterConfig != nil {
+		disk.SetRateLimiterConfig(*diskRateLimiterConfig)
+	}
+
 	if clh.vmconfig.Disks != nil {
 		*clh.vmconfig.Disks = append(*clh.vmconfig.Disks, *disk)
 	} else {
@@ -667,6 +672,11 @@ func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) erro
 	clhDisk.VhostUser = func(b bool) *bool { return &b }(false)
 	clhDisk.Id = &driveID
 
+	diskRateLimiterConfig := clh.getDiskRateLimiterConfig()
+	if diskRateLimiterConfig != nil {
+		clhDisk.SetRateLimiterConfig(*diskRateLimiterConfig)
+	}
+
 	pciInfo, _, err := cl.VmAddDiskPut(ctx, clhDisk)
 
 	if err != nil {
@@ -1342,6 +1352,14 @@ func (clh *cloudHypervisor) getNetRateLimiterConfig() *chclient.RateLimiterConfi
 		clh.config.NetRateLimiterOpsOneTimeBurst)
 }
 
+func (clh *cloudHypervisor) getDiskRateLimiterConfig() *chclient.RateLimiterConfig {
+	return clh.getRateLimiterConfig(
+		int64(utils.RevertBytes(uint64(clh.config.DiskRateLimiterBwMaxRate/8))),
+		int64(utils.RevertBytes(uint64(clh.config.DiskRateLimiterBwOneTimeBurst/8))),
+		clh.config.DiskRateLimiterOpsMaxRate,
+		clh.config.DiskRateLimiterOpsOneTimeBurst)
+}
+
 func (clh *cloudHypervisor) addNet(e Endpoint) error {
 	clh.Logger().WithField("endpoint-type", e).Debugf("Adding Endpoint of type %v", e)
 
From a88adabaaecbb448120d3e78a703fbf61e7900e3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Tue, 19 Apr 2022 13:23:14 +0200
Subject: [PATCH 09/11] clh: Cloud Hypervisor has a built-in Rate Limiter
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The notion of "built-in rate limiter" was added as part of
bd8658e3627c450921c63bde3fe8d7458a01b242, and that commit considered
that only Firecracker had a built-in rate limiter, which I think was
the case when that was introduced (mid 2020).

Nowadays, however, Cloud Hypervisor takes advantage of the very same
crate used by Firecracker to do I/O throttling.

Signed-off-by: Fabiano Fidêncio
---
 src/runtime/virtcontainers/clh.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go
index b18276a85b..41d00ee275 100644
--- a/src/runtime/virtcontainers/clh.go
+++ b/src/runtime/virtcontainers/clh.go
@@ -1497,5 +1497,5 @@ func (clh *cloudHypervisor) vmInfo() (chclient.VmInfo, error) {
 }
 
 func (clh *cloudHypervisor) IsRateLimiterBuiltin() bool {
-	return false
+	return true
 }

From 7580bb5a78bf61cff350d0089e4eb74211e6a7f0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Thu, 21 Apr 2022 00:58:56 +0200
Subject: [PATCH 10/11] clh: Expose net rate limiter config
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

With everything implemented, let's now expose the net rate limiter
configuration options in the Cloud Hypervisor configuration file.
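The config-level behaviour behind the "only takes effect if the matching max rate is set" wording in the file below can be sketched as a single guard. This is a simplified standalone version of the getters added earlier in the series; the helper name is hypothetical and the kata logger call is replaced by a plain fmt warning, purely for illustration.

    package main

    import "fmt"

    // oneTimeBurstOrZero mirrors the config getters: a one-time burst only
    // makes sense on top of a max rate, so it is dropped (with a warning)
    // when the matching max rate is left at its default of 0.
    func oneTimeBurstOrZero(name string, burst, maxRate int64) int64 {
    	if burst != 0 && maxRate == 0 {
    		fmt.Printf("%s is set but its max rate is not set, this option will be ignored\n", name)
    		return 0
    	}
    	return burst
    }

    func main() {
    	// Burst configured without net_rate_limiter_ops_max_rate: ignored.
    	fmt.Println(oneTimeBurstOrZero("net_rate_limiter_ops_one_time_burst", 1000, 0)) // 0

    	// Same burst, but with a max rate configured: the burst is honoured.
    	fmt.Println(oneTimeBurstOrZero("net_rate_limiter_ops_one_time_burst", 1000, 10000)) // 1000
    }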
Fixes: #4017
Signed-off-by: Fabiano Fidêncio
---
 src/runtime/config/configuration-clh.toml.in | 36 ++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in
index b3cfbd5836..5dfe2082d8 100644
--- a/src/runtime/config/configuration-clh.toml.in
+++ b/src/runtime/config/configuration-clh.toml.in
@@ -180,6 +180,42 @@ block_device_driver = "virtio-blk"
 # but it will not abort container execution.
 #guest_hook_path = "/usr/share/oci/hooks"
 #
+# These options are related to the network rate limiter at the VMM level, and are
+# based on the Cloud Hypervisor I/O throttling. Those are disabled by default
+# and we strongly advise users to refer to the Cloud Hypervisor official
+# documentation for a better understanding of its internals:
+# https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md
+#
+# Bandwidth rate limiter options
+#
+# net_rate_limiter_bw_max_rate controls network I/O bandwidth (size in bits/sec
+# for SB/VM).
+# The same value is used for inbound and outbound bandwidth.
+# Default 0-sized value means unlimited rate.
+#net_rate_limiter_bw_max_rate = 0
+#
+# net_rate_limiter_bw_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if net_rate_limiter_bw_max_rate is
+# set to a non zero value.
+#net_rate_limiter_bw_one_time_burst = 0
+#
+# Operation rate limiter options
+#
+# net_rate_limiter_ops_max_rate controls network I/O operations (number in
+# ops/sec for SB/VM).
+# The same value is used for inbound and outbound operations.
+# Default 0-sized value means unlimited rate.
+#net_rate_limiter_ops_max_rate = 0
+#
+# net_rate_limiter_ops_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if net_rate_limiter_ops_max_rate is
+# set to a non zero value.
+#net_rate_limiter_ops_one_time_burst = 0
+
 [agent.@PROJECT_TYPE@]
 # If enabled, make the agent display debug-level messages.
 # (default: disabled)

From b6467ddd73458324abe9b8c470a78d9a60f396ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabiano=20Fid=C3=AAncio?=
Date: Mon, 25 Apr 2022 16:53:58 +0200
Subject: [PATCH 11/11] clh: Expose disk rate limiter config
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

With everything implemented, let's now expose the disk rate limiter
configuration options in the Cloud Hypervisor configuration file.

Fixes: #4139
Signed-off-by: Fabiano Fidêncio
---
 src/runtime/config/configuration-clh.toml.in | 36 ++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/src/runtime/config/configuration-clh.toml.in b/src/runtime/config/configuration-clh.toml.in
index 5dfe2082d8..41d4ae4930 100644
--- a/src/runtime/config/configuration-clh.toml.in
+++ b/src/runtime/config/configuration-clh.toml.in
@@ -215,6 +215,42 @@ block_device_driver = "virtio-blk"
 # This is *optional* and only takes effect if net_rate_limiter_ops_max_rate is
 # set to a non zero value.
 #net_rate_limiter_ops_one_time_burst = 0
+#
+# These options are related to the disk rate limiter at the VMM level, and are
+# based on the Cloud Hypervisor I/O throttling. Those are disabled by default
+# and we strongly advise users to refer to the Cloud Hypervisor official
+# documentation for a better understanding of its internals:
+# https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/docs/io_throttling.md
+#
+# Bandwidth rate limiter options
+#
+# disk_rate_limiter_bw_max_rate controls disk I/O bandwidth (size in bits/sec
+# for SB/VM).
+# The same value is used for inbound and outbound bandwidth.
+# Default 0-sized value means unlimited rate.
+#disk_rate_limiter_bw_max_rate = 0
+#
+# disk_rate_limiter_bw_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if disk_rate_limiter_bw_max_rate is
+# set to a non zero value.
+#disk_rate_limiter_bw_one_time_burst = 0
+#
+# Operation rate limiter options
+#
+# disk_rate_limiter_ops_max_rate controls disk I/O operations (number in
+# ops/sec for SB/VM).
+# The same value is used for inbound and outbound operations.
+# Default 0-sized value means unlimited rate.
+#disk_rate_limiter_ops_max_rate = 0
+#
+# disk_rate_limiter_ops_one_time_burst increases the initial max rate and this
+# initial extra credit does *NOT* affect the overall limit and can be used for
+# an *initial* burst of data.
+# This is *optional* and only takes effect if disk_rate_limiter_ops_max_rate is
+# set to a non zero value.
+#disk_rate_limiter_ops_one_time_burst = 0
 
 [agent.@PROJECT_TYPE@]
 # If enabled, make the agent display debug-level messages.
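As a closing worked example, the sketch below shows the conversion the runtime performs between the bits-per-second values set in this file and the byte-based token bucket handed to Cloud Hypervisor: bits are divided by 8 and then rescaled from 1000-based to 1024-based units, with a fixed 1000 ms refill time. The RevertBytes copy here duplicates the utils helper added earlier in the series so the snippet stays self-contained, and the sample value is hypothetical.

    package main

    import "fmt"

    // RevertBytes rescales a value from 1000-based units to 1024-based units,
    // like the helper moved into virtcontainers/utils in this series.
    func RevertBytes(num uint64) uint64 {
    	a := num / 1000
    	b := num % 1000
    	if a == 0 {
    		return num
    	}
    	return 1024*RevertBytes(a) + b
    }

    func main() {
    	// Hypothetical setting: net_rate_limiter_bw_max_rate = 400000000 (400 Mbit/s).
    	const bwMaxRateBits = int64(400000000)

    	// The driver divides by 8 to get bytes/s, then rescales 1000 -> 1024.
    	bucketSize := int64(RevertBytes(uint64(bwMaxRateBits / 8)))

    	// The refill time is not configurable; it is fixed at 1000 ms.
    	const refillTimeMilliSecs = 1000

    	fmt.Printf("TokenBucket{size: %d bytes, refill_time: %d ms}\n",
    		bucketSize, refillTimeMilliSecs)
    }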