Merge pull request #7623 from fidencio/topic/runtime-improve-vcpu-allocation-on-host-side
runtime: Improve vCPU allocation for the VMMs
Commit: fd9b6d6837
@@ -47,7 +47,7 @@ There are several kinds of Kata configurations and they are listed below.
 | `io.katacontainers.config.hypervisor.ctlpath` (R) | `string` | Path to the `acrnctl` binary for the ACRN hypervisor |
 | `io.katacontainers.config.hypervisor.default_max_vcpus` | uint32| the maximum number of vCPUs allocated for the VM by the hypervisor |
 | `io.katacontainers.config.hypervisor.default_memory` | uint32| the memory assigned for a VM by the hypervisor in `MiB` |
-| `io.katacontainers.config.hypervisor.default_vcpus` | uint32| the default vCPUs assigned for a VM by the hypervisor |
+| `io.katacontainers.config.hypervisor.default_vcpus` | float32| the default vCPUs assigned for a VM by the hypervisor |
 | `io.katacontainers.config.hypervisor.disable_block_device_use` | `boolean` | disallow a block device from being used |
 | `io.katacontainers.config.hypervisor.disable_image_nvdimm` | `boolean` | specify if a `nvdimm` device should be used as rootfs for the guest (QEMU) |
 | `io.katacontainers.config.hypervisor.disable_vhost_net` | `boolean` | specify if `vhost-net` is not available on the host |
@@ -134,7 +134,7 @@ func makeRuntimeConfig(prefixDir string) (configFile string, ociConfig oci.Runti
 		HotPlugVFIO:         hotPlugVFIO,
 		ColdPlugVFIO:        coldPlugVFIO,
 		DisableNewNetNs:     disableNewNetNs,
-		DefaultVCPUCount:    hypConfig.NumVCPUs,
+		DefaultVCPUCount:    hypConfig.NumVCPUs(),
 		DefaultMaxVCPUCount: hypConfig.DefaultMaxVCPUs,
 		DefaultMemSize:      hypConfig.MemorySize,
 		DefaultMsize9p:      hypConfig.Msize9p,
@@ -133,7 +133,7 @@ type hypervisor struct {
 	MemSlots                uint32  `toml:"memory_slots"`
 	DefaultBridges          uint32  `toml:"default_bridges"`
 	Msize9p                 uint32  `toml:"msize_9p"`
-	NumVCPUs                int32   `toml:"default_vcpus"`
+	NumVCPUs                float32 `toml:"default_vcpus"`
 	BlockDeviceCacheSet     bool    `toml:"block_device_cache_set"`
 	BlockDeviceCacheDirect  bool    `toml:"block_device_cache_direct"`
 	BlockDeviceCacheNoflush bool    `toml:"block_device_cache_noflush"`
@@ -395,17 +395,17 @@ func getCurrentCpuNum() uint32 {
 	return cpu
 }
 
-func (h hypervisor) defaultVCPUs() uint32 {
-	numCPUs := getCurrentCpuNum()
+func (h hypervisor) defaultVCPUs() float32 {
+	numCPUs := float32(getCurrentCpuNum())
 
-	if h.NumVCPUs < 0 || h.NumVCPUs > int32(numCPUs) {
+	if h.NumVCPUs < 0 || h.NumVCPUs > numCPUs {
 		return numCPUs
 	}
 	if h.NumVCPUs == 0 { // or unspecified
-		return defaultVCPUCount
+		return float32(defaultVCPUCount)
 	}
 
-	return uint32(h.NumVCPUs)
+	return h.NumVCPUs
 }
 
 func (h hypervisor) defaultMaxVCPUs() uint32 {
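With `default_vcpus` now parsed as a float32, the clamping above keeps fractional requests within the host CPU count before any rounding happens. A minimal standalone sketch of that logic (the function and parameter names here are illustrative, not the runtime's identifiers):

    // Sketch of the clamping behaviour for a fractional default_vcpus value.
    // hostCPUs is the detected host CPU count; defaultCount is the 1-vCPU default.
    func clampVCPUs(requested, hostCPUs, defaultCount float32) float32 {
        if requested < 0 || requested > hostCPUs {
            return hostCPUs // out of range: fall back to all host CPUs
        }
        if requested == 0 { // unspecified
            return defaultCount
        }
        return requested // e.g. 0.75 stays 0.75 at this stage
    }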
@@ -723,7 +723,7 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		RootfsType:      rootfsType,
 		FirmwarePath:    firmware,
 		KernelParams:    vc.DeserializeParams(vc.KernelParamFields(kernelParams)),
-		NumVCPUs:        h.defaultVCPUs(),
+		NumVCPUsF:       h.defaultVCPUs(),
 		DefaultMaxVCPUs: h.defaultMaxVCPUs(),
 		MemorySize:      h.defaultMemSz(),
 		MemSlots:        h.defaultMemSlots(),
@@ -857,7 +857,7 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		CPUFeatures:           cpuFeatures,
 		KernelParams:          vc.DeserializeParams(vc.KernelParamFields(kernelParams)),
 		HypervisorMachineType: machineType,
-		NumVCPUs:              h.defaultVCPUs(),
+		NumVCPUsF:             h.defaultVCPUs(),
 		DefaultMaxVCPUs:       h.defaultMaxVCPUs(),
 		MemorySize:            h.defaultMemSz(),
 		MemSlots:              h.defaultMemSlots(),
@@ -968,7 +968,7 @@ func newAcrnHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		HypervisorCtlPathList: h.CtlPathList,
 		FirmwarePath:          firmware,
 		KernelParams:          vc.DeserializeParams(vc.KernelParamFields(kernelParams)),
-		NumVCPUs:              h.defaultVCPUs(),
+		NumVCPUsF:             h.defaultVCPUs(),
 		DefaultMaxVCPUs:       h.defaultMaxVCPUs(),
 		MemorySize:            h.defaultMemSz(),
 		MemSlots:              h.defaultMemSlots(),
@@ -1059,7 +1059,7 @@ func newClhHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		MachineAccelerators:   machineAccelerators,
 		KernelParams:          vc.DeserializeParams(vc.KernelParamFields(kernelParams)),
 		HypervisorMachineType: machineType,
-		NumVCPUs:              h.defaultVCPUs(),
+		NumVCPUsF:             h.defaultVCPUs(),
 		DefaultMaxVCPUs:       h.defaultMaxVCPUs(),
 		MemorySize:            h.defaultMemSz(),
 		MemSlots:              h.defaultMemSlots(),
@@ -1132,7 +1132,7 @@ func newDragonballHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		ImagePath:       image,
 		RootfsType:      rootfsType,
 		KernelParams:    vc.DeserializeParams(vc.KernelParamFields(kernelParams)),
-		NumVCPUs:        h.defaultVCPUs(),
+		NumVCPUsF:       h.defaultVCPUs(),
 		DefaultMaxVCPUs: h.defaultMaxVCPUs(),
 		MemorySize:      h.defaultMemSz(),
 		MemSlots:        h.defaultMemSlots(),
@@ -1297,7 +1297,7 @@ func GetDefaultHypervisorConfig() vc.HypervisorConfig {
 		MachineAccelerators:   defaultMachineAccelerators,
 		CPUFeatures:           defaultCPUFeatures,
 		HypervisorMachineType: defaultMachineType,
-		NumVCPUs:              defaultVCPUCount,
+		NumVCPUsF:             float32(defaultVCPUCount),
 		DefaultMaxVCPUs:       defaultMaxVCPUCount,
 		MemorySize:            defaultMemSize,
 		MemOffset:             defaultMemOffset,
@@ -161,7 +161,7 @@ func createAllRuntimeConfigFiles(dir, hypervisor string) (testConfig testRuntime
 		RootfsType:            rootfsType,
 		KernelParams:          vc.DeserializeParams(vc.KernelParamFields(kernelParams)),
 		HypervisorMachineType: machineType,
-		NumVCPUs:              defaultVCPUCount,
+		NumVCPUsF:             float32(defaultVCPUCount),
 		DefaultMaxVCPUs:       getCurrentCpuNum(),
 		MemorySize:            defaultMemSize,
 		DefaultMaxMemorySize:  maxMemory,
@@ -554,7 +554,7 @@ func TestMinimalRuntimeConfig(t *testing.T) {
 		InitrdPath:            defaultInitrdPath,
 		RootfsType:            defaultRootfsType,
 		HypervisorMachineType: defaultMachineType,
-		NumVCPUs:              defaultVCPUCount,
+		NumVCPUsF:             float32(defaultVCPUCount),
 		DefaultMaxVCPUs:       defaultMaxVCPUCount,
 		MemorySize:            defaultMemSize,
 		DisableBlockDeviceUse: defaultDisableBlockDeviceUse,
@@ -926,7 +926,7 @@ func TestHypervisorDefaults(t *testing.T) {
 	h := hypervisor{}
 
 	assert.Equal(h.machineType(), defaultMachineType, "default hypervisor machine type wrong")
-	assert.Equal(h.defaultVCPUs(), defaultVCPUCount, "default vCPU number is wrong")
+	assert.Equal(h.defaultVCPUs(), float32(defaultVCPUCount), "default vCPU number is wrong")
 	assert.Equal(h.defaultMaxVCPUs(), numCPUs, "default max vCPU number is wrong")
 	assert.Equal(h.defaultMemSz(), defaultMemSize, "default memory size is wrong")
 
@@ -936,13 +936,13 @@ func TestHypervisorDefaults(t *testing.T) {
 
 	// auto inferring
 	h.NumVCPUs = -1
-	assert.Equal(h.defaultVCPUs(), numCPUs, "default vCPU number is wrong")
+	assert.Equal(h.defaultVCPUs(), float32(numCPUs), "default vCPU number is wrong")
 
 	h.NumVCPUs = 2
-	assert.Equal(h.defaultVCPUs(), uint32(2), "default vCPU number is wrong")
+	assert.Equal(h.defaultVCPUs(), float32(2), "default vCPU number is wrong")
 
-	h.NumVCPUs = int32(numCPUs) + 1
-	assert.Equal(h.defaultVCPUs(), numCPUs, "default vCPU number is wrong")
+	h.NumVCPUs = float32(numCPUs + 1)
+	assert.Equal(h.defaultVCPUs(), float32(numCPUs), "default vCPU number is wrong")
 
 	h.DefaultMaxVCPUs = 2
 	assert.Equal(h.defaultMaxVCPUs(), uint32(2), "default max vCPU number is wrong")
@@ -1395,7 +1395,7 @@ func TestDefaultCPUFeatures(t *testing.T) {
 func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
 	assert := assert.New(t)
 
-	vcpus := uint(2)
+	vcpus := float32(2)
 	mem := uint32(2048)
 
 	config := oci.RuntimeConfig{}
@@ -1404,7 +1404,7 @@ func TestUpdateRuntimeConfigurationVMConfig(t *testing.T) {
 	tomlConf := tomlConfig{
 		Hypervisor: map[string]hypervisor{
 			qemuHypervisorTableType: {
-				NumVCPUs:   int32(vcpus),
+				NumVCPUs:   vcpus,
 				MemorySize: mem,
 				Path:       "/",
 				Kernel:     "/",
@@ -1727,7 +1727,7 @@ vfio_mode="vfio"
 	assert.NoError(t, err)
 
 	assert.Equal(t, config.Hypervisor["qemu"].Path, "/usr/bin/qemu-kvm")
-	assert.Equal(t, config.Hypervisor["qemu"].NumVCPUs, int32(2))
+	assert.Equal(t, config.Hypervisor["qemu"].NumVCPUs, float32(2))
 	assert.Equal(t, config.Hypervisor["qemu"].DefaultBridges, uint32(4))
 	assert.Equal(t, config.Hypervisor["qemu"].SharedFS, "virtio-9p")
 	assert.Equal(t, config.Runtime.Debug, true)
@@ -1765,7 +1765,7 @@ func TestUpdateRuntimeConfigHypervisor(t *testing.T) {
 		tomlConf := tomlConfig{
 			Hypervisor: map[string]hypervisor{
 				h.name: {
-					NumVCPUs:   int32(2),
+					NumVCPUs:   float32(2),
 					MemorySize: uint32(2048),
 					Path:       "/",
 					Kernel:     "/",
@@ -12,6 +12,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"math"
 	"path/filepath"
 	"regexp"
 	goruntime "runtime"
@@ -137,7 +138,7 @@ type RuntimeConfig struct {
 
 	// Sandbox sizing information which, if provided, indicates the size of
 	// the sandbox needed for the workload(s)
-	SandboxCPUs  uint32
+	SandboxCPUs  float32
 	SandboxMemMB uint32
 
 	// Determines if we should attempt to size the VM at boot time and skip
@@ -683,11 +684,11 @@ func addHypervisorMemoryOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig
 func addHypervisorCPUOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConfig) error {
 	numCPUs := goruntime.NumCPU()
 
-	if err := newAnnotationConfiguration(ocispec, vcAnnotations.DefaultVCPUs).setUintWithCheck(func(vcpus uint64) error {
-		if uint32(vcpus) > uint32(numCPUs) {
-			return fmt.Errorf("Number of cpus %d specified in annotation default_vcpus is greater than the number of CPUs %d on the system", vcpus, numCPUs)
+	if err := newAnnotationConfiguration(ocispec, vcAnnotations.DefaultVCPUs).setFloat32WithCheck(func(vcpus float32) error {
+		if vcpus > float32(numCPUs) {
+			return fmt.Errorf("Number of cpus %f specified in annotation default_vcpus is greater than the number of CPUs %d on the system", vcpus, numCPUs)
 		}
-		sbConfig.HypervisorConfig.NumVCPUs = uint32(vcpus)
+		sbConfig.HypervisorConfig.NumVCPUsF = float32(vcpus)
 		return nil
 	}); err != nil {
 		return err
@@ -1016,10 +1017,10 @@ func SandboxConfig(ocispec specs.Spec, runtime RuntimeConfig, bundlePath, cid st
 	// with the base number of CPU/memory (which is equal to the default CPU/memory specified for the runtime
 	// configuration or annotations) as well as any specified workload resources.
 	if sandboxConfig.StaticResourceMgmt {
-		sandboxConfig.SandboxResources.BaseCPUs = sandboxConfig.HypervisorConfig.NumVCPUs
+		sandboxConfig.SandboxResources.BaseCPUs = sandboxConfig.HypervisorConfig.NumVCPUsF
 		sandboxConfig.SandboxResources.BaseMemMB = sandboxConfig.HypervisorConfig.MemorySize
 
-		sandboxConfig.HypervisorConfig.NumVCPUs += sandboxConfig.SandboxResources.WorkloadCPUs
+		sandboxConfig.HypervisorConfig.NumVCPUsF += sandboxConfig.SandboxResources.WorkloadCPUs
 		sandboxConfig.HypervisorConfig.MemorySize += sandboxConfig.SandboxResources.WorkloadMemMB
 
 		ociLog.WithFields(logrus.Fields{
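Under static resource management the base (default) vCPUs and the workload vCPUs are now both fractional and are simply summed; rounding is deferred to the hypervisor boundary. A hedged sketch of that accounting (names are illustrative, not the runtime's):

    // Illustrative only: base and workload vCPUs stay float32 and are added.
    func sandboxVCPUBudget(baseCPUs, workloadCPUs float32) float32 {
        // e.g. default_vcpus = 0.75 and a workload limit of 0.25 give 1.0
        return baseCPUs + workloadCPUs
    }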
@@ -1140,6 +1141,7 @@ func IsCRIOContainerManager(spec *specs.Spec) bool {
 const (
 	errAnnotationPositiveNumericKey = "Error parsing annotation for %s: Please specify positive numeric value"
 	errAnnotationBoolKey            = "Error parsing annotation for %s: Please specify boolean value 'true|false'"
+	errAnnotationNumericKeyIsTooBig = "Error parsing annotation for %s: The number exceeds the maximum allowed for its type"
 )
 
 type annotationConfiguration struct {
@@ -1183,9 +1185,24 @@ func (a *annotationConfiguration) setUintWithCheck(f func(uint64) error) error {
 	return nil
 }
 
+func (a *annotationConfiguration) setFloat32WithCheck(f func(float32) error) error {
+	if value, ok := a.ocispec.Annotations[a.key]; ok {
+		float64Value, err := strconv.ParseFloat(value, 32)
+		if err != nil || float64Value < 0 {
+			return fmt.Errorf(errAnnotationPositiveNumericKey, a.key)
+		}
+		if float64Value > math.MaxFloat32 {
+			return fmt.Errorf(errAnnotationNumericKeyIsTooBig, a.key)
+		}
+		float32Value := float32(float64Value)
+		return f(float32Value)
+	}
+	return nil
+}
+
 // CalculateSandboxSizing will calculate the number of CPUs and amount of Memory that should
 // be added to the VM if sandbox annotations are provided with this sizing details
-func CalculateSandboxSizing(spec *specs.Spec) (numCPU, memSizeMB uint32) {
+func CalculateSandboxSizing(spec *specs.Spec) (numCPU float32, memSizeMB uint32) {
 	var memory, quota int64
 	var period uint64
 	var err error
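The new `setFloat32WithCheck` path parses the annotation value from its string form and rejects negative or out-of-range numbers. A standalone sketch of the same checks, assuming the usual `fmt`, `math` and `strconv` imports (the helper name is hypothetical):

    // parseVCPUsAnnotation is an illustrative stand-in for setFloat32WithCheck:
    // the annotation arrives as a string such as "0.75" and must be a
    // non-negative number that fits in a float32.
    func parseVCPUsAnnotation(value string) (float32, error) {
        f, err := strconv.ParseFloat(value, 32)
        if err != nil || f < 0 {
            return 0, fmt.Errorf("please specify a positive numeric value, got %q", value)
        }
        if f > math.MaxFloat32 {
            return 0, fmt.Errorf("value %q exceeds the maximum allowed for float32", value)
        }
        return float32(f), nil
    }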
@@ -1232,7 +1249,7 @@ func CalculateSandboxSizing(spec *specs.Spec) (numCPU, memSizeMB uint32) {
 
 // CalculateContainerSizing will calculate the number of CPUs and amount of memory that is needed
 // based on the provided LinuxResources
-func CalculateContainerSizing(spec *specs.Spec) (numCPU, memSizeMB uint32) {
+func CalculateContainerSizing(spec *specs.Spec) (numCPU float32, memSizeMB uint32) {
 	var memory, quota int64
 	var period uint64
 
@@ -1254,8 +1271,8 @@ func CalculateContainerSizing(spec *specs.Spec) (numCPU, memSizeMB uint32) {
 	return calculateVMResources(period, quota, memory)
 }
 
-func calculateVMResources(period uint64, quota int64, memory int64) (numCPU, memSizeMB uint32) {
-	numCPU = vcutils.CalculateVCpusFromMilliCpus(vcutils.CalculateMilliCPUs(quota, period))
+func calculateVMResources(period uint64, quota int64, memory int64) (numCPU float32, memSizeMB uint32) {
+	numCPU = vcutils.CalculateCPUsF(quota, period)
 
 	if memory < 0 {
 		// While spec allows for a negative value to indicate unconstrained, we don't
@@ -671,7 +671,7 @@ func TestAddHypervisorAnnotations(t *testing.T) {
 	err := addAnnotations(ocispec, &sbConfig, runtimeConfig)
 	assert.NoError(err)
 
-	assert.Equal(sbConfig.HypervisorConfig.NumVCPUs, uint32(1))
+	assert.Equal(sbConfig.HypervisorConfig.NumVCPUsF, float32(1))
 	assert.Equal(sbConfig.HypervisorConfig.DefaultMaxVCPUs, uint32(1))
 	assert.Equal(sbConfig.HypervisorConfig.MemorySize, uint32(1024))
 	assert.Equal(sbConfig.HypervisorConfig.MemSlots, uint32(20))
@@ -1087,7 +1087,7 @@ func TestCalculateContainerSizing(t *testing.T) {
 
 	testCases := []struct {
 		spec        *specs.Spec
-		expectedCPU uint32
+		expectedCPU float32
 		expectedMem uint32
 	}{
 		{
@@ -1152,7 +1152,7 @@ func TestCalculateSandboxSizing(t *testing.T) {
 
 	testCases := []struct {
 		spec        *specs.Spec
-		expectedCPU uint32
+		expectedCPU float32
 		expectedMem uint32
 	}{
 		{
@@ -25,7 +25,7 @@ func newAcrnConfig() HypervisorConfig {
 		ImagePath:         testAcrnImagePath,
 		HypervisorPath:    testAcrnPath,
 		HypervisorCtlPath: testAcrnCtlPath,
-		NumVCPUs:          defaultVCPUs,
+		NumVCPUsF:         defaultVCPUs,
 		MemorySize:        defaultMemSzMiB,
 		BlockDeviceDriver: config.VirtioBlock,
 		DefaultBridges:    defaultBridges,
@@ -520,7 +520,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, network Net
 		clh.vmconfig.Memory.HotplugSize = func(i int64) *int64 { return &i }(int64((utils.MemUnit(hotplugSize) * utils.MiB).ToBytes()))
 	}
 	// Set initial amount of cpu's for the virtual machine
-	clh.vmconfig.Cpus = chclient.NewCpusConfig(int32(clh.config.NumVCPUs), int32(clh.config.DefaultMaxVCPUs))
+	clh.vmconfig.Cpus = chclient.NewCpusConfig(int32(clh.config.NumVCPUs()), int32(clh.config.DefaultMaxVCPUs))
 
 	params, err := GetKernelRootParams(hypervisorConfig.RootfsType, clh.config.ConfidentialGuest, false)
 	if err != nil {
@@ -855,7 +855,7 @@ func (clh *cloudHypervisor) hotplugAddBlockDevice(drive *config.BlockDrive) erro
 		clhDisk.Direct = &clh.config.BlockDeviceCacheDirect
 	}
 
-	queues := int32(clh.config.NumVCPUs)
+	queues := int32(clh.config.NumVCPUs())
 	queueSize := int32(1024)
 	clhDisk.NumQueues = &queues
 	clhDisk.QueueSize = &queueSize
@@ -56,7 +56,7 @@ func newClhConfig() (HypervisorConfig, error) {
 		ImagePath:         testClhImagePath,
 		RootfsType:        string(EXT4),
 		HypervisorPath:    testClhPath,
-		NumVCPUs:          defaultVCPUs,
+		NumVCPUsF:         defaultVCPUs,
 		BlockDeviceDriver: config.VirtioBlock,
 		MemorySize:        defaultMemSzMiB,
 		DefaultBridges:    defaultBridges,
@@ -71,7 +71,7 @@ func NewFactory(ctx context.Context, config Config, fetchOnly bool) (vc.Factory,
 }
 
 func resetHypervisorConfig(config *vc.VMConfig) {
-	config.HypervisorConfig.NumVCPUs = 0
+	config.HypervisorConfig.NumVCPUsF = 0
 	config.HypervisorConfig.MemorySize = 0
 	config.HypervisorConfig.BootToBeTemplate = false
 	config.HypervisorConfig.BootFromTemplate = false
|
|||||||
|
|
||||||
online := false
|
online := false
|
||||||
baseConfig := f.base.Config().HypervisorConfig
|
baseConfig := f.base.Config().HypervisorConfig
|
||||||
if baseConfig.NumVCPUs < hypervisorConfig.NumVCPUs {
|
if baseConfig.NumVCPUsF < hypervisorConfig.NumVCPUsF {
|
||||||
err = vm.AddCPUs(ctx, hypervisorConfig.NumVCPUs-baseConfig.NumVCPUs)
|
err = vm.AddCPUs(ctx, hypervisorConfig.NumVCPUs()-baseConfig.NumVCPUs())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@@ -246,7 +246,7 @@ func TestFactoryGetVM(t *testing.T) {
 	assert.Nil(err)
 
 	// CPU hotplug
-	vmConfig.HypervisorConfig.NumVCPUs++
+	vmConfig.HypervisorConfig.NumVCPUsF++
 	vm, err = f.GetVM(ctx, vmConfig)
 	assert.Nil(err)
 
@@ -278,9 +278,9 @@ func TestDeepCompare(t *testing.T) {
 	bar := vc.VMConfig{}
 	assert.True(utils.DeepCompare(foo, bar))
 
-	foo.HypervisorConfig.NumVCPUs = 1
+	foo.HypervisorConfig.NumVCPUsF = 1
 	assert.False(utils.DeepCompare(foo, bar))
-	bar.HypervisorConfig.NumVCPUs = 1
+	bar.HypervisorConfig.NumVCPUsF = 1
 	assert.True(utils.DeepCompare(foo, bar))
 
 	// slice
@@ -692,7 +692,7 @@ func (fc *firecracker) fcInitConfiguration(ctx context.Context) error {
 	}
 
 	fc.fcSetVMBaseConfig(ctx, int64(fc.config.MemorySize),
-		int64(fc.config.NumVCPUs), false)
+		int64(fc.config.NumVCPUs()), false)
 
 	kernelPath, err := fc.config.KernelAssetPath()
 	if err != nil {
@@ -9,6 +9,7 @@ import (
 	"bufio"
 	"context"
 	"fmt"
+	"math"
 	"os"
 	"runtime"
 	"strings"
@@ -58,7 +59,7 @@ const (
 
 	procCPUInfo = "/proc/cpuinfo"
 
-	defaultVCPUs = 1
+	defaultVCPUs = float32(1)
 	// 2 GiB
 	defaultMemSzMiB = 2048
 
@@ -524,7 +525,7 @@ type HypervisorConfig struct {
 	ColdPlugVFIO config.PCIePort
 
 	// NumVCPUs specifies default number of vCPUs for the VM.
-	NumVCPUs uint32
+	NumVCPUsF float32
 
 	//DefaultMaxVCPUs specifies the maximum number of vCPUs for the VM.
 	DefaultMaxVCPUs uint32
@@ -838,6 +839,14 @@ func (conf *HypervisorConfig) FirmwareVolumeAssetPath() (string, error) {
 	return conf.assetPath(types.FirmwareVolumeAsset)
 }
 
+func RoundUpNumVCPUs(cpus float32) uint32 {
+	return uint32(math.Ceil(float64(cpus)))
+}
+
+func (conf HypervisorConfig) NumVCPUs() uint32 {
+	return RoundUpNumVCPUs(conf.NumVCPUsF)
+}
+
 func appendParam(params []Param, parameter string, value string) []Param {
 	return append(params, Param{parameter, value})
 }
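`NumVCPUsF` is only converted to an integer at the hypervisor boundary, and always by rounding up, so the guest never receives fewer CPUs than were requested. For example, assuming the `RoundUpNumVCPUs` helper added above:

    // Assumes the RoundUpNumVCPUs helper introduced above (ceiling of the value).
    func roundingExamples() []uint32 {
        return []uint32{
            RoundUpNumVCPUs(0.75), // 1
            RoundUpNumVCPUs(1.0),  // 1
            RoundUpNumVCPUs(1.2),  // 2
        }
    }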
@@ -22,7 +22,7 @@ func validateHypervisorConfig(conf *HypervisorConfig) error {
 	}
 
 	if conf.NumVCPUs == 0 {
-		conf.NumVCPUs = defaultVCPUs
+		conf.NumVCPUsF = defaultVCPUs
 	}
 
 	if conf.MemorySize == 0 {
@@ -32,8 +32,8 @@ func validateHypervisorConfig(conf *HypervisorConfig) error {
 		return err
 	}
 
-	if conf.NumVCPUs == 0 {
-		conf.NumVCPUs = defaultVCPUs
+	if conf.NumVCPUsF == 0 {
+		conf.NumVCPUsF = defaultVCPUs
 	}
 
 	if conf.MemorySize == 0 {
@@ -54,11 +54,6 @@ func validateHypervisorConfig(conf *HypervisorConfig) error {
 		conf.DefaultMaxVCPUs = defaultMaxVCPUs
 	}
 
-	if conf.ConfidentialGuest && conf.NumVCPUs != conf.DefaultMaxVCPUs {
-		hvLogger.Warnf("Confidential guests do not support hotplugging of vCPUs. Setting DefaultMaxVCPUs to NumVCPUs (%d)", conf.NumVCPUs)
-		conf.DefaultMaxVCPUs = conf.NumVCPUs
-	}
-
 	if conf.Msize9p == 0 && conf.SharedFS != config.VirtioFS {
 		conf.Msize9p = defaultMsize9p
 	}
@@ -103,7 +103,7 @@ func TestHypervisorConfigDefaults(t *testing.T) {
 		KernelPath:        fmt.Sprintf("%s/%s", testDir, testKernel),
 		ImagePath:         fmt.Sprintf("%s/%s", testDir, testImage),
 		HypervisorPath:    "",
-		NumVCPUs:          defaultVCPUs,
+		NumVCPUsF:         defaultVCPUs,
 		MemorySize:        defaultMemSzMiB,
 		DefaultBridges:    defaultBridges,
 		BlockDeviceDriver: defaultBlockDriver,
@@ -71,13 +71,13 @@ func (endpoint *MacvtapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
 
 	h := s.hypervisor
 
-	endpoint.VMFds, err = createMacvtapFds(endpoint.EndpointProperties.Iface.Index, int(h.HypervisorConfig().NumVCPUs))
+	endpoint.VMFds, err = createMacvtapFds(endpoint.EndpointProperties.Iface.Index, int(h.HypervisorConfig().NumVCPUs()))
 	if err != nil {
 		return fmt.Errorf("Could not setup macvtap fds %s: %s", endpoint.EndpointProperties.Iface.Name, err)
 	}
 
 	if !h.HypervisorConfig().DisableVhostNet {
-		vhostFds, err := createVhostFds(int(h.HypervisorConfig().NumVCPUs))
+		vhostFds, err := createVhostFds(int(h.HypervisorConfig().NumVCPUs()))
 		if err != nil {
 			return fmt.Errorf("Could not setup vhost fds %s : %s", endpoint.EndpointProperties.Iface.Name, err)
 		}
@@ -592,7 +592,7 @@ func xConnectVMNetwork(ctx context.Context, endpoint Endpoint, h Hypervisor) err
 	queues := 0
 	caps := h.Capabilities(ctx)
 	if caps.IsMultiQueueSupported() {
-		queues = int(h.HypervisorConfig().NumVCPUs)
+		queues = int(h.HypervisorConfig().NumVCPUs())
 	}
 
 	disableVhostNet := h.HypervisorConfig().DisableVhostNet
@@ -200,7 +200,7 @@ func (s *Sandbox) dumpConfig(ss *persistapi.SandboxState) {
 	}
 
 	ss.Config.HypervisorConfig = persistapi.HypervisorConfig{
-		NumVCPUs:        sconfig.HypervisorConfig.NumVCPUs,
+		NumVCPUsF:       sconfig.HypervisorConfig.NumVCPUsF,
 		DefaultMaxVCPUs: sconfig.HypervisorConfig.DefaultMaxVCPUs,
 		MemorySize:      sconfig.HypervisorConfig.MemorySize,
 		DefaultBridges:  sconfig.HypervisorConfig.DefaultBridges,
@@ -440,7 +440,7 @@ func loadSandboxConfig(id string) (*SandboxConfig, error) {
 
 	hconf := savedConf.HypervisorConfig
 	sconfig.HypervisorConfig = HypervisorConfig{
-		NumVCPUs:        hconf.NumVCPUs,
+		NumVCPUsF:       hconf.NumVCPUsF,
 		DefaultMaxVCPUs: hconf.DefaultMaxVCPUs,
 		MemorySize:      hconf.MemorySize,
 		DefaultBridges:  hconf.DefaultBridges,
@@ -132,7 +132,7 @@ type HypervisorConfig struct {
 	SGXEPCSize int64
 
 	// NumVCPUs specifies default number of vCPUs for the VM.
-	NumVCPUs uint32
+	NumVCPUsF float32
 
 	//DefaultMaxVCPUs specifies the maximum number of vCPUs for the VM.
 	DefaultMaxVCPUs uint32
@@ -315,7 +315,7 @@ func (q *qemu) setup(ctx context.Context, id string, hypervisorConfig *Hyperviso
 }
 
 func (q *qemu) cpuTopology() govmmQemu.SMP {
-	return q.arch.cpuTopology(q.config.NumVCPUs, q.config.DefaultMaxVCPUs)
+	return q.arch.cpuTopology(q.config.NumVCPUs(), q.config.DefaultMaxVCPUs)
 }
 
 func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
@@ -630,7 +630,7 @@ func (q *qemu) CreateVM(ctx context.Context, id string, network Network, hypervi
 			// we must ensure we use something like:
 			// `...,sockets=1,cores=numvcpus,threads=1,...`
 			smp.Sockets = 1
-			smp.Cores = q.config.NumVCPUs
+			smp.Cores = q.config.NumVCPUs()
 			smp.Threads = 1
 		}
 	}
@@ -1590,7 +1590,7 @@ func (q *qemu) hotplugAddBlockDevice(ctx context.Context, drive *config.BlockDri
 			return err
 		}
 
-		queues := int(q.config.NumVCPUs)
+		queues := int(q.config.NumVCPUs())
 
 		if err = q.qmpMonitorCh.qmp.ExecutePCIDeviceAdd(q.qmpMonitorCh.ctx, drive.ID, devID, driver, addr, bridge.ID, romFile, queues, true, defaultDisableModern); err != nil {
 			return err
@@ -1922,7 +1922,7 @@ func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op Opera
 			addr := "00"
 			bridgeID := fmt.Sprintf("%s%d", config.PCIeRootPortPrefix, len(config.PCIeDevices[config.RootPort]))
 			config.PCIeDevices[config.RootPort][devID] = true
-			return q.qmpMonitorCh.qmp.ExecuteNetPCIDeviceAdd(q.qmpMonitorCh.ctx, tap.Name, devID, endpoint.HardwareAddr(), addr, bridgeID, romFile, int(q.config.NumVCPUs), defaultDisableModern)
+			return q.qmpMonitorCh.qmp.ExecuteNetPCIDeviceAdd(q.qmpMonitorCh.ctx, tap.Name, devID, endpoint.HardwareAddr(), addr, bridgeID, romFile, int(q.config.NumVCPUs()), defaultDisableModern)
 		}
 
 		addr, bridge, err := q.arch.addDeviceToBridge(ctx, tap.ID, types.PCI)
@@ -1954,9 +1954,9 @@ func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op Opera
 		}
 		if machine.Type == QemuCCWVirtio {
 			devNoHotplug := fmt.Sprintf("fe.%x.%x", bridge.Addr, addr)
-			return q.qmpMonitorCh.qmp.ExecuteNetCCWDeviceAdd(q.qmpMonitorCh.ctx, tap.Name, devID, endpoint.HardwareAddr(), devNoHotplug, int(q.config.NumVCPUs))
+			return q.qmpMonitorCh.qmp.ExecuteNetCCWDeviceAdd(q.qmpMonitorCh.ctx, tap.Name, devID, endpoint.HardwareAddr(), devNoHotplug, int(q.config.NumVCPUs()))
 		}
-		return q.qmpMonitorCh.qmp.ExecuteNetPCIDeviceAdd(q.qmpMonitorCh.ctx, tap.Name, devID, endpoint.HardwareAddr(), addr, bridge.ID, romFile, int(q.config.NumVCPUs), defaultDisableModern)
+		return q.qmpMonitorCh.qmp.ExecuteNetPCIDeviceAdd(q.qmpMonitorCh.ctx, tap.Name, devID, endpoint.HardwareAddr(), addr, bridge.ID, romFile, int(q.config.NumVCPUs()), defaultDisableModern)
 	}
 
 	if err := q.arch.removeDeviceFromBridge(tap.ID); err != nil {
@@ -2687,9 +2687,9 @@ func calcHotplugMemMiBSize(mem uint32, memorySectionSizeMB uint32) (uint32, erro
 }
 
 func (q *qemu) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
-	currentVCPUs = q.config.NumVCPUs + uint32(len(q.state.HotpluggedVCPUs))
+	currentVCPUs = q.config.NumVCPUs() + uint32(len(q.state.HotpluggedVCPUs))
 	newVCPUs = currentVCPUs
 
 	switch {
 	case currentVCPUs < reqVCPUs:
 		//hotplug
|
|||||||
}
|
}
|
||||||
newVCPUs -= vCPUsRemoved
|
newVCPUs -= vCPUsRemoved
|
||||||
}
|
}
|
||||||
|
|
||||||
return currentVCPUs, newVCPUs, nil
|
return currentVCPUs, newVCPUs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -30,7 +30,7 @@ func newQemuConfig() HypervisorConfig {
 		KernelPath:        testQemuKernelPath,
 		InitrdPath:        testQemuInitrdPath,
 		HypervisorPath:    testQemuPath,
-		NumVCPUs:          defaultVCPUs,
+		NumVCPUsF:         defaultVCPUs,
 		MemorySize:        defaultMemSzMiB,
 		DefaultBridges:    defaultBridges,
 		BlockDeviceDriver: defaultBlockDriver,
@@ -258,12 +258,12 @@ func TestQemuCreateVMMissingParentDirFail(t *testing.T) {
 
 func TestQemuCPUTopology(t *testing.T) {
 	assert := assert.New(t)
-	vcpus := 1
+	vcpus := float32(1)
 
 	q := &qemu{
 		arch: &qemuArchBase{},
 		config: HypervisorConfig{
-			NumVCPUs:        uint32(vcpus),
+			NumVCPUsF:       vcpus,
 			DefaultMaxVCPUs: uint32(vcpus),
 		},
 	}
@@ -116,9 +116,9 @@ type SandboxStats struct {
 
 type SandboxResourceSizing struct {
 	// The number of CPUs required for the sandbox workload(s)
-	WorkloadCPUs uint32
+	WorkloadCPUs float32
 	// The base number of CPUs for the VM that are assigned as overhead
-	BaseCPUs uint32
+	BaseCPUs float32
 	// The amount of memory required for the sandbox workload(s)
 	WorkloadMemMB uint32
 	// The base amount of memory required for that VM that is assigned as overhead
@@ -2168,12 +2168,13 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
 		s.Logger().Debug("no resources updated: static resource management is set")
 		return nil
 	}
 
 	sandboxVCPUs, err := s.calculateSandboxCPUs()
 	if err != nil {
 		return err
 	}
 	// Add default vcpus for sandbox
-	sandboxVCPUs += s.hypervisor.HypervisorConfig().NumVCPUs
+	sandboxVCPUs += s.hypervisor.HypervisorConfig().NumVCPUsF
 
 	sandboxMemoryByte, sandboxneedPodSwap, sandboxSwapByte := s.calculateSandboxMemory()
 
@@ -2196,7 +2197,7 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
 
 	// Update VCPUs
 	s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs")
-	oldCPUs, newCPUs, err := s.hypervisor.ResizeVCPUs(ctx, sandboxVCPUs)
+	oldCPUs, newCPUs, err := s.hypervisor.ResizeVCPUs(ctx, RoundUpNumVCPUs(sandboxVCPUs))
 	if err != nil {
 		return err
 	}
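The sandbox keeps accumulating fractional vCPUs and rounds up exactly once, when asking the hypervisor to resize. Illustrative arithmetic matching the CI test added later in this PR (the function name is hypothetical, and the comparison with the previous integer accounting is an approximation):

    // Illustrative numbers: default_vcpus = 0.75 plus one container limited to cpu: "0.25".
    func exampleSandboxSizing() uint32 {
        defaultVCPUs := float32(0.75)  // from the default_vcpus annotation
        workloadVCPUs := float32(0.25) // 25000/100000 from the CFS quota/period
        // Previously the workload was rounded up and added to an integer default,
        // giving roughly 1 + 1 = 2 vCPUs; now ceil(0.75 + 0.25) = 1 vCPU.
        return RoundUpNumVCPUs(defaultVCPUs + workloadVCPUs)
    }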
@@ -2392,8 +2393,8 @@ func (s *Sandbox) calculateSandboxMemory() (uint64, bool, int64) {
 	return memorySandbox, needPodSwap, swapSandbox
 }
 
-func (s *Sandbox) calculateSandboxCPUs() (uint32, error) {
-	mCPU := uint32(0)
+func (s *Sandbox) calculateSandboxCPUs() (float32, error) {
+	floatCPU := float32(0)
 	cpusetCount := int(0)
 
 	for _, c := range s.config.Containers {
@@ -2405,7 +2406,7 @@ func (s *Sandbox) calculateSandboxCPUs() (uint32, error) {
 
 		if cpu := c.Resources.CPU; cpu != nil {
 			if cpu.Period != nil && cpu.Quota != nil {
-				mCPU += utils.CalculateMilliCPUs(*cpu.Quota, *cpu.Period)
+				floatCPU += utils.CalculateCPUsF(*cpu.Quota, *cpu.Period)
 			}
 
 			set, err := cpuset.Parse(cpu.Cpus)
@@ -2419,11 +2420,11 @@ func (s *Sandbox) calculateSandboxCPUs() (uint32, error) {
 	// If we aren't being constrained, then we could have two scenarios:
 	// 1. BestEffort QoS: no proper support today in Kata.
 	// 2. We could be constrained only by CPUSets. Check for this:
-	if mCPU == 0 && cpusetCount > 0 {
-		return uint32(cpusetCount), nil
+	if floatCPU == 0 && cpusetCount > 0 {
+		return float32(cpusetCount), nil
 	}
 
-	return utils.CalculateVCpusFromMilliCpus(mCPU), nil
+	return floatCPU, nil
 }
 
 // GetHypervisorType is used for getting Hypervisor name currently used.
@@ -126,7 +126,7 @@ func TestCalculateSandboxCPUs(t *testing.T) {
 	tests := []struct {
 		name       string
 		containers []ContainerConfig
-		want       uint32
+		want       float32
 	}{
 		{"1-unconstrained", []ContainerConfig{unconstrained}, 0},
 		{"2-unconstrained", []ContainerConfig{unconstrained, unconstrained}, 0},
@@ -98,7 +98,7 @@ func (endpoint *TapEndpoint) HotAttach(ctx context.Context, h Hypervisor) error
 	span, ctx := tapTrace(ctx, "HotAttach", endpoint)
 	defer span.End()
 
-	if err := tapNetwork(endpoint, h.HypervisorConfig().NumVCPUs, h.HypervisorConfig().DisableVhostNet); err != nil {
+	if err := tapNetwork(endpoint, h.HypervisorConfig().NumVCPUs(), h.HypervisorConfig().DisableVhostNet); err != nil {
 		networkLogger().WithError(err).Error("Error bridging tap ep")
 		return err
 	}
|
|||||||
span, ctx := tuntapTrace(ctx, "HotAttach", endpoint)
|
span, ctx := tuntapTrace(ctx, "HotAttach", endpoint)
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
if err := tuntapNetwork(endpoint, h.HypervisorConfig().NumVCPUs, h.HypervisorConfig().DisableVhostNet); err != nil {
|
if err := tuntapNetwork(endpoint, h.HypervisorConfig().NumVCPUs(), h.HypervisorConfig().DisableVhostNet); err != nil {
|
||||||
networkLogger().WithError(err).Error("Error bridging tun/tap ep")
|
networkLogger().WithError(err).Error("Error bridging tun/tap ep")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@@ -121,25 +121,18 @@ func WriteToFile(path string, data []byte) error {
 	return nil
 }
 
-// CalculateMilliCPUs converts CPU quota and period to milli-CPUs
-func CalculateMilliCPUs(quota int64, period uint64) uint32 {
+// CalculateCPUsF converts CPU quota and period to a fraction number
+func CalculateCPUsF(quota int64, period uint64) float32 {
 
 	// If quota is -1, it means the CPU resource request is
 	// unconstrained. In that case, we don't currently assign
 	// additional CPUs.
 	if quota >= 0 && period != 0 {
-		return uint32((uint64(quota) * 1000) / period)
+		return float32(quota) / float32(period)
 	}
 
 	return 0
 }
 
-// CalculateVCpusFromMilliCpus converts from mCPU to CPU, taking the ceiling
-// value when necessary
-func CalculateVCpusFromMilliCpus(mCPU uint32) uint32 {
-	return (mCPU + 999) / 1000
-}
-
 // GetVirtDriveName returns the disk name format for virtio-blk
 // Reference: https://github.com/torvalds/linux/blob/master/drivers/block/virtio_blk.c @c0aa3e0916d7e531e69b02e426f7162dfb1c6c0
 func GetVirtDriveName(index int) (string, error) {
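With `CalculateCPUsF`, a CFS quota/period pair maps directly onto a fractional vCPU count instead of being converted to milli-CPUs first. A minimal sketch of the conversion (the function name is illustrative):

    // Mirrors CalculateCPUsF above: quota/period becomes a fraction of a vCPU.
    func cpusFromCFS(quota int64, period uint64) float32 {
        if quota >= 0 && period != 0 {
            return float32(quota) / float32(period) // e.g. 25000/100000 -> 0.25
        }
        return 0 // quota of -1 means unconstrained: contribute no extra vCPUs
    }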
@@ -147,26 +147,23 @@ func TestWriteToFile(t *testing.T) {
 	assert.True(reflect.DeepEqual(testData, data))
 }
 
-func TestCalculateMilliCPUs(t *testing.T) {
+func TestCalculateCPUsF(t *testing.T) {
 	assert := assert.New(t)
 
-	n := CalculateMilliCPUs(1, 1)
-	expected := uint32(1000)
+	n := CalculateCPUsF(1, 1)
+	expected := float32(1)
 	assert.Equal(n, expected)
 
-	n = CalculateMilliCPUs(1, 0)
-	expected = uint32(0)
+	n = CalculateCPUsF(1, 0)
+	expected = float32(0)
 	assert.Equal(n, expected)
 
-	n = CalculateMilliCPUs(-1, 1)
+	n = CalculateCPUsF(-1, 1)
+	expected = float32(0)
 	assert.Equal(n, expected)
-}
 
-func TestCaluclateVCpusFromMilliCpus(t *testing.T) {
-	assert := assert.New(t)
-
-	n := CalculateVCpusFromMilliCpus(1)
-	expected := uint32(1)
+	n = CalculateCPUsF(500, 1000)
+	expected = float32(0.5)
 	assert.Equal(n, expected)
 }
 
@@ -160,7 +160,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
 		id:         id,
 		hypervisor: hypervisor,
 		agent:      agent,
-		cpu:        config.HypervisorConfig.NumVCPUs,
+		cpu:        config.HypervisorConfig.NumVCPUs(),
 		memory:     config.HypervisorConfig.MemorySize,
 		store:      store,
 	}, nil
@@ -117,9 +117,11 @@ function deploy_kata() {
 	yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[4].value' --tag '!!str' "true"
 	# Let the `kata-deploy` create the default `kata` runtime class
 	yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[5].value' --tag '!!str' "true"
+	# Enable 'default_vcpus' hypervisor annotation
+	yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[6].value' "default_vcpus"
 
 	if [ "${KATA_HOST_OS}" = "cbl-mariner" ]; then
-		yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[6].value' "initrd kernel"
+		yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[6].value' "initrd kernel default_vcpus"
 		yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[+].name' "HOST_OS"
 		yq write -i "${tools_dir}/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml" 'spec.template.spec.containers[0].env[-1].value' "${KATA_HOST_OS}"
 	fi
@@ -0,0 +1,40 @@
+#!/usr/bin/env bats
+#
+# Copyright (c) 2023 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+load "${BATS_TEST_DIRNAME}/../../common.bash"
+load "${BATS_TEST_DIRNAME}/tests_common.sh"
+
+setup() {
+	[ "${KATA_HYPERVISOR}" == "dragonball" ] && \
+		skip "runtime-rs is still using the old vcpus allocation algorithm, skipping the test"
+
+	get_pod_config_dir
+	pods=( "vcpus-less-than-one-with-no-limits" "vcpus-less-than-one-with-limits" "vcpus-more-than-one-with-limits" )
+	expected_vcpus=( 1 1 2 )
+}
+
+@test "Check the number vcpus are correctly allocated to the sandbox" {
+	# Create the pods
+	kubectl create -f "${pod_config_dir}/pod-sandbox-vcpus-allocation.yaml"
+
+	# Check the pods
+	for i in {0..2}; do
+		kubectl wait --for=jsonpath='{.status.conditions[0].reason}'=PodCompleted --timeout=$timeout pod ${pods[$i]}
+		[ `kubectl logs ${pods[$i]}` -eq ${expected_vcpus[$i]} ]
+	done
+}
+
+teardown() {
+	[ "${KATA_HYPERVISOR}" == "dragonball" ] && \
+		skip "runtime-rs is still using the old vcpus allocation algorithm, skipping the test"
+
+	for pod in "${pods[@]}"; do
+		kubectl logs ${pod}
+	done
+
+	kubectl delete -f "${pod_config_dir}/pod-sandbox-vcpus-allocation.yaml"
+}
@@ -60,6 +60,7 @@ else
	K8S_TEST_NORMAL_HOST_UNION=( \
 		"k8s-number-cpus.bats" \
 		"k8s-parallel.bats" \
+		"k8s-sandbox-vcpus-allocation.bats" \
 		"k8s-scale-nginx.bats" \
 	)
 
@@ -0,0 +1,54 @@
+#
+# Copyright (c) 2023 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vcpus-less-than-one-with-no-limits
+  annotations:
+    io.katacontainers.config.hypervisor.default_vcpus: "0"
+spec:
+  runtimeClassName: kata
+  containers:
+    - name: vcpus-less-than-one-with-no-limits
+      image: quay.io/prometheus/busybox:latest
+      command: ['nproc', '--all']
+  restartPolicy: Never
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vcpus-less-than-one-with-limits
+  annotations:
+    io.katacontainers.config.hypervisor.default_vcpus: "0.75"
+spec:
+  runtimeClassName: kata
+  containers:
+    - name: vcpus-less-than-one-with-limits
+      image: quay.io/prometheus/busybox:latest
+      resources:
+        limits:
+          cpu: "0.25"
+      command: ['nproc', '--all']
+  restartPolicy: Never
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: vcpus-more-than-one-with-limits
+  annotations:
+    io.katacontainers.config.hypervisor.default_vcpus: "0.75"
+spec:
+  runtimeClassName: kata
+  containers:
+    - name: vcpus-more-than-one-with-limits
+      image: quay.io/prometheus/busybox:latest
+      resources:
+        limits:
+          cpu: "1.2"
+      command: ['nproc', '--all']
+  restartPolicy: Never
+---
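For reference, the vCPU counts the new test expects for these pods follow from the allocation changes above (a sketch of the arithmetic, assuming ceiling-based rounding as in RoundUpNumVCPUs):

    // Expected 'nproc --all' output for the three pods, matching expected_vcpus=( 1 1 2 ):
    //   default_vcpus "0",    no limits       -> falls back to the 1 vCPU default  -> 1
    //   default_vcpus "0.75", limit cpu 0.25  -> ceil(0.75 + 0.25) = ceil(1.00)    -> 1
    //   default_vcpus "0.75", limit cpu 1.2   -> ceil(0.75 + 1.20) = ceil(1.95)    -> 2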