mirror of
https://github.com/kata-containers/kata-containers.git
synced 2025-06-30 17:22:33 +00:00
virtcontainers: move resource calculation to its own function
Move the cpu and memory calculation into a separate function; this helps reduce the function's complexity and makes unit testing easier. Fixes: #1296 Signed-off-by: Jose Carlos Venegas Munoz <jose.carlos.venegas.munoz@intel.com>
This commit is contained in:
parent
7504d9e50c
commit
0061e166d4
@ -7,6 +7,7 @@ package virtcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@ -1608,36 +1609,21 @@ func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
|
||||
func (s *Sandbox) updateResources() error {
|
||||
// the hypervisor.MemorySize is the amount of memory reserved for
|
||||
// the VM and contaniners without memory limit
|
||||
sumResources := specs.LinuxResources{
|
||||
Memory: &specs.LinuxMemory{
|
||||
Limit: new(int64),
|
||||
},
|
||||
CPU: &specs.LinuxCPU{
|
||||
Period: new(uint64),
|
||||
Quota: new(int64),
|
||||
},
|
||||
|
||||
if s == nil {
|
||||
return errors.New("sandbox is nil")
|
||||
}
|
||||
|
||||
var mCPU uint32
|
||||
|
||||
// Calculate running total of memory and mCPUs requested
|
||||
for _, c := range s.config.Containers {
|
||||
if m := c.Resources.Memory; m != nil && m.Limit != nil {
|
||||
*sumResources.Memory.Limit += *m.Limit
|
||||
}
|
||||
if cpu := c.Resources.CPU; cpu != nil {
|
||||
if cpu.Period != nil && cpu.Quota != nil {
|
||||
mCPU += utils.CalculateMilliCPUs(*cpu.Quota, *cpu.Period)
|
||||
if s.config == nil {
|
||||
return fmt.Errorf("sandbox config is nil")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
sandboxVCPUs := utils.CalculateVCpusFromMilliCpus(mCPU)
|
||||
sandboxVCPUs := s.calculateSandboxCPUs()
|
||||
// Add default vcpus for sandbox
|
||||
sandboxVCPUs += s.hypervisor.hypervisorConfig().NumVCPUs
|
||||
|
||||
sandboxMemoryByte := int64(s.hypervisor.hypervisorConfig().MemorySize) << utils.MibToBytesShift
|
||||
sandboxMemoryByte += *sumResources.Memory.Limit
|
||||
sandboxMemoryByte += s.calculateSandboxMemory()
|
||||
|
||||
// Update VCPUs
|
||||
s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs")
|
||||
@ -1666,3 +1652,27 @@ func (s *Sandbox) updateResources() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Sandbox) calculateSandboxMemory() int64 {
|
||||
memorySandbox := int64(0)
|
||||
for _, c := range s.config.Containers {
|
||||
if m := c.Resources.Memory; m != nil && m.Limit != nil {
|
||||
memorySandbox += *m.Limit
|
||||
}
|
||||
}
|
||||
return memorySandbox
|
||||
}
|
||||
|
||||
func (s *Sandbox) calculateSandboxCPUs() uint32 {
|
||||
mCPU := uint32(0)
|
||||
|
||||
for _, c := range s.config.Containers {
|
||||
if cpu := c.Resources.CPU; cpu != nil {
|
||||
if cpu.Period != nil && cpu.Quota != nil {
|
||||
mCPU += utils.CalculateMilliCPUs(*cpu.Quota, *cpu.Period)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return utils.CalculateVCpusFromMilliCpus(mCPU)
|
||||
}
|
||||
|
@ -107,6 +107,67 @@ func TestCreateMockSandbox(t *testing.T) {
|
||||
defer cleanUp()
|
||||
}
|
||||
|
||||
func TestCalculateSandboxCPUs(t *testing.T) {
|
||||
sandbox := &Sandbox{}
|
||||
sandbox.config = &SandboxConfig{}
|
||||
unconstrained := newTestContainerConfigNoop("cont-00001")
|
||||
constrained := newTestContainerConfigNoop("cont-00001")
|
||||
quota := int64(4000)
|
||||
period := uint64(1000)
|
||||
constrained.Resources.CPU = &specs.LinuxCPU{Period: &period, Quota: "a}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
containers []ContainerConfig
|
||||
want uint32
|
||||
}{
|
||||
{"1-unconstrained", []ContainerConfig{unconstrained}, 0},
|
||||
{"2-unconstrained", []ContainerConfig{unconstrained, unconstrained}, 0},
|
||||
{"1-constrained", []ContainerConfig{constrained}, 4},
|
||||
{"2-constrained", []ContainerConfig{constrained, constrained}, 8},
|
||||
{"3-mix-constraints", []ContainerConfig{unconstrained, constrained, constrained}, 8},
|
||||
{"3-constrained", []ContainerConfig{constrained, constrained, constrained}, 12},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sandbox.config.Containers = tt.containers
|
||||
if got := sandbox.calculateSandboxCPUs(); got != tt.want {
|
||||
t.Errorf("calculateSandboxCPUs() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCalculateSandboxMem(t *testing.T) {
|
||||
sandbox := &Sandbox{}
|
||||
sandbox.config = &SandboxConfig{}
|
||||
unconstrained := newTestContainerConfigNoop("cont-00001")
|
||||
constrained := newTestContainerConfigNoop("cont-00001")
|
||||
limit := int64(4000)
|
||||
constrained.Resources.Memory = &specs.LinuxMemory{Limit: &limit}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
containers []ContainerConfig
|
||||
want int64
|
||||
}{
|
||||
{"1-unconstrained", []ContainerConfig{unconstrained}, 0},
|
||||
{"2-unconstrained", []ContainerConfig{unconstrained, unconstrained}, 0},
|
||||
{"1-constrained", []ContainerConfig{constrained}, limit},
|
||||
{"2-constrained", []ContainerConfig{constrained, constrained}, limit * 2},
|
||||
{"3-mix-constraints", []ContainerConfig{unconstrained, constrained, constrained}, limit * 2},
|
||||
{"3-constrained", []ContainerConfig{constrained, constrained, constrained}, limit * 3},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sandbox.config.Containers = tt.containers
|
||||
if got := sandbox.calculateSandboxMemory(); got != tt.want {
|
||||
t.Errorf("calculateSandboxMemory() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateSandboxEmptyID(t *testing.T) {
|
||||
hConfig := newHypervisorConfig(nil, nil)
|
||||
|
||||
@ -1615,4 +1676,7 @@ func TestSandboxUpdateResources(t *testing.T) {
|
||||
c.Resources.CPU.Quota = &containerCPUQouta
|
||||
}
|
||||
err = s.updateResources()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user