sandbox/virtcontainers: memory resource hotplug when creating a container

When creating a sandbox, we set up the sandbox with 2048M of base memory and then hotplug the memory that each new container needs. We also change the unit of c.config.Resources.Mem from MiB to bytes, so that limits in the range 4095B < memory < 1MiB are no longer lost to truncation.

Depends-on: github.com/kata-containers/tests#813

Fixes #400

Signed-off-by: Clare Chen <clare.chenhui@huawei.com>
Signed-off-by: Zichang Lin <linzichang@huawei.com>
commit 8e2ee686bd (parent c3cfe8204a)
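The unit change is the subtle part of this commit. With a MiB-denominated uint32, any cgroup memory limit below 1 MiB truncates to zero and is silently dropped; keeping bytes in an int64 and aligning down to the page boundary (as cgroup memory.limit_in_bytes is aligned when it takes effect) preserves such limits. A minimal standalone sketch of the two conversions, assuming a 4 KiB guest page size (this sketch is illustrative and not part of the commit):

```go
package main

import "fmt"

// pageAlign rounds a byte count down to a 4 KiB page boundary, mirroring the
// (limit >> 12) << 12 expression this commit uses. A 4 KiB guest page size is
// an assumption here; the commit leaves a TODO to query it via GetGuestDetails.
func pageAlign(limitBytes int64) int64 {
	return (limitBytes >> 12) << 12
}

func main() {
	limit := int64(512 * 1024) // a 512 KiB cgroup memory limit

	// Old representation: MiB in a uint32. Anything below 1 MiB truncates
	// to zero, so the limit is silently lost.
	oldMemMB := uint32(limit >> 20)

	// New representation: bytes in an int64, page aligned the same way
	// cgroup memory.limit_in_bytes is aligned when it takes effect.
	newMemByte := pageAlign(limit)

	fmt.Printf("MemMB:   %d (limit lost)\n", oldMemMB) // 0
	fmt.Printf("MemByte: %d\n", newMemByte)            // 524288
}
```

The diff follows.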
```diff
@@ -112,6 +112,10 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
 		}
 	}()
 
+	if err := s.getAndStoreGuestDetails(); err != nil {
+		return nil, err
+	}
+
 	// Create Containers
 	if err = s.createContainers(); err != nil {
 		return nil, err
```
```diff
@@ -122,11 +126,6 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
 		return nil, err
 	}
 
-	// get and store guest details
-	if err := s.getAndStoreGuestDetails(); err != nil {
-		return nil, err
-	}
-
 	return s, nil
 }
 
```
```diff
@@ -178,7 +178,7 @@ type ContainerResources struct {
 	VCPUs uint32
 
 	// Mem is the memory that is being used by the container
-	MemMB uint32
+	MemByte int64
 }
 
 // ContainerConfig describes one container runtime configuration.
```
```diff
@@ -984,7 +984,9 @@ func (c *Container) update(resources specs.LinuxResources) error {
 
 	newResources := ContainerResources{
 		VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
-		MemMB: uint32(*resources.Memory.Limit >> 20),
+		// do page align to memory, as cgroup memory.limit_in_bytes will be aligned to page when effect.
+		// TODO use GetGuestDetails to get the guest OS page size.
+		MemByte: (*resources.Memory.Limit >> 12) << 12,
 	}
 
 	if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
```
```diff
@@ -1181,7 +1183,6 @@ func (c *Container) detachDevices() error {
 }
 
 func (c *Container) addResources() error {
-	//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
 	if c.config == nil {
 		return nil
 	}
```
```diff
@@ -1189,7 +1190,7 @@ func (c *Container) addResources() error {
 	// Container is being created, try to add the number of vCPUs specified
 	vCPUs := c.config.Resources.VCPUs
 	if vCPUs != 0 {
-		virtLog.Debugf("hot adding %d vCPUs", vCPUs)
+		virtLog.Debugf("create container: hot adding %d vCPUs", vCPUs)
 		data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
 		if err != nil {
 			return err
```
```diff
@@ -1210,14 +1211,32 @@ func (c *Container) addResources() error {
 			}
 		}
 
-		return c.sandbox.agent.onlineCPUMem(vcpusAdded, true)
+		if err := c.sandbox.agent.onlineCPUMem(vcpusAdded, true); err != nil {
+			return err
+		}
+	}
+
+	// try to add the number of Mem specified
+	addMemByte := c.config.Resources.MemByte
+	if addMemByte != 0 {
+		memHotplugMB, err := c.calcHotplugMemMiBSize(addMemByte)
+		if err != nil {
+			return err
+		}
+		virtLog.Debugf("create container: hotplug %dMB mem", memHotplugMB)
+		_, err = c.sandbox.hypervisor.hotplugAddDevice(&memoryDevice{sizeMB: int(memHotplugMB)}, memoryDev)
+		if err != nil {
+			return err
+		}
+		if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
+			return err
+		}
 	}
 
 	return nil
 }
 
 func (c *Container) removeResources() error {
-	//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
 	if c.config == nil {
 		return nil
 	}
```
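The memory path added above mirrors the existing vCPU path: size the request, hotplug a memory device at the hypervisor level, then ask the guest agent to online it so the guest kernel can use it. A condensed sketch of that sequence, using hypothetical stub types in place of the real virtcontainers hypervisor and agent interfaces:

```go
package main

import "fmt"

// Hypothetical stand-ins for the sandbox's hypervisor and agent; the names
// and signatures are illustrative, not the real virtcontainers interfaces.
type hypervisor struct{}

func (h *hypervisor) hotplugMemoryMB(sizeMB int) error {
	fmt.Printf("hypervisor: hotplug a %d MB memory device\n", sizeMB)
	return nil
}

type agent struct{}

func (a *agent) onlineMem() error {
	fmt.Println("agent: online the hotplugged memory inside the guest")
	return nil
}

// addContainerMemory mirrors the order of operations added by this commit:
// skip if nothing was requested, size the request in MiB (block-size
// rounding elided here), plug the device at the hypervisor level, then have
// the guest agent online the new memory.
func addContainerMemory(h *hypervisor, a *agent, memByte int64) error {
	if memByte == 0 {
		return nil
	}
	sizeMB := int(memByte >> 20)
	if err := h.hotplugMemoryMB(sizeMB); err != nil {
		return err
	}
	return a.onlineMem()
}

func main() {
	if err := addContainerMemory(&hypervisor{}, &agent{}, 256<<20); err != nil {
		fmt.Println("error:", err)
	}
}
```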
```diff
@@ -1237,6 +1256,7 @@ func (c *Container) removeResources() error {
 			return err
 		}
 	}
+	// hot remove memory unsupported
 
 	return nil
 }
```
```diff
@@ -1259,7 +1279,7 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
 	if oldVCPUs < newVCPUs {
 		// hot add vCPUs
 		vCPUs = newVCPUs - oldVCPUs
-		virtLog.Debugf("hot adding %d vCPUs", vCPUs)
+		virtLog.Debugf("update container: hot adding %d vCPUs", vCPUs)
 		data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
 		if err != nil {
 			return err
```
```diff
@@ -1291,36 +1311,37 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
 	return nil
 }
 
-func (c *Container) memHotplugValid(mem uint32) (uint32, error) {
-	memorySectionSizeMB := c.sandbox.state.GuestMemoryBlockSizeMB
-	if memorySectionSizeMB == 0 {
-		return mem, nil
+// calculate hotplug memory size with memory block size of guestos
+func (c *Container) calcHotplugMemMiBSize(memByte int64) (uint32, error) {
+	memoryBlockSize := int64(c.sandbox.state.GuestMemoryBlockSizeMB)
+	if memoryBlockSize == 0 {
+		return uint32(memByte >> 20), nil
 	}
 
 	// TODO: hot add memory aligned to memory section should be more properly. See https://github.com/kata-containers/runtime/pull/624#issuecomment-419656853
-	return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
+	return uint32(int64(math.Ceil(float64(memByte)/float64(memoryBlockSize<<20))) * memoryBlockSize), nil
 }
 
 func (c *Container) updateMemoryResources(oldResources ContainerResources, newResources *ContainerResources) error {
-	oldMemMB := oldResources.MemMB
-	newMemMB := newResources.MemMB
+	oldMemByte := oldResources.MemByte
+	newMemByte := newResources.MemByte
 	c.Logger().WithFields(logrus.Fields{
-		"old-mem": fmt.Sprintf("%dMB", oldMemMB),
-		"new-mem": fmt.Sprintf("%dMB", newMemMB),
+		"old-mem": fmt.Sprintf("%dByte", oldMemByte),
+		"new-mem": fmt.Sprintf("%dByte", newMemByte),
 	}).Debug("Request update memory")
 
-	if oldMemMB == newMemMB {
+	if oldMemByte == newMemByte {
 		c.Logger().WithFields(logrus.Fields{
-			"old-mem": fmt.Sprintf("%dMB", oldMemMB),
-			"new-mem": fmt.Sprintf("%dMB", newMemMB),
+			"old-mem": fmt.Sprintf("%dByte", oldMemByte),
+			"new-mem": fmt.Sprintf("%dByte", newMemByte),
 		}).Debug("the actual number of Mem will not be modified")
 		return nil
 	}
 
-	if oldMemMB < newMemMB {
+	if oldMemByte < newMemByte {
 		// hot add memory
-		addMemMB := newMemMB - oldMemMB
-		memHotplugMB, err := c.memHotplugValid(addMemMB)
+		addMemByte := newMemByte - oldMemByte
+		memHotplugMB, err := c.calcHotplugMemMiBSize(addMemByte)
 		if err != nil {
 			return err
 		}
```
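calcHotplugMemMiBSize rounds the requested byte count up to a whole number of guest memory blocks, since memory can only be hotplugged in block-size units; when the guest block size is unknown it falls back to truncating to MiB. A standalone sketch of the same arithmetic, with the block size passed in as a parameter (in the commit it is read from c.sandbox.state):

```go
package main

import (
	"fmt"
	"math"
)

// calcHotplugMemMiBSize mirrors the commit's rounding: the requested bytes
// are rounded up to a multiple of the guest memory block size (in MiB). A
// zero block size means "unknown", and the request falls back to truncated
// MiB. Passing the block size as a parameter is a simplification here.
func calcHotplugMemMiBSize(memByte, memoryBlockSizeMB int64) uint32 {
	if memoryBlockSizeMB == 0 {
		return uint32(memByte >> 20)
	}
	blockBytes := float64(memoryBlockSizeMB << 20)
	return uint32(int64(math.Ceil(float64(memByte)/blockBytes)) * memoryBlockSizeMB)
}

func main() {
	fmt.Println(calcHotplugMemMiBSize(100<<20, 128)) // 128: one 128 MiB block
	fmt.Println(calcHotplugMemMiBSize(129<<20, 128)) // 256: needs two blocks
	fmt.Println(calcHotplugMemMiBSize(100<<20, 0))   // 100: unknown block size
}
```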
```diff
@@ -1337,16 +1358,16 @@ func (c *Container) updateMemoryResources(oldResources ContainerResources, newRe
 		if !ok {
 			return fmt.Errorf("Could not get the memory added, got %+v", data)
 		}
-		newResources.MemMB = oldMemMB + uint32(memoryAdded)
+		newResources.MemByte = oldMemByte + int64(memoryAdded)<<20
 		if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
 			return err
 		}
 	}
-	if oldMemMB > newMemMB {
+	if oldMemByte > newMemByte {
 		// Try to remove a memory device with the difference
 		// from new memory and old memory
 		removeMem := &memoryDevice{
-			sizeMB: int(oldMemMB - newMemMB),
+			sizeMB: int((oldMemByte - newMemByte) >> 20),
 		}
 
 		data, err := c.sandbox.hypervisor.hotplugRemoveDevice(removeMem, memoryDev)
```
```diff
@@ -1357,8 +1378,7 @@ func (c *Container) updateMemoryResources(oldResources ContainerResources, newRe
 		if !ok {
 			return fmt.Errorf("Could not get the memory added, got %+v", data)
 		}
-		newResources.MemMB = oldMemMB - uint32(memoryRemoved)
-		newResources.MemMB = oldResources.MemMB
+		newResources.MemByte = oldMemByte - int64(memoryRemoved)<<20
 	}
 	return nil
 }
```
```diff
@@ -1366,7 +1386,7 @@ func (c *Container) updateMemoryResources(oldResources ContainerResources, newRe
 func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
 	// initialize with oldResources
 	c.config.Resources.VCPUs = oldResources.VCPUs
-	c.config.Resources.MemMB = oldResources.MemMB
+	c.config.Resources.MemByte = oldResources.MemByte
 
 	// Cpu is not updated if period and/or quota not set
 	if newResources.VCPUs != 0 {
```
```diff
@@ -1382,13 +1402,13 @@ func (c *Container) updateResources(oldResources, newResources ContainerResource
 	}
 
 	// Memory is not updated if memory limit not set
-	if newResources.MemMB != 0 {
+	if newResources.MemByte != 0 {
 		if err := c.updateMemoryResources(oldResources, &newResources); err != nil {
 			return err
 		}
 
-		// Set and save container's config MemMB field only
-		c.config.Resources.MemMB = newResources.MemMB
+		// Set and save container's config Mem field only
+		c.config.Resources.MemByte = newResources.MemByte
 		return c.storeContainer()
 	}
 
```
```diff
@@ -412,25 +412,6 @@ func (spec *CompatOCISpec) SandboxID() (string, error) {
 	return "", fmt.Errorf("Could not find sandbox ID")
 }
 
-func updateVMConfig(ocispec CompatOCISpec, config *RuntimeConfig) error {
-	if ocispec.Linux == nil || ocispec.Linux.Resources == nil {
-		return nil
-	}
-
-	if ocispec.Linux.Resources.Memory != nil &&
-		ocispec.Linux.Resources.Memory.Limit != nil {
-		memBytes := *ocispec.Linux.Resources.Memory.Limit
-		if memBytes <= 0 {
-			return fmt.Errorf("Invalid OCI memory limit %d", memBytes)
-		}
-		// Use some math magic to round up to the nearest Mb.
-		// This has the side effect that we can never have <1Mb assigned.
-		config.HypervisorConfig.MemorySize = uint32((memBytes + (1024*1024 - 1)) / (1024 * 1024))
-	}
-
-	return nil
-}
-
 func addAssetAnnotations(ocispec CompatOCISpec, config *vc.SandboxConfig) {
 	assetAnnotations := []string{
 		vcAnnotations.KernelPath,
```
```diff
@@ -469,11 +450,6 @@ func SandboxConfig(ocispec CompatOCISpec, runtime RuntimeConfig, bundlePath, cid
 		return vc.SandboxConfig{}, err
 	}
 
-	err = updateVMConfig(ocispec, &runtime)
-	if err != nil {
-		return vc.SandboxConfig{}, err
-	}
-
 	ociSpecJSON, err := json.Marshal(ocispec)
 	if err != nil {
 		return vc.SandboxConfig{}, err
```
```diff
@@ -570,7 +546,8 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
 	}
 	if ocispec.Linux.Resources.Memory != nil {
 		if ocispec.Linux.Resources.Memory.Limit != nil {
-			resources.MemMB = uint32(*ocispec.Linux.Resources.Memory.Limit >> 20)
+			// do page align to memory, as cgroup memory.limit_in_bytes will be aligned to page when effect
+			resources.MemByte = (*ocispec.Linux.Resources.Memory.Limit >> 12) << 12
 		}
 	}
 
```
```diff
@@ -254,54 +254,6 @@ func TestMinimalSandboxConfig(t *testing.T) {
 	}
 }
 
-func TestUpdateVmConfig(t *testing.T) {
-	var limitBytes int64 = 128 * 1024 * 1024
-	assert := assert.New(t)
-
-	config := RuntimeConfig{
-		HypervisorConfig: vc.HypervisorConfig{
-			MemorySize: 2048,
-		},
-	}
-
-	expectedMem := uint32(128)
-
-	ocispec := CompatOCISpec{
-		Spec: specs.Spec{
-			Linux: &specs.Linux{
-				Resources: &specs.LinuxResources{
-					Memory: &specs.LinuxMemory{
-						Limit: &limitBytes,
-					},
-				},
-			},
-		},
-	}
-
-	err := updateVMConfig(ocispec, &config)
-	assert.Nil(err)
-	assert.Equal(config.HypervisorConfig.MemorySize, expectedMem)
-
-	limitBytes = -128 * 1024 * 1024
-	ocispec.Linux.Resources.Memory.Limit = &limitBytes
-
-	err = updateVMConfig(ocispec, &config)
-	assert.NotNil(err)
-
-	// Test case when Memory is nil
-	ocispec.Spec.Linux.Resources.Memory = nil
-	err = updateVMConfig(ocispec, &config)
-	assert.Nil(err)
-
-	// Test case when CPU is nil
-	ocispec.Spec.Linux.Resources.CPU = nil
-	limitBytes = 20
-	ocispec.Linux.Resources.Memory = &specs.LinuxMemory{Limit: &limitBytes}
-	err = updateVMConfig(ocispec, &config)
-	assert.Nil(err)
-	assert.NotEqual(config.HypervisorConfig.MemorySize, expectedMem)
-}
-
 func testStatusToOCIStateSuccessful(t *testing.T, cStatus vc.ContainerStatus, expected specs.State) {
 	ociState := StatusToOCIState(cStatus)
 
```