Merge pull request #786 from linzichang/master

sandbox/virtcontainers: memory resource hotplug when create container.

Commit 0a652a1ab8
@@ -75,7 +75,7 @@ default_bridges = @DEFBRIDGES@
 
 # Default memory size in MiB for SB/VM.
 # If unspecified then it will be set @DEFMEMSZ@ MiB.
-#default_memory = @DEFMEMSZ@
+default_memory = @DEFMEMSZ@
 #
 # Default memory slots per SB/VM.
 # If unspecified then it will be set @DEFMEMSLOTS@.
@@ -896,7 +896,6 @@ func TestCreateSandboxConfigFail(t *testing.T) {
 
    _, err = createSandbox(context.Background(), spec, runtimeConfig, testContainerID, bundlePath, testConsole, true, true)
    assert.Error(err)
-   assert.False(vcmock.IsMockError(err))
 }
 
 func TestCreateCreateSandboxFail(t *testing.T) {
@@ -112,6 +112,10 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
        }
    }()
 
+   if err := s.getAndStoreGuestDetails(); err != nil {
+       return nil, err
+   }
+
    // Create Containers
    if err = s.createContainers(); err != nil {
        return nil, err
@@ -122,11 +126,6 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
        return nil, err
    }
 
-   // get and store guest details
-   if err := s.getAndStoreGuestDetails(); err != nil {
-       return nil, err
-   }
-
    return s, nil
 }
 
@@ -178,7 +178,7 @@ type ContainerResources struct {
    VCPUs uint32
 
    // Mem is the memory that is being used by the container
-   MemMB uint32
+   MemByte int64
 }
 
 // ContainerConfig describes one container runtime configuration.
@@ -984,13 +984,19 @@ func (c *Container) update(resources specs.LinuxResources) error {
 
    newResources := ContainerResources{
        VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
-       MemMB: uint32(*resources.Memory.Limit >> 20),
+       // do page align to memory, as cgroup memory.limit_in_bytes will be aligned to page when effect.
+       // TODO use GetGuestDetails to get the guest OS page size.
+       MemByte: (*resources.Memory.Limit >> 12) << 12,
    }
 
    if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
        return err
    }
 
+   if err := c.storeContainer(); err != nil {
+       return err
+   }
+
    return c.sandbox.agent.updateContainer(c.sandbox, *c, resources)
 }
 
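The update() hunk above switches the container memory bookkeeping from whole MiB to bytes, and truncates the OCI limit to a page boundary instead of rounding it to MiB, since cgroup memory.limit_in_bytes is page aligned when it takes effect. A minimal standalone sketch of that truncation, assuming the 4 KiB page size implied by the shift by 12 (the in-code TODO notes that GetGuestDetails should eventually report the real guest page size):

    package main

    import "fmt"

    // pageAlign mirrors the (limit >> 12) << 12 expression used above: it
    // truncates a byte count down to a multiple of 4096 bytes.
    func pageAlign(limitBytes int64) int64 {
        return (limitBytes >> 12) << 12
    }

    func main() {
        fmt.Println(pageAlign(4097))      // 4096: anything past a page boundary is dropped
        fmt.Println(pageAlign(104857600)) // 104857600: 100 MiB is already page aligned
    }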
@@ -1181,43 +1187,23 @@ func (c *Container) detachDevices() error {
 }
 
 func (c *Container) addResources() error {
-   //TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
    if c.config == nil {
        return nil
    }
 
-   // Container is being created, try to add the number of vCPUs specified
-   vCPUs := c.config.Resources.VCPUs
-   if vCPUs != 0 {
-       virtLog.Debugf("hot adding %d vCPUs", vCPUs)
-       data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
-       if err != nil {
-           return err
-       }
-
-       vcpusAdded, ok := data.(uint32)
-       if !ok {
-           return fmt.Errorf("Could not get the number of vCPUs added, got %+v", data)
-       }
-
-       // A different number of vCPUs was added, we have to update
-       // the resources in order to don't remove vCPUs used by other containers.
-       if vcpusAdded != vCPUs {
-           // Set and save container's config
-           c.config.Resources.VCPUs = vcpusAdded
-           if err := c.storeContainer(); err != nil {
-               return err
-           }
-       }
-
-       return c.sandbox.agent.onlineCPUMem(vcpusAdded, true)
+   addResources := ContainerResources{
+       VCPUs:   c.config.Resources.VCPUs,
+       MemByte: c.config.Resources.MemByte,
+   }
+
+   if err := c.updateResources(ContainerResources{0, 0}, addResources); err != nil {
+       return err
    }
 
    return nil
 }
 
 func (c *Container) removeResources() error {
-   //TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
    if c.config == nil {
        return nil
    }
@@ -1237,11 +1223,12 @@ func (c *Container) removeResources() error {
            return err
        }
    }
+   // hot remove memory unsupported
 
    return nil
 }
 
-func (c *Container) updateVCPUResources(oldResources, newResources ContainerResources) error {
+func (c *Container) updateVCPUResources(oldResources ContainerResources, newResources *ContainerResources) error {
    var vCPUs uint32
    oldVCPUs := oldResources.VCPUs
    newVCPUs := newResources.VCPUs
@@ -1254,9 +1241,7 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
            "new-vcpus": fmt.Sprintf("%d", newVCPUs),
        }).Debug("the actual number of vCPUs will not be modified")
        return nil
-   }
-
-   if oldVCPUs < newVCPUs {
+   } else if oldVCPUs < newVCPUs {
        // hot add vCPUs
        vCPUs = newVCPUs - oldVCPUs
        virtLog.Debugf("hot adding %d vCPUs", vCPUs)
@@ -1288,39 +1273,39 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
        // recalculate the actual number of vCPUs if a different number of vCPUs was removed
        newResources.VCPUs = oldVCPUs - vcpusRemoved
    }
 
    return nil
 }
 
-func (c *Container) memHotplugValid(mem uint32) (uint32, error) {
-   memorySectionSizeMB := c.sandbox.state.GuestMemoryBlockSizeMB
-   if memorySectionSizeMB == 0 {
-       return mem, nil
+// calculate hotplug memory size with memory block size of guestos
+func (c *Container) calcHotplugMemMiBSize(memByte int64) (uint32, error) {
+   memoryBlockSize := int64(c.sandbox.state.GuestMemoryBlockSizeMB)
+   if memoryBlockSize == 0 {
+       return uint32(memByte >> 20), nil
    }
 
    // TODO: hot add memory aligned to memory section should be more properly. See https://github.com/kata-containers/runtime/pull/624#issuecomment-419656853
-   return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
+   return uint32(int64(math.Ceil(float64(memByte)/float64(memoryBlockSize<<20))) * memoryBlockSize), nil
 }
 
 func (c *Container) updateMemoryResources(oldResources ContainerResources, newResources *ContainerResources) error {
-   oldMemMB := oldResources.MemMB
-   newMemMB := newResources.MemMB
+   oldMemByte := oldResources.MemByte
+   newMemByte := newResources.MemByte
    c.Logger().WithFields(logrus.Fields{
-       "old-mem": fmt.Sprintf("%dMB", oldMemMB),
-       "new-mem": fmt.Sprintf("%dMB", newMemMB),
+       "old-mem": fmt.Sprintf("%dByte", oldMemByte),
+       "new-mem": fmt.Sprintf("%dByte", newMemByte),
    }).Debug("Request update memory")
 
-   if oldMemMB == newMemMB {
+   if oldMemByte == newMemByte {
        c.Logger().WithFields(logrus.Fields{
-           "old-mem": fmt.Sprintf("%dMB", oldMemMB),
-           "new-mem": fmt.Sprintf("%dMB", newMemMB),
+           "old-mem": fmt.Sprintf("%dByte", oldMemByte),
+           "new-mem": fmt.Sprintf("%dByte", newMemByte),
        }).Debug("the actual number of Mem will not be modified")
        return nil
-   }
-
-   if oldMemMB < newMemMB {
+   } else if oldMemByte < newMemByte {
        // hot add memory
-       addMemMB := newMemMB - oldMemMB
-       memHotplugMB, err := c.memHotplugValid(addMemMB)
+       addMemByte := newMemByte - oldMemByte
+       memHotplugMB, err := c.calcHotplugMemMiBSize(addMemByte)
        if err != nil {
            return err
        }
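calcHotplugMemMiBSize, introduced in the hunk above, rounds the requested byte count up to a whole number of guest memory blocks, because the guest can only online memory in GuestMemoryBlockSizeMB-sized sections. A standalone sketch of that rounding, using a hypothetical 128 MiB block size:

    package main

    import (
        "fmt"
        "math"
    )

    // roundUpToBlockMiB reproduces the return expression of calcHotplugMemMiBSize:
    // the result is a MiB count that is a multiple of the guest memory block size.
    // blockSizeMB stands in for c.sandbox.state.GuestMemoryBlockSizeMB.
    func roundUpToBlockMiB(memByte, blockSizeMB int64) uint32 {
        if blockSizeMB == 0 {
            return uint32(memByte >> 20) // block size unknown: plain bytes-to-MiB conversion
        }
        return uint32(int64(math.Ceil(float64(memByte)/float64(blockSizeMB<<20))) * blockSizeMB)
    }

    func main() {
        fmt.Println(roundUpToBlockMiB(104857600, 128)) // 100 MiB requested -> 128 MiB hotplugged
        fmt.Println(roundUpToBlockMiB(104857600, 0))   // no block size reported -> 100
    }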
@@ -1337,16 +1322,15 @@ func (c *Container) updateMemoryResources(oldResources ContainerResources, newRe
        if !ok {
            return fmt.Errorf("Could not get the memory added, got %+v", data)
        }
-       newResources.MemMB = oldMemMB + uint32(memoryAdded)
+       newResources.MemByte = oldMemByte + int64(memoryAdded)<<20
        if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
            return err
        }
-   }
-   if oldMemMB > newMemMB {
+   } else {
        // Try to remove a memory device with the difference
        // from new memory and old memory
        removeMem := &memoryDevice{
-           sizeMB: int(oldMemMB - newMemMB),
+           sizeMB: int((oldMemByte - newMemByte) >> 20),
        }
 
        data, err := c.sandbox.hypervisor.hotplugRemoveDevice(removeMem, memoryDev)
@@ -1357,39 +1341,34 @@ func (c *Container) updateMemoryResources(oldResources ContainerResources, newRe
        if !ok {
            return fmt.Errorf("Could not get the memory added, got %+v", data)
        }
-       newResources.MemMB = oldMemMB - uint32(memoryRemoved)
-       newResources.MemMB = oldResources.MemMB
+       newResources.MemByte = oldMemByte - int64(memoryRemoved)<<20
    }
 
    return nil
 }
 
 func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
    // initialize with oldResources
    c.config.Resources.VCPUs = oldResources.VCPUs
-   c.config.Resources.MemMB = oldResources.MemMB
+   c.config.Resources.MemByte = oldResources.MemByte
 
    // Cpu is not updated if period and/or quota not set
    if newResources.VCPUs != 0 {
-       if err := c.updateVCPUResources(oldResources, newResources); err != nil {
+       if err := c.updateVCPUResources(oldResources, &newResources); err != nil {
            return err
        }
-       // Set and save container's config VCPUs field only
+       // Set container's config VCPUs field only
        c.config.Resources.VCPUs = newResources.VCPUs
-       if err := c.storeContainer(); err != nil {
-           return err
-       }
    }
 
    // Memory is not updated if memory limit not set
-   if newResources.MemMB != 0 {
+   if newResources.MemByte != 0 {
        if err := c.updateMemoryResources(oldResources, &newResources); err != nil {
            return err
        }
-       // Set and save container's config MemMB field only
-       c.config.Resources.MemMB = newResources.MemMB
-       return c.storeContainer()
+       // Set container's config MemByte field only
+       c.config.Resources.MemByte = newResources.MemByte
    }
 
    return nil
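A worked example may help follow the byte/MiB bookkeeping in updateMemoryResources above: ContainerResources now tracks bytes, while the hypervisor reports the hotplugged amount in MiB, so the reported value is shifted back up by 20 bits before being stored. The numbers below are hypothetical:

    package main

    import "fmt"

    func main() {
        // Grow a container from 100 MiB to 300 MiB.
        oldMemByte := int64(104857600) // current size in bytes (100 MiB)
        newMemByte := int64(314572800) // requested size in bytes (300 MiB)

        addMemByte := newMemByte - oldMemByte
        fmt.Println(addMemByte) // 209715200 bytes (200 MiB) to hot add

        // The hypervisor reports the hotplugged amount in MiB, so it is
        // shifted back to bytes before updating newResources.MemByte.
        memoryAdded := 200
        fmt.Println(oldMemByte + int64(memoryAdded)<<20) // 314572800
    }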
@@ -320,15 +320,15 @@ func TestContainerAddResources(t *testing.T) {
    assert.Nil(err)
 
    vCPUs := uint32(5)
+   memByte := int64(104857600)
    c.config.Resources = ContainerResources{
-       VCPUs: vCPUs,
+       VCPUs:   vCPUs,
+       MemByte: memByte,
    }
    c.sandbox = &Sandbox{
-       hypervisor: &mockHypervisor{
-           vCPUs: vCPUs,
-       },
-       agent: &noopAgent{},
-       storage: &filesystem{},
+       hypervisor: &mockHypervisor{},
+       agent:      &noopAgent{},
+       storage:    &filesystem{},
    }
    err = c.addResources()
    assert.Nil(err)
@@ -361,16 +361,94 @@ func TestContainerRemoveResources(t *testing.T) {
    }
 
    c.sandbox = &Sandbox{
-       hypervisor: &mockHypervisor{
-           vCPUs: vCPUs,
-       },
-       storage: &filesystem{},
+       hypervisor: &mockHypervisor{},
+       storage:    &filesystem{},
    }
 
    err = c.removeResources()
    assert.Nil(err)
 }
 
+func TestContainerUpdateResources(t *testing.T) {
+   assert := assert.New(t)
+
+   sandbox := &Sandbox{
+       hypervisor: &mockHypervisor{},
+       agent:      &noopAgent{},
+       storage:    &filesystem{},
+   }
+
+   c := &Container{
+       sandbox: sandbox,
+   }
+   c.config = &ContainerConfig{Annotations: make(map[string]string)}
+
+   // VCPUs is equal to zero
+   oldResource := ContainerResources{
+       VCPUs:   0,
+       MemByte: 0,
+   }
+
+   newResource := ContainerResources{
+       VCPUs:   0,
+       MemByte: 104857600,
+   }
+
+   err := c.updateResources(oldResource, newResource)
+   assert.Nil(err)
+
+   // MemByte is equal to zero
+   newResource = ContainerResources{
+       VCPUs:   5,
+       MemByte: 0,
+   }
+
+   err = c.updateResources(oldResource, newResource)
+   assert.Nil(err)
+
+   // oldResource is equal to newResource
+   oldResource = ContainerResources{
+       VCPUs:   5,
+       MemByte: 104857600,
+   }
+
+   newResource = ContainerResources{
+       VCPUs:   5,
+       MemByte: 104857600,
+   }
+
+   err = c.updateResources(oldResource, newResource)
+   assert.Nil(err)
+
+   // memory hotplug and cpu hotplug
+   oldResource = ContainerResources{
+       VCPUs:   5,
+       MemByte: 104857600,
+   }
+
+   newResource = ContainerResources{
+       VCPUs:   10,
+       MemByte: 209715200,
+   }
+
+   err = c.updateResources(oldResource, newResource)
+   assert.Nil(err)
+
+   // memory hot remove and cpu hot remove
+   oldResource = ContainerResources{
+       VCPUs:   10,
+       MemByte: 209715200,
+   }
+
+   newResource = ContainerResources{
+       VCPUs:   5,
+       MemByte: 104857600,
+   }
+
+   err = c.updateResources(oldResource, newResource)
+   assert.Nil(err)
+}
+
 func TestContainerEnterErrorsOnContainerStates(t *testing.T) {
    assert := assert.New(t)
    c := &Container{
@@ -8,7 +8,6 @@ package virtcontainers
 import "context"
 
 type mockHypervisor struct {
-   vCPUs uint32
 }
 
 func (m *mockHypervisor) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
@@ -63,7 +62,7 @@ func (m *mockHypervisor) addDevice(devInfo interface{}, devType deviceType) erro
 func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
    switch devType {
    case cpuDev:
-       return m.vCPUs, nil
+       return devInfo.(uint32), nil
    case memoryDev:
        memdev := devInfo.(*memoryDevice)
        return memdev.sizeMB, nil
@@ -74,7 +73,9 @@ func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceTyp
 func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
    switch devType {
    case cpuDev:
-       return m.vCPUs, nil
+       return devInfo.(uint32), nil
+   case memoryDev:
+       return 0, nil
    }
    return nil, nil
 }
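With the fixed vCPUs field removed, the mock hypervisor above simply echoes back whatever count it is asked to hotplug and reports 0 for memory removal, which is what lets TestContainerUpdateResources drive arbitrary resource values. A minimal stand-in for that echo pattern (not the real mock, just the shape of it):

    package main

    import "fmt"

    // hotplugAdd imitates the mock's behaviour: the requested vCPU count is
    // returned unchanged, as if the hotplug always succeeded in full.
    func hotplugAdd(devInfo interface{}) (interface{}, error) {
        return devInfo.(uint32), nil
    }

    func main() {
        data, _ := hotplugAdd(uint32(5))
        vcpusAdded, ok := data.(uint32) // the runtime side does the same type assertion
        fmt.Println(vcpusAdded, ok)     // 5 true
    }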
@@ -412,25 +412,6 @@ func (spec *CompatOCISpec) SandboxID() (string, error) {
    return "", fmt.Errorf("Could not find sandbox ID")
 }
 
-func updateVMConfig(ocispec CompatOCISpec, config *RuntimeConfig) error {
-   if ocispec.Linux == nil || ocispec.Linux.Resources == nil {
-       return nil
-   }
-
-   if ocispec.Linux.Resources.Memory != nil &&
-       ocispec.Linux.Resources.Memory.Limit != nil {
-       memBytes := *ocispec.Linux.Resources.Memory.Limit
-       if memBytes <= 0 {
-           return fmt.Errorf("Invalid OCI memory limit %d", memBytes)
-       }
-       // Use some math magic to round up to the nearest Mb.
-       // This has the side effect that we can never have <1Mb assigned.
-       config.HypervisorConfig.MemorySize = uint32((memBytes + (1024*1024 - 1)) / (1024 * 1024))
-   }
-
-   return nil
-}
-
 func addAssetAnnotations(ocispec CompatOCISpec, config *vc.SandboxConfig) {
    assetAnnotations := []string{
        vcAnnotations.KernelPath,
@@ -469,11 +450,6 @@ func SandboxConfig(ocispec CompatOCISpec, runtime RuntimeConfig, bundlePath, cid
        return vc.SandboxConfig{}, err
    }
 
-   err = updateVMConfig(ocispec, &runtime)
-   if err != nil {
-       return vc.SandboxConfig{}, err
-   }
-
    ociSpecJSON, err := json.Marshal(ocispec)
    if err != nil {
        return vc.SandboxConfig{}, err
@@ -570,7 +546,9 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
    }
    if ocispec.Linux.Resources.Memory != nil {
        if ocispec.Linux.Resources.Memory.Limit != nil {
-           resources.MemMB = uint32(*ocispec.Linux.Resources.Memory.Limit >> 20)
+           // do page align to memory, as cgroup memory.limit_in_bytes will be aligned to page when effect
+           // TODO use GetGuestDetails to get the guest OS page size.
+           resources.MemByte = (*ocispec.Linux.Resources.Memory.Limit >> 12) << 12
        }
    }
 
@@ -254,54 +254,6 @@ func TestMinimalSandboxConfig(t *testing.T) {
    }
 }
 
-func TestUpdateVmConfig(t *testing.T) {
-   var limitBytes int64 = 128 * 1024 * 1024
-   assert := assert.New(t)
-
-   config := RuntimeConfig{
-       HypervisorConfig: vc.HypervisorConfig{
-           MemorySize: 2048,
-       },
-   }
-
-   expectedMem := uint32(128)
-
-   ocispec := CompatOCISpec{
-       Spec: specs.Spec{
-           Linux: &specs.Linux{
-               Resources: &specs.LinuxResources{
-                   Memory: &specs.LinuxMemory{
-                       Limit: &limitBytes,
-                   },
-               },
-           },
-       },
-   }
-
-   err := updateVMConfig(ocispec, &config)
-   assert.Nil(err)
-   assert.Equal(config.HypervisorConfig.MemorySize, expectedMem)
-
-   limitBytes = -128 * 1024 * 1024
-   ocispec.Linux.Resources.Memory.Limit = &limitBytes
-
-   err = updateVMConfig(ocispec, &config)
-   assert.NotNil(err)
-
-   // Test case when Memory is nil
-   ocispec.Spec.Linux.Resources.Memory = nil
-   err = updateVMConfig(ocispec, &config)
-   assert.Nil(err)
-
-   // Test case when CPU is nil
-   ocispec.Spec.Linux.Resources.CPU = nil
-   limitBytes = 20
-   ocispec.Linux.Resources.Memory = &specs.LinuxMemory{Limit: &limitBytes}
-   err = updateVMConfig(ocispec, &config)
-   assert.Nil(err)
-   assert.NotEqual(config.HypervisorConfig.MemorySize, expectedMem)
-}
-
 func testStatusToOCIStateSuccessful(t *testing.T, cStatus vc.ContainerStatus, expected specs.State) {
    ociState := StatusToOCIState(cStatus)
 