Merge pull request #786 from linzichang/master

sandbox/virtcontainers: memory resource hotplug when create container.
This commit is contained in:
Graham Whaley 2018-10-18 09:43:24 +01:00 committed by GitHub
commit 0a652a1ab8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 145 additions and 159 deletions

View File

@ -75,7 +75,7 @@ default_bridges = @DEFBRIDGES@
# Default memory size in MiB for SB/VM.
# If unspecified then it will be set to @DEFMEMSZ@ MiB.
#default_memory = @DEFMEMSZ@
default_memory = @DEFMEMSZ@
#
# Default memory slots per SB/VM.
# If unspecified then it will be set to @DEFMEMSLOTS@.

View File

@ -896,7 +896,6 @@ func TestCreateSandboxConfigFail(t *testing.T) {
_, err = createSandbox(context.Background(), spec, runtimeConfig, testContainerID, bundlePath, testConsole, true, true)
assert.Error(err)
assert.False(vcmock.IsMockError(err))
}
func TestCreateCreateSandboxFail(t *testing.T) {

View File

@ -112,6 +112,10 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
}
}()
if err := s.getAndStoreGuestDetails(); err != nil {
return nil, err
}
// Create Containers
if err = s.createContainers(); err != nil {
return nil, err
@ -122,11 +126,6 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
return nil, err
}
// get and store guest details
if err := s.getAndStoreGuestDetails(); err != nil {
return nil, err
}
return s, nil
}

View File

@ -178,7 +178,7 @@ type ContainerResources struct {
VCPUs uint32
// Mem is the memory that is being used by the container
MemMB uint32
MemByte int64
}
// ContainerConfig describes one container runtime configuration.
@ -984,13 +984,19 @@ func (c *Container) update(resources specs.LinuxResources) error {
newResources := ContainerResources{
VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
MemMB: uint32(*resources.Memory.Limit >> 20),
// Page-align the memory, as cgroup memory.limit_in_bytes is aligned to the page size when it takes effect.
// TODO use GetGuestDetails to get the guest OS page size.
MemByte: (*resources.Memory.Limit >> 12) << 12,
}
if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
return err
}
if err := c.storeContainer(); err != nil {
return err
}
return c.sandbox.agent.updateContainer(c.sandbox, *c, resources)
}
@ -1181,43 +1187,23 @@ func (c *Container) detachDevices() error {
}
func (c *Container) addResources() error {
//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
if c.config == nil {
return nil
}
// Container is being created, try to add the number of vCPUs specified
vCPUs := c.config.Resources.VCPUs
if vCPUs != 0 {
virtLog.Debugf("hot adding %d vCPUs", vCPUs)
data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
if err != nil {
return err
}
addResources := ContainerResources{
VCPUs: c.config.Resources.VCPUs,
MemByte: c.config.Resources.MemByte,
}
vcpusAdded, ok := data.(uint32)
if !ok {
return fmt.Errorf("Could not get the number of vCPUs added, got %+v", data)
}
// A different number of vCPUs was added; we have to update
// the resources so as not to remove vCPUs used by other containers.
if vcpusAdded != vCPUs {
// Set and save container's config
c.config.Resources.VCPUs = vcpusAdded
if err := c.storeContainer(); err != nil {
return err
}
}
return c.sandbox.agent.onlineCPUMem(vcpusAdded, true)
if err := c.updateResources(ContainerResources{0, 0}, addResources); err != nil {
return err
}
return nil
}
func (c *Container) removeResources() error {
//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
if c.config == nil {
return nil
}
@ -1237,11 +1223,12 @@ func (c *Container) removeResources() error {
return err
}
}
// hot remove memory unsupported
return nil
}
func (c *Container) updateVCPUResources(oldResources, newResources ContainerResources) error {
func (c *Container) updateVCPUResources(oldResources ContainerResources, newResources *ContainerResources) error {
var vCPUs uint32
oldVCPUs := oldResources.VCPUs
newVCPUs := newResources.VCPUs
@ -1254,9 +1241,7 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
"new-vcpus": fmt.Sprintf("%d", newVCPUs),
}).Debug("the actual number of vCPUs will not be modified")
return nil
}
if oldVCPUs < newVCPUs {
} else if oldVCPUs < newVCPUs {
// hot add vCPUs
vCPUs = newVCPUs - oldVCPUs
virtLog.Debugf("hot adding %d vCPUs", vCPUs)
@ -1288,39 +1273,39 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
// recalculate the actual number of vCPUs if a different number of vCPUs was removed
newResources.VCPUs = oldVCPUs - vcpusRemoved
}
return nil
}
func (c *Container) memHotplugValid(mem uint32) (uint32, error) {
memorySectionSizeMB := c.sandbox.state.GuestMemoryBlockSizeMB
if memorySectionSizeMB == 0 {
return mem, nil
// calculate the hotplug memory size aligned to the guest OS memory block size
func (c *Container) calcHotplugMemMiBSize(memByte int64) (uint32, error) {
memoryBlockSize := int64(c.sandbox.state.GuestMemoryBlockSizeMB)
if memoryBlockSize == 0 {
return uint32(memByte >> 20), nil
}
// TODO: hot-adding memory aligned to the memory section size would be more appropriate. See https://github.com/kata-containers/runtime/pull/624#issuecomment-419656853
return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
return uint32(int64(math.Ceil(float64(memByte)/float64(memoryBlockSize<<20))) * memoryBlockSize), nil
}
func (c *Container) updateMemoryResources(oldResources ContainerResources, newResources *ContainerResources) error {
oldMemMB := oldResources.MemMB
newMemMB := newResources.MemMB
oldMemByte := oldResources.MemByte
newMemByte := newResources.MemByte
c.Logger().WithFields(logrus.Fields{
"old-mem": fmt.Sprintf("%dMB", oldMemMB),
"new-mem": fmt.Sprintf("%dMB", newMemMB),
"old-mem": fmt.Sprintf("%dByte", oldMemByte),
"new-mem": fmt.Sprintf("%dByte", newMemByte),
}).Debug("Request update memory")
if oldMemMB == newMemMB {
if oldMemByte == newMemByte {
c.Logger().WithFields(logrus.Fields{
"old-mem": fmt.Sprintf("%dMB", oldMemMB),
"new-mem": fmt.Sprintf("%dMB", newMemMB),
"old-mem": fmt.Sprintf("%dByte", oldMemByte),
"new-mem": fmt.Sprintf("%dByte", newMemByte),
}).Debug("the actual number of Mem will not be modified")
return nil
}
if oldMemMB < newMemMB {
} else if oldMemByte < newMemByte {
// hot add memory
addMemMB := newMemMB - oldMemMB
memHotplugMB, err := c.memHotplugValid(addMemMB)
addMemByte := newMemByte - oldMemByte
memHotplugMB, err := c.calcHotplugMemMiBSize(addMemByte)
if err != nil {
return err
}
@ -1337,16 +1322,15 @@ func (c *Container) updateMemoryResources(oldResources ContainerResources, newRe
if !ok {
return fmt.Errorf("Could not get the memory added, got %+v", data)
}
newResources.MemMB = oldMemMB + uint32(memoryAdded)
newResources.MemByte = oldMemByte + int64(memoryAdded)<<20
if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
return err
}
}
if oldMemMB > newMemMB {
} else {
// Try to remove a memory device with the difference
// from new memory and old memory
removeMem := &memoryDevice{
sizeMB: int(oldMemMB - newMemMB),
sizeMB: int((oldMemByte - newMemByte) >> 20),
}
data, err := c.sandbox.hypervisor.hotplugRemoveDevice(removeMem, memoryDev)
@ -1357,39 +1341,34 @@ func (c *Container) updateMemoryResources(oldResources ContainerResources, newRe
if !ok {
return fmt.Errorf("Could not get the memory added, got %+v", data)
}
newResources.MemMB = oldMemMB - uint32(memoryRemoved)
newResources.MemMB = oldResources.MemMB
newResources.MemByte = oldMemByte - int64(memoryRemoved)<<20
}
return nil
}
func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
// initialize with oldResources
c.config.Resources.VCPUs = oldResources.VCPUs
c.config.Resources.MemMB = oldResources.MemMB
c.config.Resources.MemByte = oldResources.MemByte
// Cpu is not updated if period and/or quota not set
if newResources.VCPUs != 0 {
if err := c.updateVCPUResources(oldResources, newResources); err != nil {
if err := c.updateVCPUResources(oldResources, &newResources); err != nil {
return err
}
// Set and save container's config VCPUs field only
// Set container's config VCPUs field only
c.config.Resources.VCPUs = newResources.VCPUs
if err := c.storeContainer(); err != nil {
return err
}
}
// Memory is not updated if memory limit not set
if newResources.MemMB != 0 {
if newResources.MemByte != 0 {
if err := c.updateMemoryResources(oldResources, &newResources); err != nil {
return err
}
// Set and save container's config MemMB field only
c.config.Resources.MemMB = newResources.MemMB
return c.storeContainer()
// Set container's config MemByte field only
c.config.Resources.MemByte = newResources.MemByte
}
return nil

View File

@ -320,15 +320,15 @@ func TestContainerAddResources(t *testing.T) {
assert.Nil(err)
vCPUs := uint32(5)
memByte := int64(104857600)
c.config.Resources = ContainerResources{
VCPUs: vCPUs,
VCPUs: vCPUs,
MemByte: memByte,
}
c.sandbox = &Sandbox{
hypervisor: &mockHypervisor{
vCPUs: vCPUs,
},
agent: &noopAgent{},
storage: &filesystem{},
hypervisor: &mockHypervisor{},
agent: &noopAgent{},
storage: &filesystem{},
}
err = c.addResources()
assert.Nil(err)
@ -361,16 +361,94 @@ func TestContainerRemoveResources(t *testing.T) {
}
c.sandbox = &Sandbox{
hypervisor: &mockHypervisor{
vCPUs: vCPUs,
},
storage: &filesystem{},
hypervisor: &mockHypervisor{},
storage: &filesystem{},
}
err = c.removeResources()
assert.Nil(err)
}
// TestContainerUpdateResources exercises Container.updateResources against a
// sandbox built entirely from no-op/mock components (mockHypervisor, noopAgent,
// filesystem storage), covering the no-op paths (zero VCPUs, zero MemByte,
// unchanged resources) as well as hotplug add and hot remove of both CPU and
// memory. All cases are expected to succeed.
func TestContainerUpdateResources(t *testing.T) {
assert := assert.New(t)
sandbox := &Sandbox{
hypervisor: &mockHypervisor{},
agent: &noopAgent{},
storage: &filesystem{},
}
c := &Container{
sandbox: sandbox,
}
// Annotations map must be non-nil for config storage during updateResources.
c.config = &ContainerConfig{Annotations: make(map[string]string)}
// VCPUs is equal to zero
oldResource := ContainerResources{
VCPUs: 0,
MemByte: 0,
}
newResource := ContainerResources{
VCPUs: 0,
MemByte: 104857600,
}
err := c.updateResources(oldResource, newResource)
assert.Nil(err)
// MemByte is equal to zero
newResource = ContainerResources{
VCPUs: 5,
MemByte: 0,
}
err = c.updateResources(oldResource, newResource)
assert.Nil(err)
// oldResource is equal to newResource
oldResource = ContainerResources{
VCPUs: 5,
MemByte: 104857600,
}
newResource = ContainerResources{
VCPUs: 5,
MemByte: 104857600,
}
err = c.updateResources(oldResource, newResource)
assert.Nil(err)
// memory hotplug and cpu hotplug
oldResource = ContainerResources{
VCPUs: 5,
MemByte: 104857600,
}
newResource = ContainerResources{
VCPUs: 10,
MemByte: 209715200,
}
err = c.updateResources(oldResource, newResource)
assert.Nil(err)
// memory hot remove and cpu hot remove
oldResource = ContainerResources{
VCPUs: 10,
MemByte: 209715200,
}
newResource = ContainerResources{
VCPUs: 5,
MemByte: 104857600,
}
err = c.updateResources(oldResource, newResource)
assert.Nil(err)
}
func TestContainerEnterErrorsOnContainerStates(t *testing.T) {
assert := assert.New(t)
c := &Container{

View File

@ -8,7 +8,6 @@ package virtcontainers
import "context"
type mockHypervisor struct {
vCPUs uint32
}
func (m *mockHypervisor) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, storage resourceStorage) error {
@ -63,7 +62,7 @@ func (m *mockHypervisor) addDevice(devInfo interface{}, devType deviceType) erro
func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
switch devType {
case cpuDev:
return m.vCPUs, nil
return devInfo.(uint32), nil
case memoryDev:
memdev := devInfo.(*memoryDevice)
return memdev.sizeMB, nil
@ -74,7 +73,9 @@ func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceTyp
func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
switch devType {
case cpuDev:
return m.vCPUs, nil
return devInfo.(uint32), nil
case memoryDev:
return 0, nil
}
return nil, nil
}

View File

@ -412,25 +412,6 @@ func (spec *CompatOCISpec) SandboxID() (string, error) {
return "", fmt.Errorf("Could not find sandbox ID")
}
// updateVMConfig adjusts the hypervisor memory size in the runtime config from
// the OCI spec's Linux memory limit, if one is set. A missing Linux resources
// section is not an error; a non-positive memory limit is.
func updateVMConfig(ocispec CompatOCISpec, config *RuntimeConfig) error {
if ocispec.Linux == nil || ocispec.Linux.Resources == nil {
return nil
}
if ocispec.Linux.Resources.Memory != nil &&
ocispec.Linux.Resources.Memory.Limit != nil {
memBytes := *ocispec.Linux.Resources.Memory.Limit
if memBytes <= 0 {
return fmt.Errorf("Invalid OCI memory limit %d", memBytes)
}
// Round up to the nearest MiB by adding (1 MiB - 1) before dividing.
// This has the side effect that we can never have <1 MiB assigned.
config.HypervisorConfig.MemorySize = uint32((memBytes + (1024*1024 - 1)) / (1024 * 1024))
}
return nil
}
func addAssetAnnotations(ocispec CompatOCISpec, config *vc.SandboxConfig) {
assetAnnotations := []string{
vcAnnotations.KernelPath,
@ -469,11 +450,6 @@ func SandboxConfig(ocispec CompatOCISpec, runtime RuntimeConfig, bundlePath, cid
return vc.SandboxConfig{}, err
}
err = updateVMConfig(ocispec, &runtime)
if err != nil {
return vc.SandboxConfig{}, err
}
ociSpecJSON, err := json.Marshal(ocispec)
if err != nil {
return vc.SandboxConfig{}, err
@ -570,7 +546,9 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
}
if ocispec.Linux.Resources.Memory != nil {
if ocispec.Linux.Resources.Memory.Limit != nil {
resources.MemMB = uint32(*ocispec.Linux.Resources.Memory.Limit >> 20)
// Page-align the memory, as cgroup memory.limit_in_bytes is aligned to the page size when it takes effect
// TODO use GetGuestDetails to get the guest OS page size.
resources.MemByte = (*ocispec.Linux.Resources.Memory.Limit >> 12) << 12
}
}

View File

@ -254,54 +254,6 @@ func TestMinimalSandboxConfig(t *testing.T) {
}
}
// TestUpdateVmConfig verifies updateVMConfig: a positive OCI memory limit is
// rounded up to MiB and written to HypervisorConfig.MemorySize, a negative
// limit is rejected, and nil Memory/CPU resource sections are tolerated.
func TestUpdateVmConfig(t *testing.T) {
// 128 MiB expressed in bytes.
var limitBytes int64 = 128 * 1024 * 1024
assert := assert.New(t)
config := RuntimeConfig{
HypervisorConfig: vc.HypervisorConfig{
MemorySize: 2048,
},
}
// 128 MiB limit should map to a 128 MiB hypervisor memory size.
expectedMem := uint32(128)
ocispec := CompatOCISpec{
Spec: specs.Spec{
Linux: &specs.Linux{
Resources: &specs.LinuxResources{
Memory: &specs.LinuxMemory{
Limit: &limitBytes,
},
},
},
},
}
err := updateVMConfig(ocispec, &config)
assert.Nil(err)
assert.Equal(config.HypervisorConfig.MemorySize, expectedMem)
// A negative memory limit must be rejected.
limitBytes = -128 * 1024 * 1024
ocispec.Linux.Resources.Memory.Limit = &limitBytes
err = updateVMConfig(ocispec, &config)
assert.NotNil(err)
// Test case when Memory is nil
ocispec.Spec.Linux.Resources.Memory = nil
err = updateVMConfig(ocispec, &config)
assert.Nil(err)
// Test case when CPU is nil
ocispec.Spec.Linux.Resources.CPU = nil
limitBytes = 20
// A 20-byte limit still rounds up to 1 MiB, differing from expectedMem.
ocispec.Linux.Resources.Memory = &specs.LinuxMemory{Limit: &limitBytes}
err = updateVMConfig(ocispec, &config)
assert.Nil(err)
assert.NotEqual(config.HypervisorConfig.MemorySize, expectedMem)
}
func testStatusToOCIStateSuccessful(t *testing.T, cStatus vc.ContainerStatus, expected specs.State) {
ociState := StatusToOCIState(cStatus)