mirror of https://github.com/kata-containers/kata-containers.git
virtcontainers: hotplug memory with kata-runtime update command
Add support for using the update command to hotplug memory to the VM. Connect the kata-runtime update interface with the hypervisor memory hotplug feature.

Fixes #625

Signed-off-by: Clare Chen <clare.chenhui@huawei.com>
parent 0928519132
commit 13bf7d1bbc
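In outline, the commit connects the existing `kata-runtime update` plumbing to the hypervisor's memory hotplug path. A sketch of the resulting call chain, assembled from the hunks below — not a verbatim excerpt, and the CLI flag spelling follows runc's `update` convention, which is an assumption here:

```go
// Call chain introduced by this commit (simplified):
//
//	kata-runtime update --memory <bytes> <container-id>    // flag per runc convention (assumption)
//	  -> UpdateContainer()                                  // entry into virtcontainers
//	    -> Container.update()                               // specs.LinuxResources -> ContainerResources{VCPUs, MemMB}
//	      -> Container.updateResources()                    // dispatch: CPU path, memory path, or both
//	        -> Container.updateMemoryResources()
//	          -> hypervisor.hotplugAddDevice(&memoryDevice{sizeMB: n}, memoryDev)
//	          -> agent.onlineCPUMem(0, false)               // guest onlines the hot-added memory
package virtcontainers
```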
```diff
@@ -207,7 +207,8 @@ type agent interface {
 	// onlineCPUMem will online CPUs and Memory inside the Sandbox.
 	// This function should be called after hot adding vCPUs or Memory.
 	// cpus specifies the number of CPUs that were added and the agent should online
-	onlineCPUMem(cpus uint32) error
+	// cpuOnly specifies that we should online cpu or online memory or both
+	onlineCPUMem(cpus uint32, cpuOnly bool) error

 	// statsContainer will tell the agent to get stats from a container related to a Sandbox
 	statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error)
```
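The interface change above is the pivot of the commit: `onlineCPUMem` gains a `cpuOnly` flag so a single agent call can online hot-added CPUs, hot-added memory, or both. A minimal sketch of an implementation honoring the flag — `sketchAgent` and its printed actions are hypothetical, for illustration only:

```go
package main

import "fmt"

// sketchAgent illustrates the contract of the widened interface: cpus is
// the number of hot-added vCPUs to online, and cpuOnly == false asks the
// agent to online hot-added memory as well.
type sketchAgent struct{}

func (a sketchAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error {
	if cpus > 0 {
		fmt.Printf("onlining %d hot-added vCPU(s)\n", cpus)
	}
	if !cpuOnly {
		fmt.Println("onlining hot-added memory sections")
	}
	return nil
}

func main() {
	a := sketchAgent{}
	_ = a.onlineCPUMem(2, true)  // after vCPU hotplug (see addResources below)
	_ = a.onlineCPUMem(0, false) // after memory hotplug (see updateMemoryResources below)
}
```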
```diff
@@ -2330,12 +2330,18 @@ func TestUpdateContainer(t *testing.T) {

 	period := uint64(1000)
 	quota := int64(2000)
+	memoryLimit := int64(1073741824)
+	memorySwap := int64(1073741824)
 	assert := assert.New(t)
 	resources := specs.LinuxResources{
 		CPU: &specs.LinuxCPU{
 			Period: &period,
 			Quota:  &quota,
 		},
+		Memory: &specs.LinuxMemory{
+			Limit: &memoryLimit,
+			Swap:  &memorySwap,
+		},
 	}
 	err := UpdateContainer(ctx, "", "", resources)
 	assert.Error(err)
```
```diff
@@ -11,6 +11,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"path/filepath"
 	"syscall"
```
```diff
@@ -177,7 +178,7 @@ type ContainerResources struct {
 	VCPUs uint32

 	// Mem is the memory that is being used by the container
-	Mem uint32
+	MemMB uint32
 }

 // ContainerConfig describes one container runtime configuration.
```
```diff
@@ -991,6 +992,7 @@ func (c *Container) update(resources specs.LinuxResources) error {

 	newResources := ContainerResources{
 		VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
+		MemMB: uint32(*resources.Memory.Limit >> 20),
 	}

 	if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
```
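Two conversions in the hunk above are worth spelling out: the OCI memory limit arrives in bytes and is reduced to MiB with a 20-bit right shift, and the CPU quota/period pair collapses to a vCPU count (`utils.ConstraintsToVCPUs` is assumed here to round up, i.e. ceil(quota/period)). A self-contained illustration:

```go
package main

import "fmt"

func main() {
	// Memory: >> 20 divides by 2^20, so the test's 1073741824-byte
	// (1 GiB) limit becomes 1024 MiB.
	memoryLimit := int64(1073741824)
	fmt.Println(uint32(memoryLimit >> 20)) // 1024

	// CPU: a container allowed 150ms of CPU time per 100ms period
	// needs 2 vCPUs under ceiling division (assumption about
	// ConstraintsToVCPUs, which is not shown in this diff).
	quota, period := int64(150000), int64(100000)
	vcpus := (quota + period - 1) / period // ceil(quota / period)
	fmt.Println(uint32(vcpus)) // 2
}
```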
```diff
@@ -1216,7 +1218,7 @@ func (c *Container) addResources() error {
 			}
 		}

-		return c.sandbox.agent.onlineCPUMem(vcpusAdded)
+		return c.sandbox.agent.onlineCPUMem(vcpusAdded, true)
 	}

 	return nil
```
```diff
@@ -1247,16 +1249,14 @@ func (c *Container) removeResources() error {
 	return nil
 }

-func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
-	//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
+func (c *Container) updateVCPUResources(oldResources, newResources ContainerResources) error {
 	var vCPUs uint32
 	oldVCPUs := oldResources.VCPUs
 	newVCPUs := newResources.VCPUs

-	// Update vCPUs is not possible if period and/or quota are not set or
-	// oldVCPUs and newVCPUs are equal.
+	// Update vCPUs is not possible if oldVCPUs and newVCPUs are equal.
 	// Don't fail, the constraint still can be applied in the cgroup.
-	if newVCPUs == 0 || oldVCPUs == newVCPUs {
+	if oldVCPUs == newVCPUs {
 		c.Logger().WithFields(logrus.Fields{
 			"old-vcpus": fmt.Sprintf("%d", oldVCPUs),
 			"new-vcpus": fmt.Sprintf("%d", newVCPUs),
```
```diff
@@ -1278,7 +1278,7 @@ func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
 		}
 		// recalculate the actual number of vCPUs if a different number of vCPUs was added
 		newResources.VCPUs = oldVCPUs + vcpusAdded
-		if err := c.sandbox.agent.onlineCPUMem(vcpusAdded); err != nil {
+		if err := c.sandbox.agent.onlineCPUMem(vcpusAdded, true); err != nil {
 			return err
 		}
 	} else {
```
```diff
@@ -1296,8 +1296,82 @@ func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
 		// recalculate the actual number of vCPUs if a different number of vCPUs was removed
 		newResources.VCPUs = oldVCPUs - vcpusRemoved
 	}

-	// Set and save container's config
-	c.config.Resources = newResources
-	return c.storeContainer()
+	return nil
 }

+func (c *Container) memHotplugValid(mem *uint32) error {
+	// TODO: make memory aligned to correct memory boundary according to different architecture
+	const memorySectionSizeMB = 128
+	// TODO: make hot add memory to be aligned to memory section in more proper way. See https://github.com/kata-containers/runtime/pull/624#issuecomment-419656853
+	*mem = uint32(math.Ceil(float64(*mem)/memorySectionSizeMB)) * memorySectionSizeMB
+
+	return nil
+}
+
+func (c *Container) updateMemoryResources(oldResources, newResources ContainerResources) error {
+	oldMemMB := oldResources.MemMB
+	newMemMB := newResources.MemMB
+
+	if oldMemMB == newMemMB {
+		c.Logger().WithFields(logrus.Fields{
+			"old-mem": fmt.Sprintf("%dMB", oldMemMB),
+			"new-mem": fmt.Sprintf("%dMB", newMemMB),
+		}).Debug("the actual number of Mem will not be modified")
+		return nil
+	}
+
+	if oldMemMB < newMemMB {
+		// hot add memory
+		addMemMB := newMemMB - oldMemMB
+		if err := c.memHotplugValid(&addMemMB); err != nil {
+			return err
+		}
+
+		virtLog.Debugf("hot adding %dMB mem", addMemMB)
+		addMemDevice := &memoryDevice{
+			sizeMB: int(addMemMB),
+		}
+		_, err := c.sandbox.hypervisor.hotplugAddDevice(addMemDevice, memoryDev)
+		if err != nil {
+			return err
+		}
+		newResources.MemMB = newMemMB
+		if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
+			return err
+		}
+	}
+	// hot remove memory unsupported
+	return nil
+}
+
+func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
+	// initialize with oldResources
+	c.config.Resources.VCPUs = oldResources.VCPUs
+	c.config.Resources.MemMB = oldResources.MemMB
+
+	// Cpu is not updated if period and/or quota not set
+	if newResources.VCPUs != 0 {
+		if err := c.updateVCPUResources(oldResources, newResources); err != nil {
+			return err
+		}
+
+		// Set and save container's config VCPUs field only
+		c.config.Resources.VCPUs = newResources.VCPUs
+		if err := c.storeContainer(); err != nil {
+			return err
+		}
+	}
+
+	// Memory is not updated if memory limit not set
+	if newResources.MemMB != 0 {
+		if err := c.updateMemoryResources(oldResources, newResources); err != nil {
+			return err
+		}
+
+		// Set and save container's config MemMB field only
+		c.config.Resources.MemMB = newResources.MemMB
+		return c.storeContainer()
+	}
+
+	return nil
+}
```
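`memHotplugValid` rounds the requested hot-add size up to a multiple of the 128 MiB memory section size (an x86 assumption, per the TODOs in the hunk). The same rounding, runnable in isolation:

```go
package main

import (
	"fmt"
	"math"
)

// alignUpMB applies the same rounding as memHotplugValid above:
// hot-added memory is aligned up to the 128 MiB section size.
func alignUpMB(mem uint32) uint32 {
	const memorySectionSizeMB = 128
	return uint32(math.Ceil(float64(mem)/memorySectionSizeMB)) * memorySectionSizeMB
}

func main() {
	for _, mb := range []uint32{100, 128, 200, 1024} {
		fmt.Printf("%4d MB -> %4d MB\n", mb, alignUpMB(mb))
	}
	// 100 -> 128, 128 -> 128, 200 -> 256, 1024 -> 1024
}
```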
```diff
@@ -883,7 +883,7 @@ func (h *hyper) sendCmd(proxyCmd hyperstartProxyCmd) (interface{}, error) {
 	return h.client.HyperWithTokens(proxyCmd.cmd, tokens, proxyCmd.message)
 }

-func (h *hyper) onlineCPUMem(cpus uint32) error {
+func (h *hyper) onlineCPUMem(cpus uint32, cpuOnly bool) error {
 	// hyperstart-agent uses udev to online CPUs automatically
 	return nil
 }
```
```diff
@@ -1234,10 +1234,11 @@ func (k *kataAgent) resumeContainer(sandbox *Sandbox, c Container) error {
 	return err
 }

-func (k *kataAgent) onlineCPUMem(cpus uint32) error {
+func (k *kataAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error {
 	req := &grpc.OnlineCPUMemRequest{
-		Wait:   false,
-		NbCpus: cpus,
+		Wait:    false,
+		NbCpus:  cpus,
+		CpuOnly: cpuOnly,
 	}

 	_, err := k.sendReq(req)
```
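On the kata agent side the new flag simply rides along in the `OnlineCPUMemRequest`. The two call patterns this commit establishes, shown against a local stand-in for the generated gRPC type (a mock, not the real `grpc` package):

```go
package main

import "fmt"

// OnlineCPUMemRequest mirrors the field names in the hunk above; this
// local type is for illustration, not the real generated gRPC type.
type OnlineCPUMemRequest struct {
	Wait    bool
	NbCpus  uint32
	CpuOnly bool
}

func main() {
	// After hotplugging vCPUs (container.go hunks): online CPUs only.
	cpuReq := OnlineCPUMemRequest{Wait: false, NbCpus: 2, CpuOnly: true}
	// After hotplugging memory: no new CPUs, online memory too.
	memReq := OnlineCPUMemRequest{Wait: false, NbCpus: 0, CpuOnly: false}
	fmt.Printf("%+v\n%+v\n", cpuReq, memReq)
}
```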
```diff
@@ -317,7 +317,7 @@ func TestKataAgentSendReq(t *testing.T) {
 	err = k.resumeContainer(sandbox, Container{})
 	assert.Nil(err)

-	err = k.onlineCPUMem(1)
+	err = k.onlineCPUMem(1, true)
 	assert.Nil(err)

 	_, err = k.statsContainer(sandbox, Container{})
```
```diff
@@ -94,7 +94,7 @@ func (n *noopAgent) updateContainer(sandbox *Sandbox, c Container, resources spe
 }

 // onlineCPUMem is the Noop agent Container online CPU and Memory implementation. It does nothing.
-func (n *noopAgent) onlineCPUMem(cpus uint32) error {
+func (n *noopAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error {
 	return nil
 }

```
```diff
@@ -566,6 +566,11 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
 			resources.VCPUs = uint32(utils.ConstraintsToVCPUs(*ocispec.Linux.Resources.CPU.Quota, *ocispec.Linux.Resources.CPU.Period))
 		}
 	}
+	if ocispec.Linux.Resources.Memory != nil {
+		if ocispec.Linux.Resources.Memory.Limit != nil {
+			resources.MemMB = uint32(*ocispec.Linux.Resources.Memory.Limit >> 20)
+		}
+	}

 	containerConfig := vc.ContainerConfig{
 		ID: cid,
```
```diff
@@ -1044,6 +1044,11 @@ func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error {
 		return errors.New("cannot hot unplug memory device")
 	}

+	err := q.qmpSetup()
+	if err != nil {
+		return err
+	}
+
 	maxMem, err := q.hostMemMB()
 	if err != nil {
 		return err
```
```diff
@@ -1058,16 +1063,20 @@ func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error {
 			memDev.sizeMB, currentMemory, q.config.MemorySize)
 	}

+	memoryDevices, err := q.qmpMonitorCh.qmp.ExecQueryMemoryDevices(q.qmpMonitorCh.ctx)
+	if err != nil {
+		return fmt.Errorf("failed to query memory devices: %v", err)
+	}
+
+	if len(memoryDevices) != 0 {
+		memDev.slot = memoryDevices[len(memoryDevices)-1].Data.Slot + 1
+	}
+
 	return q.hotplugAddMemory(memDev)
 }

 func (q *qemu) hotplugAddMemory(memDev *memoryDevice) error {
-	err := q.qmpSetup()
-	if err != nil {
-		return err
-	}
-
-	err = q.qmpMonitorCh.qmp.ExecHotplugMemory(q.qmpMonitorCh.ctx, "memory-backend-ram", "mem"+strconv.Itoa(memDev.slot), "", memDev.sizeMB)
+	err := q.qmpMonitorCh.qmp.ExecHotplugMemory(q.qmpMonitorCh.ctx, "memory-backend-ram", "mem"+strconv.Itoa(memDev.slot), "", memDev.sizeMB)
 	if err != nil {
 		q.Logger().WithError(err).Error("hotplug memory")
 		return err
```
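The slot selection above keeps repeated updates from colliding: `hotplugMemory` queries QMP for the memory devices already plugged and takes the last device's slot plus one, and `hotplugAddMemory` then names the new device `"mem"+slot`. A sketch of just that logic, with `memoryDeviceInfo` as a local stand-in for the QMP query result type:

```go
package main

import (
	"fmt"
	"strconv"
)

// memoryDeviceInfo stands in for the QMP memory-device query result
// (the real type comes from the QMP client package).
type memoryDeviceInfo struct{ Slot int }

// nextSlot mirrors the selection in hotplugMemory: reuse slot 0 when
// nothing is plugged yet, otherwise take the last device's slot + 1.
// This assumes QMP returns devices in plug order.
func nextSlot(devices []memoryDeviceInfo) int {
	if len(devices) == 0 {
		return 0
	}
	return devices[len(devices)-1].Slot + 1
}

func main() {
	devices := []memoryDeviceInfo{{Slot: 0}, {Slot: 1}}
	slot := nextSlot(devices)
	fmt.Println("mem" + strconv.Itoa(slot)) // device id "mem2", as in hotplugAddMemory
}
```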
```diff
@@ -268,7 +268,7 @@ func (v *VM) AddMemory(numMB uint32) error {
 // OnlineCPUMemory puts the hotplugged CPU and memory online.
 func (v *VM) OnlineCPUMemory() error {
 	v.logger().Infof("online CPU %d and memory", v.cpuDelta)
-	err := v.agent.onlineCPUMem(v.cpuDelta)
+	err := v.agent.onlineCPUMem(v.cpuDelta, false)
 	if err == nil {
 		v.cpuDelta = 0
 	}
```