clh: Add memory hotplug

Request the hypervisor to resize the VM memory.

Depends-on: github.com/kata-containers/tests#2413

Fixes: #2526

Signed-off-by: Jose Carlos Venegas Munoz <jose.carlos.venegas.munoz@intel.com>
Jose Carlos Venegas Munoz 2020-03-24 23:33:50 +00:00
parent 2f948738e4
commit aab82f6745
2 changed files with 130 additions and 2 deletions
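Before the diff itself, a quick orientation: the hotplug request boils down to a single VmResizePut call carrying the desired total guest memory in bytes. The sketch below only illustrates that call shape; resizeClient, fakeClient and requestResize are hypothetical stand-ins, and only the VmResize/DesiredRam shape and the int64-bytes convention are taken from the change below.

// Minimal sketch of the resize request shape, assuming a client that
// exposes the single cloud-hypervisor OpenAPI call used in this change.
package main

import (
	"context"
	"fmt"
	"time"
)

// VmResize mirrors the OpenAPI request body: sizes travel as int64 bytes.
type VmResize struct {
	DesiredRam int64
}

// resizeClient is a hypothetical stand-in for the generated OpenAPI client.
type resizeClient interface {
	VmResizePut(ctx context.Context, resize VmResize) error
}

// fakeClient records the last request so the sketch runs end to end.
type fakeClient struct{ last VmResize }

func (f *fakeClient) VmResizePut(_ context.Context, r VmResize) error {
	f.last = r
	return nil
}

func requestResize(cl resizeClient, desiredMiB uint32) error {
	// Bound the API call, as the driver does with its API timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Convert MiB to bytes; the OpenAPI model only supports int64.
	return cl.VmResizePut(ctx, VmResize{DesiredRam: int64(desiredMiB) * 1024 * 1024})
}

func main() {
	cl := &fakeClient{}
	if err := requestResize(cl, 2176); err != nil {
		panic(err)
	}
	fmt.Println(cl.last.DesiredRam) // 2281701376 bytes, i.e. 2176 MiB
}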

virtcontainers/clh.go

@@ -217,6 +217,13 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
// Convert to int64; the OpenAPI client only supports int64
clh.vmconfig.Memory.Size = int64((utils.MemUnit(clh.config.MemorySize) * utils.MiB).ToBytes())
clh.vmconfig.Memory.File = "/dev/shm"
hostMemKb, err := getHostMemorySizeKb(procMemInfo)
if err != nil {
return err
}
// OpenAPI only supports int64 values
clh.vmconfig.Memory.HotplugSize = int64((utils.MemUnit(hostMemKb) * utils.KiB).ToBytes())
// Set the initial number of CPUs for the virtual machine
clh.vmconfig.Cpus = chclient.CpusConfig{
// cast to int32, as openAPI has a limitation that it does not support unsigned values
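An aside on the HotplugSize value set a few lines above: it is simply the host memory size, read in KiB from /proc/meminfo and converted to bytes for the int64 OpenAPI field, so the guest can be grown up to roughly the host's RAM. A standalone illustration of the arithmetic (plain Go, not the kata utils.MemUnit helpers):

package main

import "fmt"

func main() {
	// Example host with 32 GiB of RAM as reported by /proc/meminfo, i.e. in KiB.
	hostMemKb := uint64(32 * 1024 * 1024)

	// Equivalent of (utils.MemUnit(hostMemKb) * utils.KiB).ToBytes() above.
	hotplugSizeBytes := int64(hostMemKb) * 1024

	fmt.Println(hotplugSizeBytes) // 34359738368, i.e. 32 GiB
}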
@@ -425,8 +432,68 @@ func (clh *cloudHypervisor) hypervisorConfig() HypervisorConfig {
}
func (clh *cloudHypervisor) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
clh.Logger().WithField("function", "resizeMemory").Warn("not supported")
return 0, memoryDevice{}, nil
// TODO: Add support for virtio-mem
if probe {
return 0, memoryDevice{}, errors.New("probe memory is not supported for cloud-hypervisor")
}
if reqMemMB == 0 {
// Resizing to 0 is a corner case: if it is ever requested, something went really wrong.
return 0, memoryDevice{}, errors.New("cannot resize memory to 0")
}
info, err := clh.vmInfo()
if err != nil {
return 0, memoryDevice{}, err
}
currentMem := utils.MemUnit(info.Config.Memory.Size) * utils.Byte
newMem := utils.MemUnit(reqMemMB) * utils.MiB
// Early check to verify if boot memory is the same as requested
if currentMem == newMem {
clh.Logger().WithField("memory", reqMemMB).Debugf("VM already has requested memory")
return uint32(currentMem.ToMiB()), memoryDevice{}, nil
}
if currentMem > newMem {
clh.Logger().Warn("Remove memory is not supported, nothing to do")
return uint32(currentMem.ToMiB()), memoryDevice{}, nil
}
blockSize := utils.MemUnit(memoryBlockSizeMB) * utils.MiB
hotplugSize := (newMem - currentMem).AlignMem(blockSize)
// Align the requested increase to the memory block size
alignedRequest := currentMem + hotplugSize
if newMem != alignedRequest {
clh.Logger().WithFields(log.Fields{"request": newMem, "aligned-request": alignedRequest}).Debug("aligning VM memory request")
newMem = alignedRequest
}
// Check again whether the current memory matches the request,
// now that the request has been aligned to the memory block size
if currentMem == newMem {
clh.Logger().WithFields(log.Fields{"current-memory": currentMem, "new-memory": newMem}).Debug("VM already has requested memory (after alignment)")
return uint32(currentMem.ToMiB()), memoryDevice{}, nil
}
cl := clh.client()
ctx, cancelResize := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
defer cancelResize()
// The OpenAPI client does not support uint64; convert to int64
resize := chclient.VmResize{DesiredRam: int64(newMem.ToBytes())}
clh.Logger().WithFields(log.Fields{"current-memory": currentMem, "new-memory": newMem}).Debug("updating VM memory")
if _, err = cl.VmResizePut(ctx, resize); err != nil {
clh.Logger().WithFields(log.Fields{"current-memory": currentMem, "new-memory": newMem}).Warnf("failed to update memory %s", openAPIClientError(err))
err = fmt.Errorf("failed to resize memory from %d to %d: %s", currentMem, newMem, openAPIClientError(err))
return uint32(currentMem.ToMiB()), memoryDevice{}, err
}
return uint32(newMem.ToMiB()), memoryDevice{sizeMB: int(hotplugSize.ToMiB())}, nil
}
func (clh *cloudHypervisor) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
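To make the alignment behaviour above concrete (it is what the new test below exercises): the difference between current and requested memory is rounded up to the next multiple of the guest memory block size, and only that aligned delta is hotplugged. A standalone sketch of the arithmetic, using 2048 MiB boot memory and a 128 MiB block purely as example values instead of the kata utils helpers:

package main

import "fmt"

// alignUp rounds delta up to the next multiple of block (both in MiB).
func alignUp(delta, block uint32) uint32 {
	if delta%block == 0 {
		return delta
	}
	return (delta/block + 1) * block
}

func main() {
	current := uint32(2048) // example boot memory in MiB
	block := uint32(128)    // example guest memory block size in MiB

	for _, req := range []uint32{current + 128, current + 129, current + 125} {
		hotplug := alignUp(req-current, block)
		fmt.Printf("request %d MiB -> hotplug %d MiB, new total %d MiB\n",
			req, hotplug, current+hotplug)
	}
	// request 2176 MiB -> hotplug 128 MiB, new total 2176 MiB
	// request 2177 MiB -> hotplug 256 MiB, new total 2304 MiB
	// request 2173 MiB -> hotplug 128 MiB, new total 2176 MiB
}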

virtcontainers/clh_test.go

@@ -10,15 +10,22 @@ import (
"net/http"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/persist"
chclient "github.com/kata-containers/runtime/virtcontainers/pkg/cloud-hypervisor/client"
"github.com/kata-containers/runtime/virtcontainers/utils"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
const (
FAIL = true
PASS = !FAIL
)
func newClhConfig() (HypervisorConfig, error) {
setupClh()
@@ -255,3 +262,57 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
err = clh.startSandbox(10)
assert.NoError(err)
}
func TestCloudHypervisorResizeMemory(t *testing.T) {
assert := assert.New(t)
clhConfig, err := newClhConfig()
type args struct {
reqMemMB uint32
memoryBlockSizeMB uint32
}
tests := []struct {
name string
args args
expectedMemDev memoryDevice
wantErr bool
}{
{"Resize to zero", args{0, 128}, memoryDevice{probe: false, sizeMB: 0}, FAIL},
{"Resize to aligned size", args{clhConfig.MemorySize + 128, 128}, memoryDevice{probe: false, sizeMB: 128}, PASS},
{"Resize to aligned size", args{clhConfig.MemorySize + 129, 128}, memoryDevice{probe: false, sizeMB: 256}, PASS},
{"Resize to NOT aligned size", args{clhConfig.MemorySize + 125, 128}, memoryDevice{probe: false, sizeMB: 128}, PASS},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.NoError(err)
clh := cloudHypervisor{}
mockClient := &clhClientMock{}
mockClient.vmInfo.Config.Memory.Size = int64(utils.MemUnit(clhConfig.MemorySize) * utils.MiB)
mockClient.vmInfo.Config.Memory.HotplugSize = int64(40 * utils.GiB.ToBytes())
clh.APIClient = mockClient
clh.config = clhConfig
newMem, memDev, err := clh.resizeMemory(tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
if (err != nil) != tt.wantErr {
t.Errorf("cloudHypervisor.resizeMemory() error = %v, expected to fail = %v", err, tt.wantErr)
return
}
if err != nil {
return
}
expectedMem := clhConfig.MemorySize + uint32(tt.expectedMemDev.sizeMB)
if newMem != expectedMem {
t.Errorf("cloudHypervisor.resizeMemory() got = %+v, want %+v", newMem, expectedMem)
}
if !reflect.DeepEqual(memDev, tt.expectedMemDev) {
t.Errorf("cloudHypervisor.resizeMemory() got = %+v, want %+v", memDev, tt.expectedMemDev)
}
})
}
}
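Assuming the usual kata-containers/runtime layout (this test lives in virtcontainers/clh_test.go), the new table-driven test can be run on its own with go test -run TestCloudHypervisorResizeMemory ./virtcontainers/; clhClientMock is the pre-existing mock client already used by the other cloud-hypervisor tests in this file.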