Merge pull request #470 from bergwolf/memory-hotplug

hypervisor/qemu: add memory hotplug support
Sebastien Boeuf 2018-07-09 09:56:19 -07:00 committed by GitHub
commit a8952fb79b
12 changed files with 511 additions and 76 deletions

Gopkg.lock (generated, 7 changed lines)

@@ -84,14 +84,15 @@
 version = "v1.0.0"

 [[projects]]
+branch = "master"
 name = "github.com/hashicorp/yamux"
 packages = ["."]
-revision = "f5742cb6b85602e7fa834e9d5d91a7d7fa850824"
+revision = "3520598351bb3500a49ae9563f5539666ae0a27c"

 [[projects]]
 name = "github.com/intel/govmm"
 packages = ["qemu"]
-revision = "9cf8ce6c6dda19d4a6d529e73714e231f6156820"
+revision = "ff2401825e0930811919c86c36d64b113aa00083"

 [[projects]]
 name = "github.com/kata-containers/agent"

@@ -263,6 +264,6 @@
 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "a8e90901b945488c3b660e20c076fce3345dba96b4ec15e7ca00b8a06baa16a3"
+inputs-digest = "ea3d6532c4375832a1c79d70af45e6722e526bde97f6caf23d90b91267a3cf0b"
 solver-name = "gps-cdcl"
 solver-version = 1

View File

@@ -56,7 +56,7 @@
 [[constraint]]
 name = "github.com/intel/govmm"
-revision = "9cf8ce6c6dda19d4a6d529e73714e231f6156820"
+revision = "ff2401825e0930811919c86c36d64b113aa00083"

 [[constraint]]
 name = "github.com/kata-containers/agent"

@@ -70,11 +70,11 @@
 name = "github.com/safchain/ethtool"
 revision = "79559b488d8848b53a8e34c330140c3fc37ee246"

-[[override]]
-branch = "master"
-name = "github.com/hashicorp/yamux"
-
 [prune]
 non-go = true
 go-tests = true
 unused-packages = true
+
+[[constraint]]
+branch = "master"
+name = "github.com/hashicorp/yamux"

vendor/github.com/docker/go-units/duration.go (generated, vendored, new file, 35 lines)

@@ -0,0 +1,35 @@
// Package units provides helper function to parse and print size and time units
// in human-readable format.
package units
import (
"fmt"
"time"
)
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.).
func HumanDuration(d time.Duration) string {
if seconds := int(d.Seconds()); seconds < 1 {
return "Less than a second"
} else if seconds == 1 {
return "1 second"
} else if seconds < 60 {
return fmt.Sprintf("%d seconds", seconds)
} else if minutes := int(d.Minutes()); minutes == 1 {
return "About a minute"
} else if minutes < 46 {
return fmt.Sprintf("%d minutes", minutes)
} else if hours := int(d.Hours() + 0.5); hours == 1 {
return "About an hour"
} else if hours < 48 {
return fmt.Sprintf("%d hours", hours)
} else if hours < 24*7*2 {
return fmt.Sprintf("%d days", hours/24)
} else if hours < 24*30*2 {
return fmt.Sprintf("%d weeks", hours/24/7)
} else if hours < 24*365*2 {
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", int(d.Hours())/24/365)
}
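For reference, a minimal sketch of the buckets HumanDuration picks, assuming the package is imported by its upstream path:

package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	// Each duration falls into one branch of the chain above.
	fmt.Println(units.HumanDuration(500 * time.Millisecond)) // Less than a second
	fmt.Println(units.HumanDuration(90 * time.Second))       // About a minute
	fmt.Println(units.HumanDuration(3 * time.Hour))          // 3 hours
	fmt.Println(units.HumanDuration(72 * time.Hour))         // 3 days
}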

vendor/github.com/docker/go-units/ulimit.go (generated, vendored, new file, 118 lines)

@@ -0,0 +1,118 @@
package units
import (
"fmt"
"strconv"
"strings"
)
// Ulimit is a human friendly version of Rlimit.
type Ulimit struct {
Name string
Hard int64
Soft int64
}
// Rlimit specifies the resource limits, such as max open files.
type Rlimit struct {
Type int `json:"type,omitempty"`
Hard uint64 `json:"hard,omitempty"`
Soft uint64 `json:"soft,omitempty"`
}
const (
// magic numbers for making the syscall
// some of these are defined in the syscall package, but not all.
// Also since Windows client doesn't get access to the syscall package, need to
// define these here
rlimitAs = 9
rlimitCore = 4
rlimitCPU = 0
rlimitData = 2
rlimitFsize = 1
rlimitLocks = 10
rlimitMemlock = 8
rlimitMsgqueue = 12
rlimitNice = 13
rlimitNofile = 7
rlimitNproc = 6
rlimitRss = 5
rlimitRtprio = 14
rlimitRttime = 15
rlimitSigpending = 11
rlimitStack = 3
)
var ulimitNameMapping = map[string]int{
//"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
"core": rlimitCore,
"cpu": rlimitCPU,
"data": rlimitData,
"fsize": rlimitFsize,
"locks": rlimitLocks,
"memlock": rlimitMemlock,
"msgqueue": rlimitMsgqueue,
"nice": rlimitNice,
"nofile": rlimitNofile,
"nproc": rlimitNproc,
"rss": rlimitRss,
"rtprio": rlimitRtprio,
"rttime": rlimitRttime,
"sigpending": rlimitSigpending,
"stack": rlimitStack,
}
// ParseUlimit parses and returns a Ulimit from the specified string.
func ParseUlimit(val string) (*Ulimit, error) {
parts := strings.SplitN(val, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("invalid ulimit argument: %s", val)
}
if _, exists := ulimitNameMapping[parts[0]]; !exists {
return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
}
var (
soft int64
hard = &soft // default to soft in case no hard was set
temp int64
err error
)
switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
case 2:
temp, err = strconv.ParseInt(limitVals[1], 10, 64)
if err != nil {
return nil, err
}
hard = &temp
fallthrough
case 1:
soft, err = strconv.ParseInt(limitVals[0], 10, 64)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
}
if soft > *hard {
return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
}
return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
}
// GetRlimit returns the RLimit corresponding to Ulimit.
func (u *Ulimit) GetRlimit() (*Rlimit, error) {
t, exists := ulimitNameMapping[u.Name]
if !exists {
return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
}
return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
}
func (u *Ulimit) String() string {
return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
}
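A short usage sketch for the ulimit helpers above (illustrative only, assuming the upstream import path):

package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// "name=soft:hard" parses into a Ulimit; a single value sets both limits.
	u, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(u)             // nofile=1024:2048
	fmt.Println(u.GetRlimit()) // &{7 2048 1024} <nil> (Type=rlimitNofile)

	single, _ := units.ParseUlimit("nproc=512")
	fmt.Println(single.Soft, single.Hard) // 512 512
}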

View File

@@ -123,6 +123,12 @@ func (s *Session) IsClosed() bool {
 	}
 }

+// CloseChan returns a read-only channel which is closed as
+// soon as the session is closed.
+func (s *Session) CloseChan() <-chan struct{} {
+	return s.shutdownCh
+}
+
 // NumStreams returns the number of currently open streams
 func (s *Session) NumStreams() int {
 	s.streamLock.Lock()

@@ -303,8 +309,10 @@ func (s *Session) keepalive() {
 		case <-time.After(s.config.KeepAliveInterval):
 			_, err := s.Ping()
 			if err != nil {
-				s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
-				s.exitErr(ErrKeepAliveTimeout)
+				if err != ErrSessionShutdown {
+					s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+					s.exitErr(ErrKeepAliveTimeout)
+				}
 				return
 			}
 		case <-s.shutdownCh:

@@ -323,8 +331,17 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error {
 // potential shutdown. Since there's the expectation that sends can happen
 // in a timely manner, we enforce the connection write timeout here.
 func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
-	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
-	defer timer.Stop()
+	t := timerPool.Get()
+	timer := t.(*time.Timer)
+	timer.Reset(s.config.ConnectionWriteTimeout)
+	defer func() {
+		timer.Stop()
+		select {
+		case <-timer.C:
+		default:
+		}
+		timerPool.Put(t)
+	}()

 	ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
 	select {

@@ -349,8 +366,17 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e
 // the send happens right here, we enforce the connection write timeout if we
 // can't queue the header to be sent.
 func (s *Session) sendNoWait(hdr header) error {
-	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
-	defer timer.Stop()
+	t := timerPool.Get()
+	timer := t.(*time.Timer)
+	timer.Reset(s.config.ConnectionWriteTimeout)
+	defer func() {
+		timer.Stop()
+		select {
+		case <-timer.C:
+		default:
+		}
+		timerPool.Put(t)
+	}()

 	select {
 	case s.sendCh <- sendReady{Hdr: hdr}:

@@ -408,11 +434,20 @@ func (s *Session) recv() {
 	}
 }

+// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type
+var (
+	handlers = []func(*Session, header) error{
+		typeData:         (*Session).handleStreamMessage,
+		typeWindowUpdate: (*Session).handleStreamMessage,
+		typePing:         (*Session).handlePing,
+		typeGoAway:       (*Session).handleGoAway,
+	}
+)
+
 // recvLoop continues to receive data until a fatal error is encountered
 func (s *Session) recvLoop() error {
 	defer close(s.recvDoneCh)
 	hdr := header(make([]byte, headerSize))
-	var handler func(header) error
 	for {
 		// Read the header
 		if _, err := io.ReadFull(s.bufRead, hdr); err != nil {

@@ -428,22 +463,12 @@ func (s *Session) recvLoop() error {
 			return ErrInvalidVersion
 		}

-		// Switch on the type
-		switch hdr.MsgType() {
-		case typeData:
-			handler = s.handleStreamMessage
-		case typeWindowUpdate:
-			handler = s.handleStreamMessage
-		case typeGoAway:
-			handler = s.handleGoAway
-		case typePing:
-			handler = s.handlePing
-		default:
+		mt := hdr.MsgType()
+		if mt < typeData || mt > typeGoAway {
 			return ErrInvalidMsgType
 		}

-		// Invoke the handler
-		if err := handler(hdr); err != nil {
+		if err := handlers[mt](s, hdr); err != nil {
 			return err
 		}
 	}

View File

@@ -47,8 +47,8 @@ type Stream struct {
 	recvNotifyCh chan struct{}
 	sendNotifyCh chan struct{}

-	readDeadline  time.Time
-	writeDeadline time.Time
+	readDeadline  atomic.Value // time.Time
+	writeDeadline atomic.Value // time.Time
 }

 // newStream is used to construct a new stream within

@@ -67,6 +67,8 @@ func newStream(session *Session, id uint32, state streamState) *Stream {
 		recvNotifyCh: make(chan struct{}, 1),
 		sendNotifyCh: make(chan struct{}, 1),
 	}
+	s.readDeadline.Store(time.Time{})
+	s.writeDeadline.Store(time.Time{})
 	return s
 }

@@ -122,8 +124,9 @@ START:
 WAIT:
 	var timeout <-chan time.Time
 	var timer *time.Timer
-	if !s.readDeadline.IsZero() {
-		delay := s.readDeadline.Sub(time.Now())
+	readDeadline := s.readDeadline.Load().(time.Time)
+	if !readDeadline.IsZero() {
+		delay := readDeadline.Sub(time.Now())
 		timer = time.NewTimer(delay)
 		timeout = timer.C
 	}

@@ -188,7 +191,7 @@ START:
 	// Send the header
 	s.sendHdr.encode(typeData, flags, s.id, max)
-	if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+	if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
 		return 0, err
 	}

@@ -200,8 +203,9 @@ START:
 WAIT:
 	var timeout <-chan time.Time
-	if !s.writeDeadline.IsZero() {
-		delay := s.writeDeadline.Sub(time.Now())
+	writeDeadline := s.writeDeadline.Load().(time.Time)
+	if !writeDeadline.IsZero() {
+		delay := writeDeadline.Sub(time.Now())
 		timeout = time.After(delay)
 	}
 	select {

@@ -238,18 +242,25 @@ func (s *Stream) sendWindowUpdate() error {
 	// Determine the delta update
 	max := s.session.config.MaxStreamWindowSize
-	delta := max - atomic.LoadUint32(&s.recvWindow)
+	var bufLen uint32
+	s.recvLock.Lock()
+	if s.recvBuf != nil {
+		bufLen = uint32(s.recvBuf.Len())
+	}
+	delta := (max - bufLen) - s.recvWindow

 	// Determine the flags if any
 	flags := s.sendFlags()

 	// Check if we can omit the update
 	if delta < (max/2) && flags == 0 {
+		s.recvLock.Unlock()
 		return nil
 	}

 	// Update our window
-	atomic.AddUint32(&s.recvWindow, delta)
+	s.recvWindow += delta
+	s.recvLock.Unlock()

 	// Send the header
 	s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)

@@ -392,16 +403,18 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	if length == 0 {
 		return nil
 	}
-	if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
-		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
-		return ErrRecvWindowExceeded
-	}

 	// Wrap in a limited reader
 	conn = &io.LimitedReader{R: conn, N: int64(length)}

 	// Copy into buffer
 	s.recvLock.Lock()
+
+	if length > s.recvWindow {
+		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
+		return ErrRecvWindowExceeded
+	}
+
 	if s.recvBuf == nil {
 		// Allocate the receive buffer just-in-time to fit the full data frame.
 		// This way we can read in the whole packet without further allocations.

@@ -414,7 +427,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	}

 	// Decrement the receive window
-	atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+	s.recvWindow -= length
 	s.recvLock.Unlock()

 	// Unblock any readers

@@ -435,13 +448,13 @@ func (s *Stream) SetDeadline(t time.Time) error {

 // SetReadDeadline sets the deadline for future Read calls.
 func (s *Stream) SetReadDeadline(t time.Time) error {
-	s.readDeadline = t
+	s.readDeadline.Store(t)
 	return nil
 }

 // SetWriteDeadline sets the deadline for future Write calls
 func (s *Stream) SetWriteDeadline(t time.Time) error {
-	s.writeDeadline = t
+	s.writeDeadline.Store(t)
 	return nil
 }

View File

@@ -1,5 +1,20 @@
 package yamux

+import (
+	"sync"
+	"time"
+)
+
+var (
+	timerPool = &sync.Pool{
+		New: func() interface{} {
+			timer := time.NewTimer(time.Hour * 1e6)
+			timer.Stop()
+			return timer
+		},
+	}
+)
+
 // asyncSendErr is used to try an async send of an error
 func asyncSendErr(ch chan error, err error) {
 	if ch == nil {
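The pool above hands out stopped timers; callers re-arm one with Reset and drain its channel before returning it, which is exactly what the waitForSendErr/sendNoWait changes do. A self-contained sketch of the same borrow/reset/drain pattern (the helper name is hypothetical):

package main

import (
	"fmt"
	"sync"
	"time"
)

// timerPool mirrors the pool introduced above: timers are created already
// stopped, re-armed with Reset for each wait, and drained before being put
// back so a stale expiry never leaks to the next user.
var timerPool = &sync.Pool{
	New: func() interface{} {
		timer := time.NewTimer(time.Hour * 1e6)
		timer.Stop()
		return timer
	},
}

// waitOrTimeout is a hypothetical helper showing the borrow/reset/drain cycle.
func waitOrTimeout(ch <-chan struct{}, timeout time.Duration) error {
	t := timerPool.Get()
	timer := t.(*time.Timer)
	timer.Reset(timeout)
	defer func() {
		timer.Stop()
		select {
		case <-timer.C: // drain a timer that already fired
		default:
		}
		timerPool.Put(t)
	}()

	select {
	case <-ch:
		return nil
	case <-timer.C:
		return fmt.Errorf("timed out after %v", timeout)
	}
}

func main() {
	done := make(chan struct{})
	close(done)
	fmt.Println(waitOrTimeout(done, time.Second)) // <nil>
}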

View File

@@ -1126,6 +1126,10 @@ type Memory struct {
 	// MaxMem is the maximum amount of memory that can be made available
 	// to the guest through e.g. hot pluggable memory.
 	MaxMem string
+
+	// Path is the file path of the memory device. It points to a local
+	// file path used by FileBackedMem.
+	Path string
 }

 // Kernel is the guest kernel configuration structure.

@@ -1167,10 +1171,20 @@ type Knobs struct {
 	// MemPrealloc will allocate all the RAM upfront
 	MemPrealloc bool

+	// FileBackedMem requires Memory.Size and Memory.Path of the VM to
+	// be set.
+	FileBackedMem bool
+
+	// FileBackedMemShared will set the FileBackedMem device as shared.
+	FileBackedMemShared bool
+
 	// Mlock will control locking of memory
 	// Only active when Realtime is set to true
 	Mlock bool

+	// Stopped will not start guest CPU at startup
+	Stopped bool
+
 	// Realtime will enable realtime QEMU
 	Realtime bool
 }

@@ -1180,6 +1194,24 @@ type IOThread struct {
 	ID string
 }

+const (
+	// MigrationFD is the migration incoming type based on open file descriptor.
+	// Skip default 0 so that it must be set on purpose.
+	MigrationFD = 1
+
+	// MigrationExec is the migration incoming type based on commands.
+	MigrationExec = 2
+)
+
+// Incoming controls migration source preparation
+type Incoming struct {
+	// Possible values are MigrationFD, MigrationExec
+	MigrationType int
+	// Only valid if MigrationType == MigrationFD
+	FD *os.File
+	// Only valid if MigrationType == MigrationExec
+	Exec string
+}
+
 // Config is the qemu configuration structure.
 // It allows for passing custom settings and parameters to the qemu API.
 type Config struct {

@@ -1231,6 +1263,9 @@ type Config struct {
 	// Bios is the -bios parameter
 	Bios string

+	// Incoming controls migration source preparation
+	Incoming Incoming
+
 	// fds is a list of open file descriptors to be passed to the spawned qemu process
 	fds []*os.File

@@ -1433,23 +1468,7 @@ func (config *Config) appendKernel() {
 	}
 }

-func (config *Config) appendKnobs() {
-	if config.Knobs.NoUserConfig == true {
-		config.qemuParams = append(config.qemuParams, "-no-user-config")
-	}
-
-	if config.Knobs.NoDefaults == true {
-		config.qemuParams = append(config.qemuParams, "-nodefaults")
-	}
-
-	if config.Knobs.NoGraphic == true {
-		config.qemuParams = append(config.qemuParams, "-nographic")
-	}
-
-	if config.Knobs.Daemonize == true {
-		config.qemuParams = append(config.qemuParams, "-daemonize")
-	}
-
+func (config *Config) appendMemoryKnobs() {
 	if config.Knobs.HugePages == true {
 		if config.Memory.Size != "" {
 			dimmName := "dimm1"

@@ -1474,7 +1493,42 @@
 			config.qemuParams = append(config.qemuParams, "-device")
 			config.qemuParams = append(config.qemuParams, deviceMemParam)
 		}
+	} else if config.Knobs.FileBackedMem == true {
+		if config.Memory.Size != "" && config.Memory.Path != "" {
+			dimmName := "dimm1"
+			objMemParam := "memory-backend-file,id=" + dimmName + ",size=" + config.Memory.Size + ",mem-path=" + config.Memory.Path
+			if config.Knobs.FileBackedMemShared == true {
+				objMemParam += ",share=on"
+			}
+			numaMemParam := "node,memdev=" + dimmName
+
+			config.qemuParams = append(config.qemuParams, "-object")
+			config.qemuParams = append(config.qemuParams, objMemParam)
+
+			config.qemuParams = append(config.qemuParams, "-numa")
+			config.qemuParams = append(config.qemuParams, numaMemParam)
+		}
 	}
+}
+
+func (config *Config) appendKnobs() {
+	if config.Knobs.NoUserConfig == true {
+		config.qemuParams = append(config.qemuParams, "-no-user-config")
+	}
+
+	if config.Knobs.NoDefaults == true {
+		config.qemuParams = append(config.qemuParams, "-nodefaults")
+	}
+
+	if config.Knobs.NoGraphic == true {
+		config.qemuParams = append(config.qemuParams, "-nographic")
+	}
+
+	if config.Knobs.Daemonize == true {
+		config.qemuParams = append(config.qemuParams, "-daemonize")
+	}
+
+	config.appendMemoryKnobs()
+
 	if config.Knobs.Realtime == true {
 		config.qemuParams = append(config.qemuParams, "-realtime")

@@ -1495,6 +1549,10 @@
 			config.qemuParams = append(config.qemuParams, "mlock=off")
 		}
 	}
+
+	if config.Knobs.Stopped == true {
+		config.qemuParams = append(config.qemuParams, "-S")
+	}
 }

 func (config *Config) appendBios() {

@@ -1513,6 +1571,20 @@ func (config *Config) appendIOThreads() {
 	}
 }

+func (config *Config) appendIncoming() {
+	var uri string
+	switch config.Incoming.MigrationType {
+	case MigrationExec:
+		uri = fmt.Sprintf("exec:%s", config.Incoming.Exec)
+	case MigrationFD:
+		chFDs := config.appendFDs([]*os.File{config.Incoming.FD})
+		uri = fmt.Sprintf("fd:%d", chFDs[0])
+	default:
+		return
+	}
+	config.qemuParams = append(config.qemuParams, "-S", "-incoming", uri)
+}
+
 // LaunchQemu can be used to launch a new qemu instance.
 //
 // The Config parameter contains a set of qemu parameters and settings.

@@ -1537,6 +1609,7 @@ func LaunchQemu(config Config, logger QMPLog) (string, error) {
 	config.appendKernel()
 	config.appendBios()
 	config.appendIOThreads()
+	config.appendIncoming()

 	if err := config.appendCPUs(); err != nil {
 		return "", err
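To make the new knobs concrete, here is a hedged sketch of the parameters appendMemoryKnobs generates for a file-backed configuration; the size and path values are illustrative, and the Config fields are assumed to match the vendored package:

package main

import (
	"fmt"

	govmmQemu "github.com/intel/govmm/qemu"
)

func main() {
	// File-backed guest RAM: Memory.Size and Memory.Path must both be set.
	cfg := govmmQemu.Config{
		Memory: govmmQemu.Memory{Size: "2048M", Path: "/dev/shm"},
		Knobs: govmmQemu.Knobs{
			FileBackedMem:       true,
			FileBackedMemShared: true,
		},
	}
	// Per the hunk above, appendKnobs -> appendMemoryKnobs would append:
	//   -object memory-backend-file,id=dimm1,size=2048M,mem-path=/dev/shm,share=on
	//   -numa node,memdev=dimm1
	fmt.Printf("%+v\n", cfg.Knobs)
}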

View File

@@ -828,3 +828,56 @@ func (q *QMP) ExecuteQueryHotpluggableCPUs(ctx context.Context) ([]HotpluggableC
 	return cpus, nil
 }

+// ExecSetMigrationCaps sets migration capabilities
+func (q *QMP) ExecSetMigrationCaps(ctx context.Context, caps []map[string]interface{}) error {
+	args := map[string]interface{}{
+		"capabilities": caps,
+	}
+
+	return q.executeCommand(ctx, "migrate-set-capabilities", args, nil)
+}
+
+// ExecSetMigrateArguments sets the command line used for migration
+func (q *QMP) ExecSetMigrateArguments(ctx context.Context, url string) error {
+	args := map[string]interface{}{
+		"uri": url,
+	}
+
+	return q.executeCommand(ctx, "migrate", args, nil)
+}
+
+// ExecHotplugMemory adds size of MiB memory to the guest
+func (q *QMP) ExecHotplugMemory(ctx context.Context, qomtype, id, mempath string, size int) error {
+	args := map[string]interface{}{
+		"qom-type": qomtype,
+		"id":       id,
+		"props":    map[string]interface{}{"size": uint64(size) << 20},
+	}
+	if mempath != "" {
+		args["mem-path"] = mempath
+	}
+	err := q.executeCommand(ctx, "object-add", args, nil)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err != nil {
+			q.cfg.Logger.Errorf("Unable to hotplug memory device: %v", err)
+			err = q.executeCommand(ctx, "object-del", map[string]interface{}{"id": id}, nil)
+			if err != nil {
+				q.cfg.Logger.Warningf("Unable to clean up memory object: %v", err)
+			}
+		}
+	}()
+
+	args = map[string]interface{}{
+		"driver": "pc-dimm",
+		"id":     "dimm" + id,
+		"memdev": id,
+	}
+	err = q.executeCommand(ctx, "device_add", args, nil)
+	return err
+}
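A hedged usage sketch for the new QMP helper, assuming an already connected *QMP and a live context (setup omitted):

package main

import (
	"context"

	govmmQemu "github.com/intel/govmm/qemu"
)

// hotplug256MiB hot-adds 256 MiB of RAM backed by "memory-backend-ram".
// Per the hunk above, ExecHotplugMemory issues object-add (size 256 << 20
// bytes) followed by device_add of a pc-dimm ("dimm" + id), and deletes the
// backend object again if device_add fails.
func hotplug256MiB(ctx context.Context, q *govmmQemu.QMP) error {
	return q.ExecHotplugMemory(ctx, "memory-backend-ram", "mem1", "", 256)
}

func main() {} // placeholder so the sketch compiles standalone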

View File

@@ -76,8 +76,16 @@ const (
 	// CPUDevice is CPU device type
 	cpuDev
+
+	// memoryDevice is memory device type
+	memoryDev
 )

+type memoryDevice struct {
+	slot   int
+	sizeMB int
+}
+
 // Set sets an hypervisor type based on the input string.
 func (hType *HypervisorType) Set(value string) error {
 	switch value {

View File

@@ -7,6 +7,7 @@ package virtcontainers
 import (
 	"context"
+	"errors"
 	"fmt"
 	"os"
 	"path/filepath"

@@ -39,12 +40,15 @@ type CPUDevice struct {
 type QemuState struct {
 	Bridges []Bridge
 	// HotpluggedCPUs is the list of CPUs that were hot-added
 	HotpluggedVCPUs []CPUDevice
-	UUID            string
+	HotpluggedMemory int
+	UUID             string
 }

 // qemu is an Hypervisor interface implementation for the Linux qemu hypervisor.
 type qemu struct {
+	vmConfig Resources
+
 	config HypervisorConfig

 	qmpMonitorCh qmpChannel

@@ -169,6 +173,7 @@ func (q *qemu) init(sandbox *Sandbox) error {
 		return err
 	}

+	q.vmConfig = sandbox.config.VMConfig
 	q.config = sandbox.config.HypervisorConfig
 	q.sandbox = sandbox
 	q.arch = newQemuArch(q.config)

@@ -204,20 +209,27 @@ func (q *qemu) cpuTopology() govmmQemu.SMP {
 	return q.arch.cpuTopology(q.config.DefaultVCPUs, q.config.DefaultMaxVCPUs)
 }

-func (q *qemu) memoryTopology(sandboxConfig SandboxConfig) (govmmQemu.Memory, error) {
+func (q *qemu) hostMemMB() (uint64, error) {
 	hostMemKb, err := getHostMemorySizeKb(procMemInfo)
 	if err != nil {
-		return govmmQemu.Memory{}, fmt.Errorf("Unable to read memory info: %s", err)
+		return 0, fmt.Errorf("Unable to read memory info: %s", err)
 	}
 	if hostMemKb == 0 {
-		return govmmQemu.Memory{}, fmt.Errorf("Error host memory size 0")
+		return 0, fmt.Errorf("Error host memory size 0")
 	}

-	hostMemMb := uint64(float64(hostMemKb / 1024))
+	return hostMemKb / 1024, nil
+}
+
+func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
+	hostMemMb, err := q.hostMemMB()
+	if err != nil {
+		return govmmQemu.Memory{}, err
+	}

 	memMb := uint64(q.config.DefaultMemSz)
-	if sandboxConfig.VMConfig.Memory > 0 {
-		memMb = uint64(sandboxConfig.VMConfig.Memory)
+	if q.vmConfig.Memory > 0 {
+		memMb = uint64(q.vmConfig.Memory)
 	}

 	return q.arch.memoryTopology(memMb, hostMemMb), nil

@@ -271,7 +283,7 @@ func (q *qemu) createSandbox(sandboxConfig SandboxConfig) error {
 	smp := q.cpuTopology()

-	memory, err := q.memoryTopology(sandboxConfig)
+	memory, err := q.memoryTopology()
 	if err != nil {
 		return err
 	}

@@ -705,6 +717,9 @@ func (q *qemu) hotplugDevice(devInfo interface{}, devType deviceType, op operati
 		// TODO: find a way to remove dependency of deviceDrivers lib @weizhang555
 		device := devInfo.(deviceDrivers.VFIODevice)
 		return nil, q.hotplugVFIODevice(device, op)
+	case memoryDev:
+		memdev := devInfo.(*memoryDevice)
+		return nil, q.hotplugMemory(memdev, op)
 	default:
 		return nil, fmt.Errorf("cannot hotplug device: unsupported device type '%v'", devType)
 	}

@@ -837,6 +852,63 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
 	return amount, q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
 }

+func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error {
+	if memDev.sizeMB < 0 {
+		return fmt.Errorf("cannot hotplug negative size (%d) memory", memDev.sizeMB)
+	}
+
+	// We do not support memory hot unplug.
+	if op == removeDevice {
+		return errors.New("cannot hot unplug memory device")
+	}
+
+	maxMem, err := q.hostMemMB()
+	if err != nil {
+		return err
+	}
+
+	// calculate current memory
+	currentMemory := int(q.config.DefaultMemSz)
+	if q.vmConfig.Memory > 0 {
+		currentMemory = int(q.vmConfig.Memory)
+	}
+	currentMemory += q.state.HotpluggedMemory
+
+	// Don't exceed the maximum amount of memory
+	if currentMemory+memDev.sizeMB > int(maxMem) {
+		return fmt.Errorf("Unable to hotplug %d MiB memory, the SB has %d MiB and the maximum amount is %d MiB",
+			memDev.sizeMB, currentMemory, q.config.DefaultMemSz)
+	}
+
+	return q.hotplugAddMemory(memDev)
+}
+
+func (q *qemu) hotplugAddMemory(memDev *memoryDevice) error {
+	// setup qmp channel if necessary
+	if q.qmpMonitorCh.qmp == nil {
+		qmp, err := q.qmpSetup()
+		if err != nil {
+			return err
+		}
+		q.qmpMonitorCh.qmp = qmp
+		defer func() {
+			qmp.Shutdown()
+			q.qmpMonitorCh.qmp = nil
+		}()
+	}
+
+	err := q.qmpMonitorCh.qmp.ExecHotplugMemory(q.qmpMonitorCh.ctx, "memory-backend-ram", "mem"+strconv.Itoa(memDev.slot), "", memDev.sizeMB)
+	if err != nil {
+		q.Logger().WithError(err).Error("hotplug memory")
+		return err
+	}
+
+	q.state.HotpluggedMemory += memDev.sizeMB
+	return q.sandbox.storage.storeHypervisorState(q.sandbox.id, q.state)
+}
+
 func (q *qemu) pauseSandbox() error {
 	return q.togglePauseSandbox(true)
 }
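For orientation, a hypothetical in-package helper (it would live in package virtcontainers alongside the code above) showing how the new memory hotplug path is driven; the guard rails it can hit are the ones in hotplugMemory:

// hotplug512MiB asks the qemu hypervisor to hot-add 512 MiB into slot 0.
// hotplugMemory rejects negative sizes, refuses removeDevice (no hot unplug),
// and caps the total at the host memory size before calling hotplugAddMemory.
func hotplug512MiB(q *qemu) error {
	_, err := q.hotplugAddDevice(&memoryDevice{slot: 0, sizeMB: 512}, memoryDev)
	return err
}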

View File

@@ -167,15 +167,11 @@ func TestQemuMemoryTopology(t *testing.T) {
 		MaxMem: memMax,
 	}

-	vmConfig := Resources{
+	q.vmConfig = Resources{
 		Memory: uint(mem),
 	}

-	sandboxConfig := SandboxConfig{
-		VMConfig: vmConfig,
-	}
-
-	memory, err := q.memoryTopology(sandboxConfig)
+	memory, err := q.memoryTopology()
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -323,3 +319,29 @@ func TestQemuQemuPath(t *testing.T) {
 	assert.Error(err)
 	assert.Equal(path, "")
 }
+
+func TestHotplugRemoveMemory(t *testing.T) {
+	assert := assert.New(t)
+
+	qemuConfig := newQemuConfig()
+	q := &qemu{
+		config: qemuConfig,
+	}
+
+	_, err := q.hotplugRemoveDevice(&memoryDevice{0, 128}, memoryDev)
+	assert.Error(err)
+}
+
+func TestHotplugUnsupportedDeviceType(t *testing.T) {
+	assert := assert.New(t)
+
+	qemuConfig := newQemuConfig()
+	q := &qemu{
+		config: qemuConfig,
+	}
+
+	_, err := q.hotplugAddDevice(&memoryDevice{0, 128}, fsDev)
+	assert.Error(err)
+
+	_, err = q.hotplugRemoveDevice(&memoryDevice{0, 128}, fsDev)
+	assert.Error(err)
+}