hypervisor: Export generic interface methods

This is in preparation for creating a separate hypervisor package.
Non-functional change.

Signed-off-by: Manohar Castelino <mcastelino@apple.com>
Manohar Castelino 2021-09-20 10:35:28 -07:00 committed by Eric Ernst
parent 6baf2586ee
commit 4d47aeef2e
37 changed files with 341 additions and 341 deletions
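Background for the renames below: Go makes an identifier visible outside its package only when the name begins with an upper-case letter, so moving this code into its own hypervisor package first requires exporting every method that callers such as virtcontainers will still need. A minimal sketch of that rule (illustrative only; the type and values here are not taken from this commit):

package main

import "fmt"

type acrn struct{}

// GetPids is exported: after the package split, code outside the
// hypervisor package could still call it.
func (a *acrn) GetPids() []int { return []int{1234} }

// getVirtioFsPid is unexported: it would become unreachable from
// other packages once the code moves.
func (a *acrn) getVirtioFsPid() *int { return nil }

func main() {
    fmt.Println((&acrn{}).GetPids()) // prints [1234]
}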


@@ -155,14 +155,14 @@ func (a *Acrn) kernelParameters() string {
 }
 // Adds all capabilities supported by Acrn implementation of hypervisor interface
-func (a *Acrn) capabilities(ctx context.Context) types.Capabilities {
-    span, _ := katatrace.Trace(ctx, a.Logger(), "capabilities", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+func (a *Acrn) Capabilities(ctx context.Context) types.Capabilities {
+    span, _ := katatrace.Trace(ctx, a.Logger(), "Capabilities", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     return a.arch.capabilities()
 }
-func (a *Acrn) hypervisorConfig() HypervisorConfig {
+func (a *Acrn) HypervisorConfig() HypervisorConfig {
     return a.config
 }
@@ -248,7 +248,7 @@ func (a *Acrn) buildDevices(ctx context.Context, imagePath string) ([]Device, er
         return nil, fmt.Errorf("Image Path should not be empty: %s", imagePath)
     }
-    _, console, err := a.getSandboxConsole(ctx, a.id)
+    _, console, err := a.GetSandboxConsole(ctx, a.id)
     if err != nil {
         return nil, err
     }
@@ -501,7 +501,7 @@ func (a *Acrn) stopSandbox(ctx context.Context, waitOnly bool) (err error) {
     Idx := acrnUUIDsToIdx[uuid]
     if err = a.loadInfo(); err != nil {
-        a.Logger().Info("Failed to load UUID availabiity info")
+        a.Logger().Info("Failed to Load UUID availabiity info")
         return err
     }
@@ -554,8 +554,8 @@ func (a *Acrn) updateBlockDevice(drive *config.BlockDrive) error {
     return err
 }
-func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
-    span, _ := katatrace.Trace(ctx, a.Logger(), "hotplugAddDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+func (a *Acrn) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
+    span, _ := katatrace.Trace(ctx, a.Logger(), "HotplugAddDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     switch devType {
@@ -563,13 +563,13 @@ func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devTyp
         //The drive placeholder has to exist prior to Update
         return nil, a.updateBlockDevice(devInfo.(*config.BlockDrive))
     default:
-        return nil, fmt.Errorf("hotplugAddDevice: unsupported device: devInfo:%v, deviceType%v",
+        return nil, fmt.Errorf("HotplugAddDevice: unsupported device: devInfo:%v, deviceType%v",
             devInfo, devType)
     }
 }
-func (a *Acrn) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
-    span, _ := katatrace.Trace(ctx, a.Logger(), "hotplugRemoveDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+func (a *Acrn) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
+    span, _ := katatrace.Trace(ctx, a.Logger(), "HotplugRemoveDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     // Not supported. return success
@@ -596,9 +596,9 @@ func (a *Acrn) resumeSandbox(ctx context.Context) error {
 }
 // addDevice will add extra devices to acrn command line.
-func (a *Acrn) addDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
+func (a *Acrn) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
     var err error
-    span, _ := katatrace.Trace(ctx, a.Logger(), "addDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+    span, _ := katatrace.Trace(ctx, a.Logger(), "AddDevice", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     switch v := devInfo.(type) {
@@ -630,8 +630,8 @@ func (a *Acrn) addDevice(ctx context.Context, devInfo interface{}, devType Devic
 // getSandboxConsole builds the path of the console where we can read
 // logs coming from the sandbox.
-func (a *Acrn) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
-    span, _ := katatrace.Trace(ctx, a.Logger(), "getSandboxConsole", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+func (a *Acrn) GetSandboxConsole(ctx context.Context, id string) (string, string, error) {
+    span, _ := katatrace.Trace(ctx, a.Logger(), "GetSandboxConsole", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     consoleURL, err := utils.BuildSocketPath(a.store.RunVMStoragePath(), id, acrnConsoleSocket)
@@ -643,22 +643,22 @@ func (a *Acrn) getSandboxConsole(ctx context.Context, id string) (string, string
 }
 func (a *Acrn) saveSandbox() error {
-    a.Logger().Info("save sandbox")
+    a.Logger().Info("Save sandbox")
     // Not supported. return success
     return nil
 }
-func (a *Acrn) disconnect(ctx context.Context) {
-    span, _ := katatrace.Trace(ctx, a.Logger(), "disconnect", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+func (a *Acrn) Disconnect(ctx context.Context) {
+    span, _ := katatrace.Trace(ctx, a.Logger(), "Disconnect", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     // Not supported.
 }
-func (a *Acrn) getThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
-    span, _ := katatrace.Trace(ctx, a.Logger(), "getThreadIDs", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+func (a *Acrn) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
+    span, _ := katatrace.Trace(ctx, a.Logger(), "GetThreadIDs", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     // Not supported. return success
@@ -667,26 +667,26 @@ func (a *Acrn) getThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
     return VcpuThreadIDs{}, nil
 }
-func (a *Acrn) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
+func (a *Acrn) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
     return 0, MemoryDevice{}, nil
 }
-func (a *Acrn) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (a *Acrn) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
     return 0, 0, nil
 }
-func (a *Acrn) cleanup(ctx context.Context) error {
-    span, _ := katatrace.Trace(ctx, a.Logger(), "cleanup", acrnTracingTags, map[string]string{"sandbox_id": a.id})
+func (a *Acrn) Cleanup(ctx context.Context) error {
+    span, _ := katatrace.Trace(ctx, a.Logger(), "Cleanup", acrnTracingTags, map[string]string{"sandbox_id": a.id})
     defer span.End()
     return nil
 }
-func (a *Acrn) getPids() []int {
+func (a *Acrn) GetPids() []int {
     return []int{a.state.PID}
 }
-func (a *Acrn) getVirtioFsPid() *int {
+func (a *Acrn) GetVirtioFsPid() *int {
     return nil
 }
@@ -698,19 +698,19 @@ func (a *Acrn) toGrpc(ctx context.Context) ([]byte, error) {
     return nil, errors.New("acrn is not supported by VM cache")
 }
-func (a *Acrn) save() (s persistapi.HypervisorState) {
+func (a *Acrn) Save() (s persistapi.HypervisorState) {
     s.Pid = a.state.PID
     s.Type = string(AcrnHypervisor)
     s.UUID = a.state.UUID
     return
 }
-func (a *Acrn) load(s persistapi.HypervisorState) {
+func (a *Acrn) Load(s persistapi.HypervisorState) {
     a.state.PID = s.Pid
     a.state.UUID = s.UUID
 }
-func (a *Acrn) check() error {
+func (a *Acrn) Check() error {
     if err := syscall.Kill(a.state.PID, syscall.Signal(0)); err != nil {
         return errors.Wrapf(err, "failed to ping acrn process")
     }
@@ -718,7 +718,7 @@ func (a *Acrn) check() error {
     return nil
 }
-func (a *Acrn) generateSocket(id string) (interface{}, error) {
+func (a *Acrn) GenerateSocket(id string) (interface{}, error) {
     return generateVMSocket(id, a.store.RunVMStoragePath())
 }
@@ -810,7 +810,7 @@ func (a *Acrn) loadInfo() error {
     return nil
 }
-func (a *Acrn) isRateLimiterBuiltin() bool {
+func (a *Acrn) IsRateLimiterBuiltin() bool {
     return false
 }


@@ -77,7 +77,7 @@ func TestAcrnCapabilities(t *testing.T) {
         arch: &acrnArchBase{},
     }
-    caps := a.capabilities(a.ctx)
+    caps := a.Capabilities(a.ctx)
     assert.True(caps.IsBlockDeviceSupported())
     assert.True(caps.IsBlockDeviceHotplugSupported())
 }
@@ -89,7 +89,7 @@ func testAcrnAddDevice(t *testing.T, devInfo interface{}, devType DeviceType, ex
         arch: &acrnArchBase{},
     }
-    err := a.addDevice(context.Background(), devInfo, devType)
+    err := a.AddDevice(context.Background(), devInfo, devType)
     assert.NoError(err)
     assert.Exactly(a.acrnConfig.Devices, expected)
 }
@@ -144,7 +144,7 @@ func TestAcrnHotplugUnsupportedDeviceType(t *testing.T) {
         config: acrnConfig,
     }
-    _, err := a.hotplugAddDevice(a.ctx, &MemoryDevice{0, 128, uint64(0), false}, FsDev)
+    _, err := a.HotplugAddDevice(a.ctx, &MemoryDevice{0, 128, uint64(0), false}, FsDev)
     assert.Error(err)
 }
@@ -205,7 +205,7 @@ func TestAcrnGetSandboxConsole(t *testing.T) {
     sandboxID := "testSandboxID"
     expected := filepath.Join(a.store.RunVMStoragePath(), sandboxID, consoleSocket)
-    proto, result, err := a.getSandboxConsole(a.ctx, sandboxID)
+    proto, result, err := a.GetSandboxConsole(a.ctx, sandboxID)
     assert.NoError(err)
     assert.Equal(result, expected)
     assert.Equal(proto, consoleProtoUnix)


@@ -63,7 +63,7 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
         return nil, err
     }
-    // cleanup sandbox resources in case of any failure
+    // Cleanup sandbox resources in case of any failure
     defer func() {
         if err != nil {
             s.Delete(ctx)


@@ -100,7 +100,7 @@ func (endpoint *BridgedMacvlanEndpoint) Attach(ctx context.Context, s *Sandbox)
         return err
     }
-    return h.addDevice(ctx, endpoint, NetDev)
+    return h.AddDevice(ctx, endpoint, NetDev)
 }
 // Detach for the virtual endpoint tears down the tap and bridge


@@ -171,7 +171,7 @@ type cloudHypervisor struct {
 var clhKernelParams = []Param{
     {"root", "/dev/pmem0p1"},
     {"panic", "1"}, // upon kernel panic wait 1 second before reboot
-    {"no_timer_check", ""}, // do not check broken timer IRQ resources
+    {"no_timer_check", ""}, // do not Check broken timer IRQ resources
     {"noreplace-smp", ""}, // do not replace SMP instructions
     {"rootflags", "dax,data=ordered,errors=remount-ro ro"}, // mount the root filesystem as readonly
     {"rootfstype", "ext4"},
@@ -425,8 +425,8 @@ func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error
 // getSandboxConsole builds the path of the console where we can read
 // logs coming from the sandbox.
-func (clh *cloudHypervisor) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
-    clh.Logger().WithField("function", "getSandboxConsole").WithField("id", id).Info("Get Sandbox Console")
+func (clh *cloudHypervisor) GetSandboxConsole(ctx context.Context, id string) (string, string, error) {
+    clh.Logger().WithField("function", "GetSandboxConsole").WithField("id", id).Info("Get Sandbox Console")
     master, slave, err := console.NewPty()
     if err != nil {
         clh.Logger().WithError(err).Error("Error create pseudo tty")
@@ -437,13 +437,13 @@ func (clh *cloudHypervisor) getSandboxConsole(ctx context.Context, id string) (s
     return consoleProtoPty, slave, nil
 }
-func (clh *cloudHypervisor) disconnect(ctx context.Context) {
-    clh.Logger().WithField("function", "disconnect").Info("Disconnecting Sandbox Console")
+func (clh *cloudHypervisor) Disconnect(ctx context.Context) {
+    clh.Logger().WithField("function", "Disconnect").Info("Disconnecting Sandbox Console")
 }
-func (clh *cloudHypervisor) getThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
-    clh.Logger().WithField("function", "getThreadIDs").Info("get thread ID's")
+func (clh *cloudHypervisor) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
+    clh.Logger().WithField("function", "GetThreadIDs").Info("get thread ID's")
     var vcpuInfo VcpuThreadIDs
@@ -550,8 +550,8 @@ func (clh *cloudHypervisor) hotPlugVFIODevice(device *config.VFIODev) error {
     return err
 }
-func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
-    span, _ := katatrace.Trace(ctx, clh.Logger(), "hotplugAddDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
+func (clh *cloudHypervisor) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
+    span, _ := katatrace.Trace(ctx, clh.Logger(), "HotplugAddDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
     defer span.End()
     switch devType {
@@ -567,8 +567,8 @@ func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interf
 }
-func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
-    span, _ := katatrace.Trace(ctx, clh.Logger(), "hotplugRemoveDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
+func (clh *cloudHypervisor) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
+    span, _ := katatrace.Trace(ctx, clh.Logger(), "HotplugRemoveDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
     defer span.End()
     var deviceID string
@@ -580,7 +580,7 @@ func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo int
         deviceID = devInfo.(*config.VFIODev).ID
     default:
         clh.Logger().WithFields(log.Fields{"devInfo": devInfo,
-            "deviceType": devType}).Error("hotplugRemoveDevice: unsupported device")
+            "deviceType": devType}).Error("HotplugRemoveDevice: unsupported device")
         return nil, fmt.Errorf("Could not hot remove device: unsupported device: %v, type: %v",
             devInfo, devType)
     }
@@ -599,11 +599,11 @@ func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo int
     return nil, err
 }
-func (clh *cloudHypervisor) hypervisorConfig() HypervisorConfig {
+func (clh *cloudHypervisor) HypervisorConfig() HypervisorConfig {
     return clh.config
 }
-func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
+func (clh *cloudHypervisor) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
     // TODO: Add support for virtio-mem
@@ -624,7 +624,7 @@ func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, m
     currentMem := utils.MemUnit(info.Config.Memory.Size) * utils.Byte
     newMem := utils.MemUnit(reqMemMB) * utils.MiB
-    // Early check to verify if boot memory is the same as requested
+    // Early Check to verify if boot memory is the same as requested
     if currentMem == newMem {
         clh.Logger().WithField("memory", reqMemMB).Debugf("VM already has requested memory")
         return uint32(currentMem.ToMiB()), MemoryDevice{}, nil
@@ -645,7 +645,7 @@ func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, m
         newMem = alignedRequest
     }
-    // Check if memory is the same as requested, a second check is done
+    // Check if memory is the same as requested, a second Check is done
     // to consider the memory request now that is updated to be memory aligned
     if currentMem == newMem {
         clh.Logger().WithFields(log.Fields{"current-memory": currentMem, "new-memory": newMem}).Debug("VM already has requested memory(after alignment)")
@@ -669,27 +669,27 @@ func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, m
     return uint32(newMem.ToMiB()), MemoryDevice{SizeMB: int(hotplugSize.ToMiB())}, nil
 }
-func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (clh *cloudHypervisor) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
     cl := clh.client()
     // Retrieve the number of current vCPUs via HTTP API
     info, err := clh.vmInfo()
     if err != nil {
-        clh.Logger().WithField("function", "resizeVCPUs").WithError(err).Info("[clh] vmInfo failed")
+        clh.Logger().WithField("function", "ResizeVCPUs").WithError(err).Info("[clh] vmInfo failed")
         return 0, 0, openAPIClientError(err)
     }
     currentVCPUs = uint32(info.Config.Cpus.BootVcpus)
     newVCPUs = currentVCPUs
-    // Sanity check
+    // Sanity Check
     if reqVCPUs == 0 {
-        clh.Logger().WithField("function", "resizeVCPUs").Debugf("Cannot resize vCPU to 0")
+        clh.Logger().WithField("function", "ResizeVCPUs").Debugf("Cannot resize vCPU to 0")
         return currentVCPUs, newVCPUs, fmt.Errorf("Cannot resize vCPU to 0")
     }
     if reqVCPUs > uint32(info.Config.Cpus.MaxVcpus) {
         clh.Logger().WithFields(log.Fields{
-            "function": "resizeVCPUs",
+            "function": "ResizeVCPUs",
             "reqVCPUs": reqVCPUs,
             "clhMaxVCPUs": info.Config.Cpus.MaxVcpus,
         }).Warn("exceeding the 'clhMaxVCPUs' (resizing to 'clhMaxVCPUs')")
@@ -711,8 +711,8 @@ func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (c
     return currentVCPUs, newVCPUs, nil
 }
-func (clh *cloudHypervisor) cleanup(ctx context.Context) error {
-    clh.Logger().WithField("function", "cleanup").Info("cleanup")
+func (clh *cloudHypervisor) Cleanup(ctx context.Context) error {
+    clh.Logger().WithField("function", "Cleanup").Info("Cleanup")
     return nil
 }
@@ -747,7 +747,7 @@ func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
     return nil, errors.New("cloudHypervisor is not supported by VM cache")
 }
-func (clh *cloudHypervisor) save() (s persistapi.HypervisorState) {
+func (clh *cloudHypervisor) Save() (s persistapi.HypervisorState) {
     s.Pid = clh.state.PID
     s.Type = string(ClhHypervisor)
     s.VirtiofsdPid = clh.state.VirtiofsdPID
@@ -755,13 +755,13 @@ func (clh *cloudHypervisor) save() (s persistapi.HypervisorState) {
     return
 }
-func (clh *cloudHypervisor) load(s persistapi.HypervisorState) {
+func (clh *cloudHypervisor) Load(s persistapi.HypervisorState) {
     clh.state.PID = s.Pid
     clh.state.VirtiofsdPID = s.VirtiofsdPid
     clh.state.apiSocket = s.APISocket
 }
-func (clh *cloudHypervisor) check() error {
+func (clh *cloudHypervisor) Check() error {
     cl := clh.client()
     ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
     defer cancel()
@@ -770,16 +770,16 @@ func (clh *cloudHypervisor) check() error {
     return err
 }
-func (clh *cloudHypervisor) getPids() []int {
+func (clh *cloudHypervisor) GetPids() []int {
     return []int{clh.state.PID}
 }
-func (clh *cloudHypervisor) getVirtioFsPid() *int {
+func (clh *cloudHypervisor) GetVirtioFsPid() *int {
     return &clh.state.VirtiofsdPID
 }
-func (clh *cloudHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
-    span, _ := katatrace.Trace(ctx, clh.Logger(), "addDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
+func (clh *cloudHypervisor) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
+    span, _ := katatrace.Trace(ctx, clh.Logger(), "AddDevice", clhTracingTags, map[string]string{"sandbox_id": clh.id})
     defer span.End()
     var err error
@@ -794,7 +794,7 @@ func (clh *cloudHypervisor) addDevice(ctx context.Context, devInfo interface{},
     case types.Volume:
         err = clh.addVolume(v)
     default:
-        clh.Logger().WithField("function", "addDevice").Warnf("Add device of type %v is not supported.", v)
+        clh.Logger().WithField("function", "AddDevice").Warnf("Add device of type %v is not supported.", v)
         return fmt.Errorf("Not implemented support for %s", v)
     }
@@ -812,11 +812,11 @@ func (clh *cloudHypervisor) Logger() *log.Entry {
 }
 // Adds all capabilities supported by cloudHypervisor implementation of hypervisor interface
-func (clh *cloudHypervisor) capabilities(ctx context.Context) types.Capabilities {
-    span, _ := katatrace.Trace(ctx, clh.Logger(), "capabilities", clhTracingTags, map[string]string{"sandbox_id": clh.id})
+func (clh *cloudHypervisor) Capabilities(ctx context.Context) types.Capabilities {
+    span, _ := katatrace.Trace(ctx, clh.Logger(), "Capabilities", clhTracingTags, map[string]string{"sandbox_id": clh.id})
     defer span.End()
-    clh.Logger().WithField("function", "capabilities").Info("get Capabilities")
+    clh.Logger().WithField("function", "Capabilities").Info("get Capabilities")
     var caps types.Capabilities
     caps.SetFsSharingSupport()
     caps.SetBlockDeviceHotplugSupport()
@@ -834,7 +834,7 @@ func (clh *cloudHypervisor) terminate(ctx context.Context, waitOnly bool) (err e
     }
     defer func() {
-        clh.Logger().Debug("cleanup VM")
+        clh.Logger().Debug("Cleanup VM")
         if err1 := clh.cleanupVM(true); err1 != nil {
             clh.Logger().WithError(err1).Error("failed to cleanupVM")
         }
@@ -873,7 +873,7 @@ func (clh *cloudHypervisor) reset() {
     clh.state.reset()
 }
-func (clh *cloudHypervisor) generateSocket(id string) (interface{}, error) {
+func (clh *cloudHypervisor) GenerateSocket(id string) (interface{}, error) {
     udsPath, err := clh.vsockSocketPath(id)
     if err != nil {
         clh.Logger().Info("Can't generate socket path for cloud-hypervisor")
@@ -1206,7 +1206,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
         }
     }
-    // cleanup vm path
+    // Cleanup vm path
     dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
     // If it's a symlink, remove both dir and the target.
@@ -1218,7 +1218,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
     clh.Logger().WithFields(log.Fields{
         "link": link,
         "dir": dir,
-    }).Infof("cleanup vm path")
+    }).Infof("Cleanup vm path")
     if err := os.RemoveAll(dir); err != nil {
         if !force {
@@ -1263,7 +1263,7 @@ func (clh *cloudHypervisor) vmInfo() (chclient.VmInfo, error) {
     return info, openAPIClientError(err)
 }
-func (clh *cloudHypervisor) isRateLimiterBuiltin() bool {
+func (clh *cloudHypervisor) IsRateLimiterBuiltin() bool {
     return false
 }


@@ -303,10 +303,10 @@ func TestCloudHypervisorResizeMemory(t *testing.T) {
             clh.APIClient = mockClient
             clh.config = clhConfig
-            newMem, memDev, err := clh.resizeMemory(context.Background(), tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
+            newMem, memDev, err := clh.ResizeMemory(context.Background(), tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
             if (err != nil) != tt.wantErr {
-                t.Errorf("cloudHypervisor.resizeMemory() error = %v, expected to fail = %v", err, tt.wantErr)
+                t.Errorf("cloudHypervisor.ResizeMemory() error = %v, expected to fail = %v", err, tt.wantErr)
                 return
             }
@@ -317,11 +317,11 @@ func TestCloudHypervisorResizeMemory(t *testing.T) {
             expectedMem := clhConfig.MemorySize + uint32(tt.expectedMemDev.SizeMB)
             if newMem != expectedMem {
-                t.Errorf("cloudHypervisor.resizeMemory() got = %+v, want %+v", newMem, expectedMem)
+                t.Errorf("cloudHypervisor.ResizeMemory() got = %+v, want %+v", newMem, expectedMem)
             }
             if !reflect.DeepEqual(memDev, tt.expectedMemDev) {
-                t.Errorf("cloudHypervisor.resizeMemory() got = %+v, want %+v", memDev, tt.expectedMemDev)
+                t.Errorf("cloudHypervisor.ResizeMemory() got = %+v, want %+v", memDev, tt.expectedMemDev)
             }
         })
     }
@@ -359,13 +359,13 @@ func TestCloudHypervisorHotplugRemoveDevice(t *testing.T) {
     clh.config = clhConfig
     clh.APIClient = &clhClientMock{}
-    _, err = clh.hotplugRemoveDevice(context.Background(), &config.BlockDrive{}, BlockDev)
+    _, err = clh.HotplugRemoveDevice(context.Background(), &config.BlockDrive{}, BlockDev)
     assert.NoError(err, "Hotplug remove block device expected no error")
-    _, err = clh.hotplugRemoveDevice(context.Background(), &config.VFIODev{}, VfioDev)
+    _, err = clh.HotplugRemoveDevice(context.Background(), &config.VFIODev{}, VfioDev)
     assert.NoError(err, "Hotplug remove vfio block device expected no error")
-    _, err = clh.hotplugRemoveDevice(context.Background(), nil, NetDev)
+    _, err = clh.HotplugRemoveDevice(context.Background(), nil, NetDev)
     assert.Error(err, "Hotplug remove pmem block device expected error")
 }
@@ -381,7 +381,7 @@ func TestClhGenerateSocket(t *testing.T) {
     clh.addVSock(1, "path")
-    s, err := clh.generateSocket("c")
+    s, err := clh.GenerateSocket("c")
     assert.NoError(err)
     assert.NotNil(s)


@@ -392,7 +392,7 @@ func (c *Container) GetAnnotations() map[string]string {
 // This OCI specification was patched when the sandbox was created
 // by containerCapabilities(), SetEphemeralStorageType() and others
 // in order to support:
-// * capabilities
+// * Capabilities
 // * Ephemeral storage
 // * k8s empty dir
 // If you need the original (vanilla) OCI spec,
@@ -431,7 +431,7 @@ func (c *Container) shareFiles(ctx context.Context, m Mount, idx int) (string, b
     // copy file to contaier's rootfs if filesystem sharing is not supported, otherwise
     // bind mount it in the shared directory.
-    caps := c.sandbox.hypervisor.capabilities(ctx)
+    caps := c.sandbox.hypervisor.Capabilities(ctx)
     if !caps.IsFsSharingSupported() {
         c.Logger().Debug("filesystem sharing is not supported, files will be copied")
@@ -573,7 +573,7 @@ func (c *Container) mountSharedDirMounts(ctx context.Context, sharedDirMounts, i
         // manually update the path that is mounted into the container).
         // Based on this, let's make sure we update the sharedDirMount structure with the new watchable-mount as
         // the source (this is what is utilized to update the OCI spec).
-        caps := c.sandbox.hypervisor.capabilities(ctx)
+        caps := c.sandbox.hypervisor.Capabilities(ctx)
         if isWatchableMount(m.Source) && caps.IsFsSharingSupported() {
             // Create path in shared directory for creating watchable mount:
@@ -663,7 +663,7 @@ func filterDevices(c *Container, devices []ContainerDevice) (ret []ContainerDevi
     return
 }
-// Add any mount based block devices to the device manager and save the
+// Add any mount based block devices to the device manager and Save the
 // device ID for the particular mount. This'll occur when the mountpoint source
 // is a block device.
 func (c *Container) createBlockDevices(ctx context.Context) error {
@@ -705,7 +705,7 @@ func (c *Container) createBlockDevices(ctx context.Context) error {
                 Minor: int64(unix.Minor(stat.Rdev)),
                 ReadOnly: m.ReadOnly,
             }
-            // check whether source can be used as a pmem device
+            // Check whether source can be used as a pmem device
         } else if di, err = config.PmemDeviceInfo(m.Source, m.Destination); err != nil {
             c.Logger().WithError(err).
                 WithField("mount-source", m.Source).
@@ -859,7 +859,7 @@ func (c *Container) rollbackFailingContainerCreation(ctx context.Context) {
 func (c *Container) checkBlockDeviceSupport(ctx context.Context) bool {
     if !c.sandbox.config.HypervisorConfig.DisableBlockDeviceUse {
         agentCaps := c.sandbox.agent.capabilities()
-        hypervisorCaps := c.sandbox.hypervisor.capabilities(ctx)
+        hypervisorCaps := c.sandbox.hypervisor.Capabilities(ctx)
         if agentCaps.IsBlockDeviceSupported() && hypervisorCaps.IsBlockDeviceHotplugSupported() {
             return true
@@ -982,7 +982,7 @@ func (c *Container) checkSandboxRunning(cmd string) error {
 }
 func (c *Container) getSystemMountInfo() {
-    // check if /dev needs to be bind mounted from host /dev
+    // Check if /dev needs to be bind mounted from host /dev
     c.systemMountsInfo.BindMountDev = false
     for _, m := range c.mounts {
@@ -1055,7 +1055,7 @@ func (c *Container) stop(ctx context.Context, force bool) error {
         // Save device and drive data.
         // TODO: can we merge this saving with setContainerState()?
         if err := c.sandbox.Save(); err != nil {
-            c.Logger().WithError(err).Info("save container state failed")
+            c.Logger().WithError(err).Info("Save container state failed")
         }
     }()


@@ -99,7 +99,7 @@ func TestContainerRemoveDrive(t *testing.T) {
     container.state.Fstype = ""
     err := container.removeDrive(sandbox.ctx)
-    // hotplugRemoveDevice for hypervisor should not be called.
+    // HotplugRemoveDevice for hypervisor should not be called.
     // test should pass without a hypervisor created for the container's sandbox.
     assert.Nil(t, err, "remove drive should succeed")
@@ -329,7 +329,7 @@ func TestContainerAddDriveDir(t *testing.T) {
         rootFs: RootFs{Target: fakeRootfs, Mounted: true},
     }
-    // Make the checkStorageDriver func variable point to a fake check function
+    // Make the checkStorageDriver func variable point to a fake Check function
     savedFunc := checkStorageDriver
     checkStorageDriver = func(major, minor int) (bool, error) {
         return true, nil
@@ -562,7 +562,7 @@ func TestMountSharedDirMounts(t *testing.T) {
     // create a new shared directory for our test:
     kataHostSharedDirSaved := kataHostSharedDir
-    testHostDir, err := ioutil.TempDir("", "kata-cleanup")
+    testHostDir, err := ioutil.TempDir("", "kata-Cleanup")
     assert.NoError(err)
     kataHostSharedDir = func() string {
         return testHostDir


@@ -87,7 +87,7 @@ func TestIncorrectEndpointTypeString(t *testing.T) {
 func TestSaveLoadIfPair(t *testing.T) {
     macAddr := net.HardwareAddr{0x02, 0x00, 0xCA, 0xFE, 0x00, 0x04}
-    tmpfile, err := ioutil.TempFile("", "vc-save-load-net-")
+    tmpfile, err := ioutil.TempFile("", "vc-Save-Load-net-")
     assert.Nil(t, err)
     defer os.Remove(tmpfile.Name())
@@ -109,7 +109,7 @@ func TestSaveLoadIfPair(t *testing.T) {
         NetInterworkingModel: DefaultNetInterworkingModel,
     }
-    // Save to disk then load it back.
+    // Save to disk then Load it back.
     savedIfPair := saveNetIfPair(netPair)
     loadedIfPair := loadNetIfPair(savedIfPair)


@@ -62,7 +62,7 @@ const (
     fcTimeout = 10
     fcSocket = "firecracker.socket"
     //Name of the files within jailer root
-    //Having predefined names helps with cleanup
+    //Having predefined names helps with Cleanup
     fcKernel = "vmlinux"
     fcRootfs = "rootfs"
     fcStopSandboxTimeout = 15
@@ -206,7 +206,7 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
     span, _ := katatrace.Trace(ctx, fc.Logger(), "createSandbox", fcTracingTags, map[string]string{"sandbox_id": fc.id})
     defer span.End()
-    //TODO: check validity of the hypervisor config provided
+    //TODO: Check validity of the hypervisor config provided
     //https://github.com/kata-containers/runtime/issues/1065
     fc.id = fc.truncateID(id)
     fc.state.set(notReady)
@@ -303,7 +303,7 @@ func (fc *firecracker) getVersionNumber() (string, error) {
 func (fc *firecracker) parseVersion(data string) (string, error) {
     // Firecracker versions 0.25 and over contains multiline output on "version" command.
-    // So we have to check it and use first line of output to parse version.
+    // So we have to Check it and use first line of output to parse version.
     lines := strings.Split(data, "\n")
     var version string
@@ -359,7 +359,7 @@ func (fc *firecracker) fcInit(ctx context.Context, timeout int) error {
     defer span.End()
     var err error
-    //FC version set and check
+    //FC version set and Check
     if fc.info.Version, err = fc.getVersionNumber(); err != nil {
         return err
     }
@@ -751,7 +751,7 @@ func (fc *firecracker) fcInitConfiguration(ctx context.Context) error {
     fc.state.set(cfReady)
     for _, d := range fc.pendingDevices {
-        if err := fc.addDevice(ctx, d.dev, d.devType); err != nil {
+        if err := fc.AddDevice(ctx, d.dev, d.devType); err != nil {
             return err
         }
     }
@@ -1023,8 +1023,8 @@ func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string)
 // addDevice will add extra devices to firecracker. Limited to configure before the
 // virtual machine starts. Devices include drivers and network interfaces only.
-func (fc *firecracker) addDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
-    span, _ := katatrace.Trace(ctx, fc.Logger(), "addDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
+func (fc *firecracker) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
+    span, _ := katatrace.Trace(ctx, fc.Logger(), "AddDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
     defer span.End()
     fc.state.RLock()
@@ -1093,8 +1093,8 @@ func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.Bloc
 }
 // hotplugAddDevice supported in Firecracker VMM
-func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
-    span, _ := katatrace.Trace(ctx, fc.Logger(), "hotplugAddDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
+func (fc *firecracker) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
+    span, _ := katatrace.Trace(ctx, fc.Logger(), "HotplugAddDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
     defer span.End()
     switch devType {
@@ -1102,15 +1102,15 @@ func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}
         return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), AddDevice)
     default:
         fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
-            "deviceType": devType}).Warn("hotplugAddDevice: unsupported device")
+            "deviceType": devType}).Warn("HotplugAddDevice: unsupported device")
         return nil, fmt.Errorf("Could not hot add device: unsupported device: %v, type: %v",
             devInfo, devType)
     }
 }
 // hotplugRemoveDevice supported in Firecracker VMM
-func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
-    span, _ := katatrace.Trace(ctx, fc.Logger(), "hotplugRemoveDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
+func (fc *firecracker) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
+    span, _ := katatrace.Trace(ctx, fc.Logger(), "HotplugRemoveDevice", fcTracingTags, map[string]string{"sandbox_id": fc.id})
     defer span.End()
     switch devType {
@@ -1118,7 +1118,7 @@ func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interfac
         return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), RemoveDevice)
     default:
         fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
-            "deviceType": devType}).Error("hotplugRemoveDevice: unsupported device")
+            "deviceType": devType}).Error("HotplugRemoveDevice: unsupported device")
         return nil, fmt.Errorf("Could not hot remove device: unsupported device: %v, type: %v",
             devInfo, devType)
     }
@@ -1126,7 +1126,7 @@ func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interfac
 // getSandboxConsole builds the path of the console where we can read
 // logs coming from the sandbox.
-func (fc *firecracker) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
+func (fc *firecracker) GetSandboxConsole(ctx context.Context, id string) (string, string, error) {
     master, slave, err := console.NewPty()
     if err != nil {
         fc.Logger().Debugf("Error create pseudo tty: %v", err)
@@ -1137,13 +1137,13 @@ func (fc *firecracker) getSandboxConsole(ctx context.Context, id string) (string
     return consoleProtoPty, slave, nil
 }
-func (fc *firecracker) disconnect(ctx context.Context) {
+func (fc *firecracker) Disconnect(ctx context.Context) {
     fc.state.set(notReady)
 }
 // Adds all capabilities supported by firecracker implementation of hypervisor interface
-func (fc *firecracker) capabilities(ctx context.Context) types.Capabilities {
-    span, _ := katatrace.Trace(ctx, fc.Logger(), "capabilities", fcTracingTags, map[string]string{"sandbox_id": fc.id})
+func (fc *firecracker) Capabilities(ctx context.Context) types.Capabilities {
+    span, _ := katatrace.Trace(ctx, fc.Logger(), "Capabilities", fcTracingTags, map[string]string{"sandbox_id": fc.id})
     defer span.End()
     var caps types.Capabilities
     caps.SetBlockDeviceHotplugSupport()
@@ -1151,15 +1151,15 @@ func (fc *firecracker) capabilities(ctx context.Context) types.Capabilities {
     return caps
 }
-func (fc *firecracker) hypervisorConfig() HypervisorConfig {
+func (fc *firecracker) HypervisorConfig() HypervisorConfig {
     return fc.config
 }
-func (fc *firecracker) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
+func (fc *firecracker) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
     return 0, MemoryDevice{}, nil
 }
-func (fc *firecracker) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (fc *firecracker) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
     return 0, 0, nil
 }
@@ -1167,7 +1167,7 @@ func (fc *firecracker) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (curren
 //
 // As suggested by https://github.com/firecracker-microvm/firecracker/issues/718,
 // let's use `ps -T -p <pid>` to get fc vcpu info.
-func (fc *firecracker) getThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
+func (fc *firecracker) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
     var vcpuInfo VcpuThreadIDs
     vcpuInfo.vcpus = make(map[int]int)
@@ -1205,16 +1205,16 @@ func (fc *firecracker) getThreadIDs(ctx context.Context) (VcpuThreadIDs, error)
     return vcpuInfo, nil
 }
-func (fc *firecracker) cleanup(ctx context.Context) error {
+func (fc *firecracker) Cleanup(ctx context.Context) error {
     fc.cleanupJail(ctx)
     return nil
 }
-func (fc *firecracker) getPids() []int {
+func (fc *firecracker) GetPids() []int {
     return []int{fc.info.PID}
 }
-func (fc *firecracker) getVirtioFsPid() *int {
+func (fc *firecracker) GetVirtioFsPid() *int {
     return nil
 }
@@ -1226,17 +1226,17 @@ func (fc *firecracker) toGrpc(ctx context.Context) ([]byte, error) {
     return nil, errors.New("firecracker is not supported by VM cache")
 }
-func (fc *firecracker) save() (s persistapi.HypervisorState) {
+func (fc *firecracker) Save() (s persistapi.HypervisorState) {
     s.Pid = fc.info.PID
     s.Type = string(FirecrackerHypervisor)
     return
 }
-func (fc *firecracker) load(s persistapi.HypervisorState) {
+func (fc *firecracker) Load(s persistapi.HypervisorState) {
     fc.info.PID = s.Pid
 }
-func (fc *firecracker) check() error {
+func (fc *firecracker) Check() error {
     if err := syscall.Kill(fc.info.PID, syscall.Signal(0)); err != nil {
         return errors.Wrapf(err, "failed to ping fc process")
     }
@@ -1244,7 +1244,7 @@ func (fc *firecracker) check() error {
     return nil
 }
-func (fc *firecracker) generateSocket(id string) (interface{}, error) {
+func (fc *firecracker) GenerateSocket(id string) (interface{}, error) {
     fc.Logger().Debug("Using hybrid-vsock endpoint")
     // Method is being run outside of the normal container workflow
@@ -1259,7 +1259,7 @@ func (fc *firecracker) generateSocket(id string) (interface{}, error) {
     }, nil
 }
-func (fc *firecracker) isRateLimiterBuiltin() bool {
+func (fc *firecracker) IsRateLimiterBuiltin() bool {
     return true
 }


@@ -472,7 +472,7 @@ type PerformanceMetrics struct {
     FullCreateSnapshot uint64 `json:"full_create_snapshot"`
     // Measures the snapshot diff create time, at the API (user) level, in microseconds.
    DiffCreateSnapshot uint64 `json:"diff_create_snapshot"`
-    // Measures the snapshot load time, at the API (user) level, in microseconds.
+    // Measures the snapshot Load time, at the API (user) level, in microseconds.
     LoadSnapshot uint64 `json:"load_snapshot"`
     // Measures the microVM pausing duration, at the API (user) level, in microseconds.
     PauseVM uint64 `json:"pause_vm"`
@@ -482,7 +482,7 @@ type PerformanceMetrics struct {
     VmmFullCreateSnapshot uint64 `json:"vmm_full_create_snapshot"`
     // Measures the snapshot diff create time, at the VMM level, in microseconds.
     VmmDiffCreateSnapshot uint64 `json:"vmm_diff_create_snapshot"`
-    // Measures the snapshot load time, at the VMM level, in microseconds.
+    // Measures the snapshot Load time, at the VMM level, in microseconds.
     VmmLoadSnapshot uint64 `json:"vmm_load_snapshot"`
     // Measures the microVM pausing duration, at the VMM level, in microseconds.
     VmmPauseVM uint64 `json:"vmm_pause_vm"`


@@ -17,7 +17,7 @@ func TestFCGenerateSocket(t *testing.T) {
     assert := assert.New(t)
     fc := firecracker{}
-    i, err := fc.generateSocket("a")
+    i, err := fc.GenerateSocket("a")
     assert.NoError(err)
     assert.NotNil(i)


@ -235,7 +235,7 @@ func GetHypervisorSocketTemplate(hType HypervisorType, config *HypervisorConfig)
// Tag that is used to represent the name of a sandbox // Tag that is used to represent the name of a sandbox
const sandboxID = "{ID}" const sandboxID = "{ID}"
socket, err := hypervisor.generateSocket(sandboxID) socket, err := hypervisor.GenerateSocket(sandboxID)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -529,7 +529,7 @@ func (conf *HypervisorConfig) CheckTemplateConfig() error {
} }
if conf.BootFromTemplate && conf.DevicesStatePath == "" { if conf.BootFromTemplate && conf.DevicesStatePath == "" {
return fmt.Errorf("Missing DevicesStatePath to load from vm template") return fmt.Errorf("Missing DevicesStatePath to Load from vm template")
} }
} }
@ -780,7 +780,7 @@ func GetHostMemorySizeKb(memInfoPath string) (uint64, error) {
// CheckCmdline checks whether an option or parameter is present in the kernel command line. // CheckCmdline checks whether an option or parameter is present in the kernel command line.
// Search is case-insensitive. // Search is case-insensitive.
// Takes path to file that contains the kernel command line, desired option, and permitted values // Takes path to file that contains the kernel command line, desired option, and permitted values
// (empty values to check for options). // (empty values to Check for options).
func CheckCmdline(kernelCmdlinePath, searchParam string, searchValues []string) (bool, error) { func CheckCmdline(kernelCmdlinePath, searchParam string, searchValues []string) (bool, error) {
f, err := os.Open(kernelCmdlinePath) f, err := os.Open(kernelCmdlinePath)
if err != nil { if err != nil {
@ -788,8 +788,8 @@ func CheckCmdline(kernelCmdlinePath, searchParam string, searchValues []string)
} }
defer f.Close() defer f.Close()
// Create check function -- either check for verbatim option // Create Check function -- either Check for verbatim option
// or check for parameter and permitted values // or Check for parameter and permitted values
var check func(string, string, []string) bool var check func(string, string, []string) bool
if len(searchValues) == 0 { if len(searchValues) == 0 {
check = func(option, searchParam string, _ []string) bool { check = func(option, searchParam string, _ []string) bool {
@ -873,7 +873,7 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
} }
func GetHypervisorPid(h hypervisor) int { func GetHypervisorPid(h hypervisor) int {
pids := h.getPids() pids := h.GetPids()
if len(pids) == 0 { if len(pids) == 0 {
return 0 return 0
} }
@ -905,33 +905,33 @@ type hypervisor interface {
pauseSandbox(ctx context.Context) error pauseSandbox(ctx context.Context) error
saveSandbox() error saveSandbox() error
resumeSandbox(ctx context.Context) error resumeSandbox(ctx context.Context) error
addDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error
hotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error)
hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error)
resizeMemory(ctx context.Context, memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) ResizeMemory(ctx context.Context, memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error)
resizeVCPUs(ctx context.Context, vcpus uint32) (uint32, uint32, error) ResizeVCPUs(ctx context.Context, vcpus uint32) (uint32, uint32, error)
getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error) GetSandboxConsole(ctx context.Context, sandboxID string) (string, string, error)
disconnect(ctx context.Context) Disconnect(ctx context.Context)
capabilities(ctx context.Context) types.Capabilities Capabilities(ctx context.Context) types.Capabilities
hypervisorConfig() HypervisorConfig HypervisorConfig() HypervisorConfig
getThreadIDs(ctx context.Context) (VcpuThreadIDs, error) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error)
cleanup(ctx context.Context) error Cleanup(ctx context.Context) error
// getPids returns a slice of hypervisor related process ids. // getPids returns a slice of hypervisor related process ids.
// The hypervisor pid must be put at index 0. // The hypervisor pid must be put at index 0.
getPids() []int GetPids() []int
getVirtioFsPid() *int GetVirtioFsPid() *int
fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error
toGrpc(ctx context.Context) ([]byte, error) toGrpc(ctx context.Context) ([]byte, error)
check() error Check() error
save() persistapi.HypervisorState Save() persistapi.HypervisorState
load(persistapi.HypervisorState) Load(persistapi.HypervisorState)
// generate the socket to communicate the host and guest // generate the socket to communicate the host and guest
generateSocket(id string) (interface{}, error) GenerateSocket(id string) (interface{}, error)
// check if hypervisor supports built-in rate limiter. // check if hypervisor supports built-in rate limiter.
isRateLimiterBuiltin() bool IsRateLimiterBuiltin() bool
setSandbox(sandbox *Sandbox) setSandbox(sandbox *Sandbox)
} }
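For orientation, a minimal sketch (not part of this diff) of how a caller consumes the exported interface; per the comment above, GetPids puts the hypervisor pid at index 0, which is the contract GetHypervisorPid relies on:
// Sketch only: mirrors GetHypervisorPid and relies on the documented
// contract that GetPids() places the hypervisor pid at index 0.
func hypervisorPid(h hypervisor) (int, error) {
	pids := h.GetPids()
	if len(pids) == 0 || pids[0] == 0 {
		return -1, fmt.Errorf("invalid hypervisor PID: %+v", pids)
	}
	return pids[0], nil
}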

@ -103,7 +103,7 @@ func (endpoint *IPVlanEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return err return err
} }
return h.addDevice(ctx, endpoint, NetDev) return h.AddDevice(ctx, endpoint, NetDev)
} }
// Detach for the ipvlan endpoint tears down the tap and bridge // Detach for the ipvlan endpoint tears down the tap and bridge

@ -297,7 +297,7 @@ func (k *kataAgent) handleTraceSettings(config KataAgentConfig) bool {
} }
func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgentConfig) (disableVMShutdown bool, err error) { func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgentConfig) (disableVMShutdown bool, err error) {
// save // Save
k.ctx = sandbox.ctx k.ctx = sandbox.ctx
span, _ := katatrace.Trace(ctx, k.Logger(), "init", kataAgentTracingTags) span, _ := katatrace.Trace(ctx, k.Logger(), "init", kataAgentTracingTags)
@ -327,7 +327,7 @@ func (k *kataAgent) agentURL() (string, error) {
func (k *kataAgent) capabilities() types.Capabilities { func (k *kataAgent) capabilities() types.Capabilities {
var caps types.Capabilities var caps types.Capabilities
// add all capabilities supported by agent // add all Capabilities supported by agent
caps.SetBlockDeviceSupport() caps.SetBlockDeviceSupport()
return caps return caps
@ -338,7 +338,7 @@ func (k *kataAgent) internalConfigure(ctx context.Context, h hypervisor, id stri
defer span.End() defer span.End()
var err error var err error
if k.vmSocket, err = h.generateSocket(id); err != nil { if k.vmSocket, err = h.GenerateSocket(id); err != nil {
return err return err
} }
k.keepConn = config.LongLiveConn k.keepConn = config.LongLiveConn
@ -367,11 +367,11 @@ func (k *kataAgent) setupSandboxBindMounts(ctx context.Context, sandbox *Sandbox
if err != nil { if err != nil {
for _, mnt := range mountedList { for _, mnt := range mountedList {
if derr := syscall.Unmount(mnt, syscall.MNT_DETACH|UmountNoFollow); derr != nil { if derr := syscall.Unmount(mnt, syscall.MNT_DETACH|UmountNoFollow); derr != nil {
k.Logger().WithError(derr).Errorf("cleanup: couldn't unmount %s", mnt) k.Logger().WithError(derr).Errorf("Cleanup: couldn't unmount %s", mnt)
} }
} }
if derr := os.RemoveAll(sandboxMountDir); derr != nil { if derr := os.RemoveAll(sandboxMountDir); derr != nil {
k.Logger().WithError(derr).Errorf("cleanup: failed to remove %s", sandboxMountDir) k.Logger().WithError(derr).Errorf("Cleanup: failed to remove %s", sandboxMountDir)
} }
} }
@ -432,11 +432,11 @@ func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath s
switch s := k.vmSocket.(type) { switch s := k.vmSocket.(type) {
case types.VSock: case types.VSock:
if err = h.addDevice(ctx, s, VSockPCIDev); err != nil { if err = h.AddDevice(ctx, s, VSockPCIDev); err != nil {
return err return err
} }
case types.HybridVSock: case types.HybridVSock:
err = h.addDevice(ctx, s, HybridVirtioVsockDev) err = h.AddDevice(ctx, s, HybridVirtioVsockDev)
if err != nil { if err != nil {
return err return err
} }
@ -447,7 +447,7 @@ func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath s
// Neither create shared directory nor add 9p device if hypervisor // Neither create shared directory nor add 9p device if hypervisor
// doesn't support filesystem sharing. // doesn't support filesystem sharing.
caps := h.capabilities(ctx) caps := h.Capabilities(ctx)
if !caps.IsFsSharingSupported() { if !caps.IsFsSharingSupported() {
return nil return nil
} }
@ -463,7 +463,7 @@ func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath s
return err return err
} }
return h.addDevice(ctx, sharedVolume, FsDev) return h.AddDevice(ctx, sharedVolume, FsDev)
} }
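Condensed, the configure flow above is: generate the VM socket, register it as a device, then gate filesystem sharing on the hypervisor's capabilities. A hedged sketch, assuming ctx, h, id and sharedVolume are in scope:
// Sketch of the flow above; error handling trimmed for brevity.
vmSocket, err := h.GenerateSocket(id)
if err != nil {
	return err
}
switch s := vmSocket.(type) {
case types.VSock:
	if err := h.AddDevice(ctx, s, VSockPCIDev); err != nil {
		return err
	}
case types.HybridVSock:
	if err := h.AddDevice(ctx, s, HybridVirtioVsockDev); err != nil {
		return err
	}
}
if !h.Capabilities(ctx).IsFsSharingSupported() {
	return nil // neither shared directory nor 9p/virtio-fs device
}
return h.AddDevice(ctx, sharedVolume, FsDev)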
func (k *kataAgent) configureFromGrpc(ctx context.Context, h hypervisor, id string, config KataAgentConfig) error { func (k *kataAgent) configureFromGrpc(ctx context.Context, h hypervisor, id string, config KataAgentConfig) error {
@ -781,7 +781,7 @@ func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
return err return err
} }
// check grpc server is serving // Check grpc server is serving
if err = k.check(ctx); err != nil { if err = k.check(ctx); err != nil {
return err return err
} }
@ -853,7 +853,7 @@ func setupKernelModules(kmodules []string) []*grpc.KernelModule {
func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage { func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage {
storages := []*grpc.Storage{} storages := []*grpc.Storage{}
caps := sandbox.hypervisor.capabilities(ctx) caps := sandbox.hypervisor.Capabilities(ctx)
// append 9p shared volume to storages only if filesystem sharing is supported // append 9p shared volume to storages only if filesystem sharing is supported
if caps.IsFsSharingSupported() { if caps.IsFsSharingSupported() {
@ -1849,7 +1849,7 @@ func (k *kataAgent) connect(ctx context.Context) error {
} }
func (k *kataAgent) disconnect(ctx context.Context) error { func (k *kataAgent) disconnect(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, k.Logger(), "disconnect", kataAgentTracingTags) span, _ := katatrace.Trace(ctx, k.Logger(), "Disconnect", kataAgentTracingTags)
defer span.End() defer span.End()
k.Lock() k.Lock()
@ -1873,7 +1873,7 @@ func (k *kataAgent) disconnect(ctx context.Context) error {
func (k *kataAgent) check(ctx context.Context) error { func (k *kataAgent) check(ctx context.Context) error {
_, err := k.sendReq(ctx, &grpc.CheckRequest{}) _, err := k.sendReq(ctx, &grpc.CheckRequest{})
if err != nil { if err != nil {
err = fmt.Errorf("Failed to check if grpc server is working: %s", err) err = fmt.Errorf("Failed to Check if grpc server is working: %s", err)
} }
return err return err
} }
@ -2200,12 +2200,12 @@ func (k *kataAgent) markDead(ctx context.Context) {
func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) { func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
if err := k.cleanupSandboxBindMounts(s); err != nil { if err := k.cleanupSandboxBindMounts(s); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup sandbox bindmounts") k.Logger().WithError(err).Errorf("failed to Cleanup sandbox bindmounts")
} }
// Unmount shared path // Unmount shared path
path := getSharePath(s.id) path := getSharePath(s.id)
k.Logger().WithField("path", path).Infof("cleanup agent") k.Logger().WithField("path", path).Infof("Cleanup agent")
if err := syscall.Unmount(path, syscall.MNT_DETACH|UmountNoFollow); err != nil { if err := syscall.Unmount(path, syscall.MNT_DETACH|UmountNoFollow); err != nil {
k.Logger().WithError(err).Errorf("failed to unmount vm share path %s", path) k.Logger().WithError(err).Errorf("failed to unmount vm share path %s", path)
} }
@ -2216,7 +2216,7 @@ func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path) k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path)
} }
if err := os.RemoveAll(getSandboxPath(s.id)); err != nil { if err := os.RemoveAll(getSandboxPath(s.id)); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup vm path %s", getSandboxPath(s.id)) k.Logger().WithError(err).Errorf("failed to Cleanup vm path %s", getSandboxPath(s.id))
} }
} }

@ -591,7 +591,7 @@ func TestConstraintGRPCSpec(t *testing.T) {
k := kataAgent{} k := kataAgent{}
k.constraintGRPCSpec(g, true) k.constraintGRPCSpec(g, true)
// check nil fields // Check nil fields
assert.Nil(g.Hooks) assert.Nil(g.Hooks)
assert.NotNil(g.Linux.Seccomp) assert.NotNil(g.Linux.Seccomp)
assert.Nil(g.Linux.Resources.Devices) assert.Nil(g.Linux.Resources.Devices)
@ -603,17 +603,17 @@ func TestConstraintGRPCSpec(t *testing.T) {
assert.NotNil(g.Linux.Resources.CPU) assert.NotNil(g.Linux.Resources.CPU)
assert.Equal(g.Process.SelinuxLabel, "") assert.Equal(g.Process.SelinuxLabel, "")
// check namespaces // Check namespaces
assert.Len(g.Linux.Namespaces, 1) assert.Len(g.Linux.Namespaces, 1)
assert.Empty(g.Linux.Namespaces[0].Path) assert.Empty(g.Linux.Namespaces[0].Path)
// check mounts // Check mounts
assert.Len(g.Mounts, 1) assert.Len(g.Mounts, 1)
// check cgroup path // Check cgroup path
assert.Equal(expectedCgroupPath, g.Linux.CgroupsPath) assert.Equal(expectedCgroupPath, g.Linux.CgroupsPath)
// check Linux devices // Check Linux devices
assert.Empty(g.Linux.Devices) assert.Empty(g.Linux.Devices)
} }
@ -966,7 +966,7 @@ func TestKataCleanupSandbox(t *testing.T) {
kataHostSharedDirSaved := kataHostSharedDir kataHostSharedDirSaved := kataHostSharedDir
kataHostSharedDir = func() string { kataHostSharedDir = func() string {
td, _ := ioutil.TempDir("", "kata-cleanup") td, _ := ioutil.TempDir("", "kata-Cleanup")
return td return td
} }
defer func() { defer func() {
@ -1123,7 +1123,7 @@ func TestSandboxBindMount(t *testing.T) {
// create a new shared directory for our test: // create a new shared directory for our test:
kataHostSharedDirSaved := kataHostSharedDir kataHostSharedDirSaved := kataHostSharedDir
testHostDir, err := ioutil.TempDir("", "kata-cleanup") testHostDir, err := ioutil.TempDir("", "kata-Cleanup")
assert.NoError(err) assert.NoError(err)
kataHostSharedDir = func() string { kataHostSharedDir = func() string {
return testHostDir return testHostDir
@ -1175,11 +1175,11 @@ func TestSandboxBindMount(t *testing.T) {
err = k.setupSandboxBindMounts(context.Background(), sandbox) err = k.setupSandboxBindMounts(context.Background(), sandbox)
assert.NoError(err) assert.NoError(err)
// Test the cleanup function. We expect it to succeed for the mount to be removed. // Test the Cleanup function. We expect it to succeed for the mount to be removed.
err = k.cleanupSandboxBindMounts(sandbox) err = k.cleanupSandboxBindMounts(sandbox)
assert.NoError(err) assert.NoError(err)
// After successful cleanup, verify there are not any mounts left behind. // After successful Cleanup, verify there are not any mounts left behind.
stat := syscall.Stat_t{} stat := syscall.Stat_t{}
mount1CheckPath := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir, filepath.Base(m1Path)) mount1CheckPath := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir, filepath.Base(m1Path))
err = syscall.Stat(mount1CheckPath, &stat) err = syscall.Stat(mount1CheckPath, &stat)
@ -1191,16 +1191,16 @@ func TestSandboxBindMount(t *testing.T) {
assert.Error(err) assert.Error(err)
assert.True(os.IsNotExist(err)) assert.True(os.IsNotExist(err))
// Now, let's setup the cleanup to fail. Setup the sandbox bind mount twice, which will result in // Now, let's setup the Cleanup to fail. Setup the sandbox bind mount twice, which will result in
// extra mounts being present that the sandbox description doesn't account for (ie, duplicate mounts). // extra mounts being present that the sandbox description doesn't account for (ie, duplicate mounts).
// We expect cleanup to fail on the first time, since it cannot remove the sandbox-bindmount directory because // We expect Cleanup to fail on the first time, since it cannot remove the sandbox-bindmount directory because
// there are leftover mounts. If we run it a second time, however, it should succeed since it'll remove the // there are leftover mounts. If we run it a second time, however, it should succeed since it'll remove the
// second set of mounts: // second set of mounts:
err = k.setupSandboxBindMounts(context.Background(), sandbox) err = k.setupSandboxBindMounts(context.Background(), sandbox)
assert.NoError(err) assert.NoError(err)
err = k.setupSandboxBindMounts(context.Background(), sandbox) err = k.setupSandboxBindMounts(context.Background(), sandbox)
assert.NoError(err) assert.NoError(err)
// Test the cleanup function. We expect it to succeed for the mount to be removed. // Test the Cleanup function. We expect it to succeed for the mount to be removed.
err = k.cleanupSandboxBindMounts(sandbox) err = k.cleanupSandboxBindMounts(sandbox)
assert.Error(err) assert.Error(err)
err = k.cleanupSandboxBindMounts(sandbox) err = k.cleanupSandboxBindMounts(sandbox)

@ -69,20 +69,20 @@ func (endpoint *MacvtapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor h := s.hypervisor
endpoint.VMFds, err = createMacvtapFds(endpoint.EndpointProperties.Iface.Index, int(h.hypervisorConfig().NumVCPUs)) endpoint.VMFds, err = createMacvtapFds(endpoint.EndpointProperties.Iface.Index, int(h.HypervisorConfig().NumVCPUs))
if err != nil { if err != nil {
return fmt.Errorf("Could not setup macvtap fds %s: %s", endpoint.EndpointProperties.Iface.Name, err) return fmt.Errorf("Could not setup macvtap fds %s: %s", endpoint.EndpointProperties.Iface.Name, err)
} }
if !h.hypervisorConfig().DisableVhostNet { if !h.HypervisorConfig().DisableVhostNet {
vhostFds, err := createVhostFds(int(h.hypervisorConfig().NumVCPUs)) vhostFds, err := createVhostFds(int(h.HypervisorConfig().NumVCPUs))
if err != nil { if err != nil {
return fmt.Errorf("Could not setup vhost fds %s : %s", endpoint.EndpointProperties.Iface.Name, err) return fmt.Errorf("Could not setup vhost fds %s : %s", endpoint.EndpointProperties.Iface.Name, err)
} }
endpoint.VhostFds = vhostFds endpoint.VhostFds = vhostFds
} }
return h.addDevice(ctx, endpoint, NetDev) return h.AddDevice(ctx, endpoint, NetDev)
} }
// Detach for macvtap endpoint does nothing. // Detach for macvtap endpoint does nothing.

@ -20,13 +20,13 @@ type mockHypervisor struct {
mockPid int mockPid int
} }
func (m *mockHypervisor) capabilities(ctx context.Context) types.Capabilities { func (m *mockHypervisor) Capabilities(ctx context.Context) types.Capabilities {
caps := types.Capabilities{} caps := types.Capabilities{}
caps.SetFsSharingSupport() caps.SetFsSharingSupport()
return caps return caps
} }
func (m *mockHypervisor) hypervisorConfig() HypervisorConfig { func (m *mockHypervisor) HypervisorConfig() HypervisorConfig {
return HypervisorConfig{} return HypervisorConfig{}
} }
@ -66,11 +66,11 @@ func (m *mockHypervisor) saveSandbox() error {
return nil return nil
} }
func (m *mockHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error { func (m *mockHypervisor) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
return nil return nil
} }
func (m *mockHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) { func (m *mockHypervisor) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
switch devType { switch devType {
case CpuDev: case CpuDev:
return devInfo.(uint32), nil return devInfo.(uint32), nil
@ -81,7 +81,7 @@ func (m *mockHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface
return nil, nil return nil, nil
} }
func (m *mockHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) { func (m *mockHypervisor) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
switch devType { switch devType {
case CpuDev: case CpuDev:
return devInfo.(uint32), nil return devInfo.(uint32), nil
@ -91,34 +91,34 @@ func (m *mockHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interf
return nil, nil return nil, nil
} }
func (m *mockHypervisor) getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error) { func (m *mockHypervisor) GetSandboxConsole(ctx context.Context, sandboxID string) (string, string, error) {
return "", "", nil return "", "", nil
} }
func (m *mockHypervisor) resizeMemory(ctx context.Context, memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, MemoryDevice, error) { func (m *mockHypervisor) ResizeMemory(ctx context.Context, memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
return 0, MemoryDevice{}, nil return 0, MemoryDevice{}, nil
} }
func (m *mockHypervisor) resizeVCPUs(ctx context.Context, cpus uint32) (uint32, uint32, error) { func (m *mockHypervisor) ResizeVCPUs(ctx context.Context, cpus uint32) (uint32, uint32, error) {
return 0, 0, nil return 0, 0, nil
} }
func (m *mockHypervisor) disconnect(ctx context.Context) { func (m *mockHypervisor) Disconnect(ctx context.Context) {
} }
func (m *mockHypervisor) getThreadIDs(ctx context.Context) (VcpuThreadIDs, error) { func (m *mockHypervisor) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
vcpus := map[int]int{0: os.Getpid()} vcpus := map[int]int{0: os.Getpid()}
return VcpuThreadIDs{vcpus}, nil return VcpuThreadIDs{vcpus}, nil
} }
func (m *mockHypervisor) cleanup(ctx context.Context) error { func (m *mockHypervisor) Cleanup(ctx context.Context) error {
return nil return nil
} }
func (m *mockHypervisor) getPids() []int { func (m *mockHypervisor) GetPids() []int {
return []int{m.mockPid} return []int{m.mockPid}
} }
func (m *mockHypervisor) getVirtioFsPid() *int { func (m *mockHypervisor) GetVirtioFsPid() *int {
return nil return nil
} }
@ -130,23 +130,23 @@ func (m *mockHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
return nil, errors.New("mockHypervisor is not supported by VM cache") return nil, errors.New("mockHypervisor is not supported by VM cache")
} }
func (m *mockHypervisor) save() (s persistapi.HypervisorState) { func (m *mockHypervisor) Save() (s persistapi.HypervisorState) {
return return
} }
func (m *mockHypervisor) load(s persistapi.HypervisorState) {} func (m *mockHypervisor) Load(s persistapi.HypervisorState) {}
func (m *mockHypervisor) check() error { func (m *mockHypervisor) Check() error {
return nil return nil
} }
func (m *mockHypervisor) generateSocket(id string) (interface{}, error) { func (m *mockHypervisor) GenerateSocket(id string) (interface{}, error) {
return types.MockHybridVSock{ return types.MockHybridVSock{
UdsPath: MockHybridVSockPath, UdsPath: MockHybridVSockPath,
}, nil }, nil
} }
func (m *mockHypervisor) isRateLimiterBuiltin() bool { func (m *mockHypervisor) IsRateLimiterBuiltin() bool {
return false return false
} }
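Since every method of the mock is now exported, it satisfies the interface for callers outside the file as well; a hypothetical test (name and values illustrative, not from this commit) exercising it through the interface type:
func TestMockThroughInterface(t *testing.T) {
	var h hypervisor = &mockHypervisor{mockPid: 1}
	// The mock's ResizeVCPUs is a no-op that returns zeros.
	cur, next, err := h.ResizeVCPUs(context.Background(), 4)
	assert.NoError(t, err)
	assert.Equal(t, uint32(0), cur)
	assert.Equal(t, uint32(0), next)
	// GetPids must honor the index-0 contract even in the mock.
	assert.Equal(t, []int{1}, h.GetPids())
}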

@ -59,7 +59,7 @@ func TestMockHypervisorStopSandbox(t *testing.T) {
func TestMockHypervisorAddDevice(t *testing.T) { func TestMockHypervisorAddDevice(t *testing.T) {
var m *mockHypervisor var m *mockHypervisor
assert.NoError(t, m.addDevice(context.Background(), nil, ImgDev)) assert.NoError(t, m.AddDevice(context.Background(), nil, ImgDev))
} }
func TestMockHypervisorGetSandboxConsole(t *testing.T) { func TestMockHypervisorGetSandboxConsole(t *testing.T) {
@ -67,7 +67,7 @@ func TestMockHypervisorGetSandboxConsole(t *testing.T) {
expected := "" expected := ""
expectedProto := "" expectedProto := ""
proto, result, err := m.getSandboxConsole(context.Background(), "testSandboxID") proto, result, err := m.GetSandboxConsole(context.Background(), "testSandboxID")
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, result, expected) assert.Equal(t, result, expected)
assert.Equal(t, proto, expectedProto) assert.Equal(t, proto, expectedProto)
@ -82,19 +82,19 @@ func TestMockHypervisorSaveSandbox(t *testing.T) {
func TestMockHypervisorDisconnect(t *testing.T) { func TestMockHypervisorDisconnect(t *testing.T) {
var m *mockHypervisor var m *mockHypervisor
m.disconnect(context.Background()) m.Disconnect(context.Background())
} }
func TestMockHypervisorCheck(t *testing.T) { func TestMockHypervisorCheck(t *testing.T) {
var m *mockHypervisor var m *mockHypervisor
assert.NoError(t, m.check()) assert.NoError(t, m.Check())
} }
func TestMockGenerateSocket(t *testing.T) { func TestMockGenerateSocket(t *testing.T) {
var m *mockHypervisor var m *mockHypervisor
i, err := m.generateSocket("a") i, err := m.GenerateSocket("a")
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, i) assert.NotNil(t, i)
} }

@ -140,7 +140,7 @@ func (m *monitor) watchAgent(ctx context.Context) {
} }
func (m *monitor) watchHypervisor(ctx context.Context) error { func (m *monitor) watchHypervisor(ctx context.Context) error {
if err := m.sandbox.hypervisor.check(); err != nil { if err := m.sandbox.hypervisor.Check(); err != nil {
m.notify(ctx, errors.Wrapf(err, "failed to ping hypervisor process")) m.notify(ctx, errors.Wrapf(err, "failed to ping hypervisor process"))
return err return err
} }
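watchHypervisor reduces to a periodic ping on the exported Check method; a minimal sketch, where the tick interval and the use of a context for shutdown are assumptions, not part of this change:
// Sketch: periodic hypervisor liveness probe built on Check().
func watchLoop(ctx context.Context, h hypervisor, every time.Duration) error {
	t := time.NewTicker(every)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-t.C:
			if err := h.Check(); err != nil {
				return errors.Wrapf(err, "failed to ping hypervisor process")
			}
		}
	}
}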

@ -495,7 +495,7 @@ func isSecret(path string) bool {
// files observed is greater than limit, break and return -1 // files observed is greater than limit, break and return -1
func countFiles(path string, limit int) (numFiles int, err error) { func countFiles(path string, limit int) (numFiles int, err error) {
// First, check to see if the path exists // First, Check to see if the path exists
file, err := os.Stat(path) file, err := os.Stat(path)
if os.IsNotExist(err) { if os.IsNotExist(err) {
return 0, err return 0, err
@ -531,7 +531,7 @@ func countFiles(path string, limit int) (numFiles int, err error) {
func isWatchableMount(path string) bool { func isWatchableMount(path string) bool {
if isSecret(path) || isConfigMap(path) { if isSecret(path) || isConfigMap(path) {
// we have a cap on number of FDs which can be present in mount // we have a cap on number of FDs which can be present in mount
// to determine if watchable. A similar check exists within the agent, // to determine if watchable. A similar Check exists within the agent,
// which may or may not help handle case where extra files are added to // which may or may not help handle case where extra files are added to
// a mount after the fact // a mount after the fact
count, _ := countFiles(path, 8) count, _ := countFiles(path, 8)

@ -472,7 +472,7 @@ func TestBindUnmountContainerRootfsENOENTNotError(t *testing.T) {
cID := "contIDTest" cID := "contIDTest"
assert := assert.New(t) assert := assert.New(t)
// check to make sure the file doesn't exist // Check to make sure the file doesn't exist
testPath := filepath.Join(testMnt, sID, cID, rootfsDir) testPath := filepath.Join(testMnt, sID, cID, rootfsDir)
if _, err := os.Stat(testPath); !os.IsNotExist(err) { if _, err := os.Stat(testPath); !os.IsNotExist(err) {
assert.NoError(os.Remove(testPath)) assert.NoError(os.Remove(testPath))

@ -59,7 +59,7 @@ const (
// NetXConnectNoneModel can be used when the VM is in the host network namespace // NetXConnectNoneModel can be used when the VM is in the host network namespace
NetXConnectNoneModel NetXConnectNoneModel
// NetXConnectInvalidModel is the last item to check valid values by IsValid() // NetXConnectInvalidModel is the last item to Check valid values by IsValid()
NetXConnectInvalidModel NetXConnectInvalidModel
) )
@ -435,16 +435,16 @@ func xConnectVMNetwork(ctx context.Context, endpoint Endpoint, h hypervisor) err
netPair := endpoint.NetworkPair() netPair := endpoint.NetworkPair()
queues := 0 queues := 0
caps := h.capabilities(ctx) caps := h.Capabilities(ctx)
if caps.IsMultiQueueSupported() { if caps.IsMultiQueueSupported() {
queues = int(h.hypervisorConfig().NumVCPUs) queues = int(h.HypervisorConfig().NumVCPUs)
} }
var disableVhostNet bool var disableVhostNet bool
if rootless.IsRootless() { if rootless.IsRootless() {
disableVhostNet = true disableVhostNet = true
} else { } else {
disableVhostNet = h.hypervisorConfig().DisableVhostNet disableVhostNet = h.HypervisorConfig().DisableVhostNet
} }
if netPair.NetInterworkingModel == NetXConnectDefaultModel { if netPair.NetInterworkingModel == NetXConnectDefaultModel {
@ -518,7 +518,7 @@ func createFds(device string, numFds int) ([]*os.File, error) {
// //
// Till that bug is fixed we need to pick a random non conflicting index and try to // Till that bug is fixed we need to pick a random non conflicting index and try to
// create a link. If that fails, we need to try with another. // create a link. If that fails, we need to try with another.
// All the kernel does not check if the link id conflicts with a link id on the host // All the kernel does not Check if the link id conflicts with a link id on the host
// hence we need to offset the link id to prevent any overlaps with the host index // hence we need to offset the link id to prevent any overlaps with the host index
// //
// Here the kernel will ensure that there is no race condition // Here the kernel will ensure that there is no race condition
@ -1356,15 +1356,15 @@ func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, ho
} }
} }
if !s.hypervisor.isRateLimiterBuiltin() { if !s.hypervisor.IsRateLimiterBuiltin() {
rxRateLimiterMaxRate := s.hypervisor.hypervisorConfig().RxRateLimiterMaxRate rxRateLimiterMaxRate := s.hypervisor.HypervisorConfig().RxRateLimiterMaxRate
if rxRateLimiterMaxRate > 0 { if rxRateLimiterMaxRate > 0 {
networkLogger().Info("Add Rx Rate Limiter") networkLogger().Info("Add Rx Rate Limiter")
if err := addRxRateLimiter(endpoint, rxRateLimiterMaxRate); err != nil { if err := addRxRateLimiter(endpoint, rxRateLimiterMaxRate); err != nil {
return err return err
} }
} }
txRateLimiterMaxRate := s.hypervisor.hypervisorConfig().TxRateLimiterMaxRate txRateLimiterMaxRate := s.hypervisor.HypervisorConfig().TxRateLimiterMaxRate
if txRateLimiterMaxRate > 0 { if txRateLimiterMaxRate > 0 {
networkLogger().Info("Add Tx Rate Limiter") networkLogger().Info("Add Tx Rate Limiter")
if err := addTxRateLimiter(endpoint, txRateLimiterMaxRate); err != nil { if err := addTxRateLimiter(endpoint, txRateLimiterMaxRate); err != nil {
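The wiring above reads as: host-side Rx/Tx traffic shaping is only attached when the hypervisor reports no built-in rate limiter. The same flow, sketched with HypervisorConfig read once (a minor restructuring, not from this commit):
if !s.hypervisor.IsRateLimiterBuiltin() {
	cfg := s.hypervisor.HypervisorConfig()
	if rate := cfg.RxRateLimiterMaxRate; rate > 0 {
		if err := addRxRateLimiter(endpoint, rate); err != nil {
			return err
		}
	}
	if rate := cfg.TxRateLimiterMaxRate; rate > 0 {
		if err := addTxRateLimiter(endpoint, rate); err != nil {
			return err
		}
	}
}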
@ -1560,7 +1560,7 @@ func addHTBQdisc(linkIndex int, maxRate uint64) error {
// By redirecting interface ingress traffic to ifb and treat it as egress traffic there, // By redirecting interface ingress traffic to ifb and treat it as egress traffic there,
// we could do network shaping to interface inbound traffic. // we could do network shaping to interface inbound traffic.
func addIFBDevice() (int, error) { func addIFBDevice() (int, error) {
// check whether host supports ifb // Check whether host supports ifb
if ok, err := utils.SupportsIfb(); !ok { if ok, err := utils.SupportsIfb(); !ok {
return -1, err return -1, err
} }

@ -59,7 +59,7 @@ func (s *Sandbox) dumpState(ss *persistapi.SandboxState, cs map[string]persistap
} }
func (s *Sandbox) dumpHypervisor(ss *persistapi.SandboxState) { func (s *Sandbox) dumpHypervisor(ss *persistapi.SandboxState) {
ss.HypervisorState = s.hypervisor.save() ss.HypervisorState = s.hypervisor.Save()
// BlockIndexMap will be moved from sandbox state to hypervisor state later // BlockIndexMap will be moved from sandbox state to hypervisor state later
ss.HypervisorState.BlockIndexMap = s.state.BlockIndexMap ss.HypervisorState.BlockIndexMap = s.state.BlockIndexMap
} }
@ -316,7 +316,7 @@ func (c *Container) loadContState(cs persistapi.ContainerState) {
} }
func (s *Sandbox) loadHypervisor(hs persistapi.HypervisorState) { func (s *Sandbox) loadHypervisor(hs persistapi.HypervisorState) {
s.hypervisor.load(hs) s.hypervisor.Load(hs)
} }
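Save and Load are symmetric: dumpHypervisor captures hypervisor state into the persist record and loadHypervisor feeds it back on restore. A minimal round-trip sketch under that assumption:
hs := s.hypervisor.Save()                // e.g. pid, UUID, hotplugged resources
hs.BlockIndexMap = s.state.BlockIndexMap // still carried on sandbox state for now
// ... write hs out via the persist driver, read it back on restore ...
s.hypervisor.Load(hs)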
func (s *Sandbox) loadAgent(as persistapi.AgentState) { func (s *Sandbox) loadAgent(as persistapi.AgentState) {

@ -55,7 +55,7 @@ func TestSandboxRestore(t *testing.T) {
assert.Equal(sandbox.state.GuestMemoryBlockSizeMB, uint32(0)) assert.Equal(sandbox.state.GuestMemoryBlockSizeMB, uint32(0))
assert.Equal(len(sandbox.state.BlockIndexMap), 0) assert.Equal(len(sandbox.state.BlockIndexMap), 0)
// set state data and save again // set state data and Save again
sandbox.state.State = types.StateString("running") sandbox.state.State = types.StateString("running")
sandbox.state.GuestMemoryBlockSizeMB = uint32(1024) sandbox.state.GuestMemoryBlockSizeMB = uint32(1024)
sandbox.state.BlockIndexMap[2] = struct{}{} sandbox.state.BlockIndexMap[2] = struct{}{}

@ -126,7 +126,7 @@ const (
// memory dump format will be set to elf // memory dump format will be set to elf
memoryDumpFormat = "elf" memoryDumpFormat = "elf"
qmpCapErrMsg = "Failed to negotiate QMP capabilities" qmpCapErrMsg = "Failed to negotiate QMP Capabilities"
qmpExecCatCmd = "exec:cat" qmpExecCatCmd = "exec:cat"
scsiControllerID = "scsi0" scsiControllerID = "scsi0"
@ -195,14 +195,14 @@ func (q *qemu) kernelParameters() string {
} }
// Adds all capabilities supported by qemu implementation of hypervisor interface // Adds all capabilities supported by qemu implementation of hypervisor interface
func (q *qemu) capabilities(ctx context.Context) types.Capabilities { func (q *qemu) Capabilities(ctx context.Context) types.Capabilities {
span, _ := katatrace.Trace(ctx, q.Logger(), "capabilities", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, _ := katatrace.Trace(ctx, q.Logger(), "Capabilities", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End() defer span.End()
return q.arch.capabilities() return q.arch.capabilities()
} }
func (q *qemu) hypervisorConfig() HypervisorConfig { func (q *qemu) HypervisorConfig() HypervisorConfig {
return q.config return q.config
} }
@ -388,7 +388,7 @@ func (q *qemu) createQmpSocket() ([]govmmQemu.QMPSocket, error) {
func (q *qemu) buildDevices(ctx context.Context, initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) { func (q *qemu) buildDevices(ctx context.Context, initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) {
var devices []govmmQemu.Device var devices []govmmQemu.Device
_, console, err := q.getSandboxConsole(ctx, q.id) _, console, err := q.GetSandboxConsole(ctx, q.id)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -926,7 +926,7 @@ func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
"qmp-major-version": ver.Major, "qmp-major-version": ver.Major,
"qmp-minor-version": ver.Minor, "qmp-minor-version": ver.Minor,
"qmp-micro-version": ver.Micro, "qmp-micro-version": ver.Micro,
"qmp-capabilities": strings.Join(ver.Capabilities, ","), "qmp-Capabilities": strings.Join(ver.Capabilities, ","),
}).Infof("QMP details") }).Infof("QMP details")
if err = q.qmpMonitorCh.qmp.ExecuteQMPCapabilities(q.qmpMonitorCh.ctx); err != nil { if err = q.qmpMonitorCh.qmp.ExecuteQMPCapabilities(q.qmpMonitorCh.ctx); err != nil {
@ -971,7 +971,7 @@ func (q *qemu) stopSandbox(ctx context.Context, waitOnly bool) error {
} }
if waitOnly { if waitOnly {
pids := q.getPids() pids := q.GetPids()
if len(pids) == 0 { if len(pids) == 0 {
return errors.New("cannot determine QEMU PID") return errors.New("cannot determine QEMU PID")
} }
@ -999,17 +999,17 @@ func (q *qemu) stopSandbox(ctx context.Context, waitOnly bool) error {
func (q *qemu) cleanupVM() error { func (q *qemu) cleanupVM() error {
// cleanup vm path // Cleanup vm path
dir := filepath.Join(q.store.RunVMStoragePath(), q.id) dir := filepath.Join(q.store.RunVMStoragePath(), q.id)
// If it's a symlink, remove both dir and the target. // If it's a symlink, remove both dir and the target.
// This can happen when vm template links a sandbox to a vm. // This can happen when vm template links a sandbox to a vm.
link, err := filepath.EvalSymlinks(dir) link, err := filepath.EvalSymlinks(dir)
if err != nil { if err != nil {
// Well, it's just cleanup failure. Let's ignore it. // Well, it's just Cleanup failure. Let's ignore it.
q.Logger().WithError(err).WithField("dir", dir).Warn("failed to resolve vm path") q.Logger().WithError(err).WithField("dir", dir).Warn("failed to resolve vm path")
} }
q.Logger().WithField("link", link).WithField("dir", dir).Infof("cleanup vm path") q.Logger().WithField("link", link).WithField("dir", dir).Infof("Cleanup vm path")
if err := os.RemoveAll(dir); err != nil { if err := os.RemoveAll(dir); err != nil {
q.Logger().WithError(err).Warnf("failed to remove vm path %s", dir) q.Logger().WithError(err).Warnf("failed to remove vm path %s", dir)
@ -1149,18 +1149,18 @@ func (q *qemu) dumpSandboxMetaInfo(dumpSavePath string) {
// copy state from /run/vc/sbs to memory dump directory // copy state from /run/vc/sbs to memory dump directory
statePath := filepath.Join(q.store.RunStoragePath(), q.id) statePath := filepath.Join(q.store.RunStoragePath(), q.id)
command := []string{"/bin/cp", "-ar", statePath, dumpStatePath} command := []string{"/bin/cp", "-ar", statePath, dumpStatePath}
q.Logger().WithField("command", command).Info("try to save sandbox state") q.Logger().WithField("command", command).Info("try to Save sandbox state")
if output, err := pkgUtils.RunCommandFull(command, true); err != nil { if output, err := pkgUtils.RunCommandFull(command, true); err != nil {
q.Logger().WithError(err).WithField("output", output).Error("failed to save state") q.Logger().WithError(err).WithField("output", output).Error("failed to Save state")
} }
// save hypervisor meta information // Save hypervisor meta information
fileName := filepath.Join(dumpSavePath, "hypervisor.conf") fileName := filepath.Join(dumpSavePath, "hypervisor.conf")
data, _ := json.MarshalIndent(q.config, "", " ") data, _ := json.MarshalIndent(q.config, "", " ")
if err := ioutil.WriteFile(fileName, data, defaultFilePerms); err != nil { if err := ioutil.WriteFile(fileName, data, defaultFilePerms); err != nil {
q.Logger().WithError(err).WithField("hypervisor.conf", data).Error("write to hypervisor.conf file failed") q.Logger().WithError(err).WithField("hypervisor.conf", data).Error("write to hypervisor.conf file failed")
} }
// save hypervisor version // Save hypervisor version
hyperVisorVersion, err := pkgUtils.RunCommand([]string{q.config.HypervisorPath, "--version"}) hyperVisorVersion, err := pkgUtils.RunCommand([]string{q.config.HypervisorPath, "--version"})
if err != nil { if err != nil {
q.Logger().WithError(err).WithField("HypervisorPath", data).Error("failed to get hypervisor version") q.Logger().WithError(err).WithField("HypervisorPath", data).Error("failed to get hypervisor version")
@ -1188,11 +1188,11 @@ func (q *qemu) dumpGuestMemory(dumpSavePath string) error {
return err return err
} }
// save meta information for sandbox // Save meta information for sandbox
q.dumpSandboxMetaInfo(dumpSavePath) q.dumpSandboxMetaInfo(dumpSavePath)
q.Logger().Info("dump sandbox meta information completed") q.Logger().Info("dump sandbox meta information completed")
// check device free space and estimated dump size // Check device free space and estimated dump size
if err := q.canDumpGuestMemory(dumpSavePath); err != nil { if err := q.canDumpGuestMemory(dumpSavePath); err != nil {
q.Logger().Warnf("can't dump guest memory: %s", err.Error()) q.Logger().Warnf("can't dump guest memory: %s", err.Error())
return err return err
@ -1511,7 +1511,7 @@ func (q *qemu) hotplugVFIODevice(ctx context.Context, device *config.VFIODev, op
} }
devID := device.ID devID := device.ID
machineType := q.hypervisorConfig().HypervisorMachineType machineType := q.HypervisorConfig().HypervisorMachineType
if op == AddDevice { if op == AddDevice {
@ -1723,8 +1723,8 @@ func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType D
} }
} }
func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) { func (q *qemu) HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, ctx := katatrace.Trace(ctx, q.Logger(), "hotplugAddDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, ctx := katatrace.Trace(ctx, q.Logger(), "HotplugAddDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
katatrace.AddTag(span, "device", devInfo) katatrace.AddTag(span, "device", devInfo)
defer span.End() defer span.End()
@ -1736,8 +1736,8 @@ func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devTyp
return data, nil return data, nil
} }
func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) { func (q *qemu) HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error) {
span, ctx := katatrace.Trace(ctx, q.Logger(), "hotplugRemoveDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, ctx := katatrace.Trace(ctx, q.Logger(), "HotplugRemoveDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
katatrace.AddTag(span, "device", devInfo) katatrace.AddTag(span, "device", devInfo)
defer span.End() defer span.End()
@ -1819,7 +1819,7 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
continue continue
} }
// a new vCPU was added, update list of hotplugged vCPUs and check if all vCPUs were added // a new vCPU was added, update list of hotplugged vCPUs and Check if all vCPUs were added
q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, CPUDevice{cpuID}) q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, CPUDevice{cpuID})
hotpluggedVCPUs++ hotpluggedVCPUs++
if hotpluggedVCPUs == amount { if hotpluggedVCPUs == amount {
@ -1964,9 +1964,9 @@ func (q *qemu) resumeSandbox(ctx context.Context) error {
} }
// addDevice will add extra devices to Qemu command line. // addDevice will add extra devices to Qemu command line.
func (q *qemu) addDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error { func (q *qemu) AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error {
var err error var err error
span, _ := katatrace.Trace(ctx, q.Logger(), "addDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, _ := katatrace.Trace(ctx, q.Logger(), "AddDevice", qemuTracingTags, map[string]string{"sandbox_id": q.id})
katatrace.AddTag(span, "device", devInfo) katatrace.AddTag(span, "device", devInfo)
defer span.End() defer span.End()
@ -2024,8 +2024,8 @@ func (q *qemu) addDevice(ctx context.Context, devInfo interface{}, devType Devic
// getSandboxConsole builds the path of the console where we can read // getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox. // logs coming from the sandbox.
func (q *qemu) getSandboxConsole(ctx context.Context, id string) (string, string, error) { func (q *qemu) GetSandboxConsole(ctx context.Context, id string) (string, string, error) {
span, _ := katatrace.Trace(ctx, q.Logger(), "getSandboxConsole", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, _ := katatrace.Trace(ctx, q.Logger(), "GetSandboxConsole", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End() defer span.End()
consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket) consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket)
@ -2037,7 +2037,7 @@ func (q *qemu) getSandboxConsole(ctx context.Context, id string) (string, string
} }
func (q *qemu) saveSandbox() error { func (q *qemu) saveSandbox() error {
q.Logger().Info("save sandbox") q.Logger().Info("Save sandbox")
if err := q.qmpSetup(); err != nil { if err := q.qmpSetup(); err != nil {
return err return err
@ -2089,8 +2089,8 @@ func (q *qemu) waitMigration() error {
return nil return nil
} }
func (q *qemu) disconnect(ctx context.Context) { func (q *qemu) Disconnect(ctx context.Context) {
span, _ := katatrace.Trace(ctx, q.Logger(), "disconnect", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, _ := katatrace.Trace(ctx, q.Logger(), "Disconnect", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End() defer span.End()
q.qmpShutdown() q.qmpShutdown()
@ -2107,7 +2107,7 @@ func (q *qemu) disconnect(ctx context.Context) {
// the memory to remove has to be at least the size of one slot. // the memory to remove has to be at least the size of one slot.
// To return memory back we are resizing the VM memory balloon. // To return memory back we are resizing the VM memory balloon.
// A longer term solution is evaluate solutions like virtio-mem // A longer term solution is evaluate solutions like virtio-mem
func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) { func (q *qemu) ResizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, MemoryDevice, error) {
currentMemory := q.config.MemorySize + uint32(q.state.HotpluggedMemory) currentMemory := q.config.MemorySize + uint32(q.state.HotpluggedMemory)
if err := q.qmpSetup(); err != nil { if err := q.qmpSetup(); err != nil {
@ -2138,7 +2138,7 @@ func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSiz
addMemDevice.SizeMB = int(memHotplugMB) addMemDevice.SizeMB = int(memHotplugMB)
addMemDevice.Probe = probe addMemDevice.Probe = probe
data, err := q.hotplugAddDevice(ctx, &addMemDevice, MemoryDev) data, err := q.HotplugAddDevice(ctx, &addMemDevice, MemoryDev)
if err != nil { if err != nil {
return currentMemory, addMemDevice, err return currentMemory, addMemDevice, err
} }
@ -2158,7 +2158,7 @@ func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSiz
addMemDevice.SizeMB = int(memHotunplugMB) addMemDevice.SizeMB = int(memHotunplugMB)
addMemDevice.Probe = probe addMemDevice.Probe = probe
data, err := q.hotplugRemoveDevice(ctx, &addMemDevice, MemoryDev) data, err := q.HotplugRemoveDevice(ctx, &addMemDevice, MemoryDev)
if err != nil { if err != nil {
return currentMemory, addMemDevice, err return currentMemory, addMemDevice, err
} }
@ -2166,7 +2166,7 @@ func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSiz
if !ok { if !ok {
return currentMemory, addMemDevice, fmt.Errorf("Could not get the memory removed, got %+v", data) return currentMemory, addMemDevice, fmt.Errorf("Could not get the memory removed, got %+v", data)
} }
//FIXME: This is to check memory hotplugRemoveDevice reported 0, as this is not supported. //FIXME: This is to Check memory HotplugRemoveDevice reported 0, as this is not supported.
// In the future if this is implemented this validation should be removed. // In the future if this is implemented this validation should be removed.
if memoryRemoved != 0 { if memoryRemoved != 0 {
return currentMemory, addMemDevice, fmt.Errorf("memory hot unplug is not supported, something went wrong") return currentMemory, addMemDevice, fmt.Errorf("memory hot unplug is not supported, something went wrong")
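Overall, ResizeMemory computes the delta against currentMemory, rounds it to the hotplug granularity via calcHotplugMemMiBSize, and dispatches to HotplugAddDevice or HotplugRemoveDevice with a MemoryDevice. A compressed sketch of that shape (not the exact implementation; clamping against the maximum and the returned sizes are elided):
delta := int64(reqMemMB) - int64(currentMemory)
switch {
case delta > 0: // hotplug
	sizeMB, _ := calcHotplugMemMiBSize(uint32(delta), memoryBlockSizeMB)
	dev := &MemoryDevice{SizeMB: int(sizeMB), Probe: probe}
	_, err = q.HotplugAddDevice(ctx, dev, MemoryDev)
case delta < 0: // hot-unplug, expected to report 0 bytes removed today
	sizeMB, _ := calcHotplugMemMiBSize(uint32(-delta), memoryBlockSizeMB)
	dev := &MemoryDevice{SizeMB: int(sizeMB), Probe: probe}
	_, err = q.HotplugRemoveDevice(ctx, dev, MemoryDev)
}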
@ -2308,8 +2308,8 @@ func genericAppendPCIeRootPort(devices []govmmQemu.Device, number uint32, machin
return devices return devices
} }
func (q *qemu) getThreadIDs(ctx context.Context) (VcpuThreadIDs, error) { func (q *qemu) GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error) {
span, _ := katatrace.Trace(ctx, q.Logger(), "getThreadIDs", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, _ := katatrace.Trace(ctx, q.Logger(), "GetThreadIDs", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End() defer span.End()
tid := VcpuThreadIDs{} tid := VcpuThreadIDs{}
@ -2340,7 +2340,7 @@ func calcHotplugMemMiBSize(mem uint32, memorySectionSizeMB uint32) (uint32, erro
return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
} }
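Worked example, not in the diff: calcHotplugMemMiBSize rounds a request up to the hotplug granularity, so with memorySectionSizeMB = 128 a request of 1000 MiB becomes ceil(1000/128) * 128 = 8 * 128 = 1024 MiB, keeping hotplugged memory on a section boundary.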
func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) { func (q *qemu) ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
currentVCPUs = q.config.NumVCPUs + uint32(len(q.state.HotpluggedVCPUs)) currentVCPUs = q.config.NumVCPUs + uint32(len(q.state.HotpluggedVCPUs))
newVCPUs = currentVCPUs newVCPUs = currentVCPUs
@ -2348,7 +2348,7 @@ func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs u
case currentVCPUs < reqVCPUs: case currentVCPUs < reqVCPUs:
//hotplug //hotplug
addCPUs := reqVCPUs - currentVCPUs addCPUs := reqVCPUs - currentVCPUs
data, err := q.hotplugAddDevice(ctx, addCPUs, CpuDev) data, err := q.HotplugAddDevice(ctx, addCPUs, CpuDev)
if err != nil { if err != nil {
return currentVCPUs, newVCPUs, err return currentVCPUs, newVCPUs, err
} }
@ -2360,7 +2360,7 @@ func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs u
case currentVCPUs > reqVCPUs: case currentVCPUs > reqVCPUs:
//hotunplug //hotunplug
removeCPUs := currentVCPUs - reqVCPUs removeCPUs := currentVCPUs - reqVCPUs
data, err := q.hotplugRemoveDevice(ctx, removeCPUs, CpuDev) data, err := q.HotplugRemoveDevice(ctx, removeCPUs, CpuDev)
if err != nil { if err != nil {
return currentVCPUs, newVCPUs, err return currentVCPUs, newVCPUs, err
} }
@ -2373,8 +2373,8 @@ func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs u
return currentVCPUs, newVCPUs, nil return currentVCPUs, newVCPUs, nil
} }
func (q *qemu) cleanup(ctx context.Context) error { func (q *qemu) Cleanup(ctx context.Context) error {
span, _ := katatrace.Trace(ctx, q.Logger(), "cleanup", qemuTracingTags, map[string]string{"sandbox_id": q.id}) span, _ := katatrace.Trace(ctx, q.Logger(), "Cleanup", qemuTracingTags, map[string]string{"sandbox_id": q.id})
defer span.End() defer span.End()
for _, fd := range q.fds { for _, fd := range q.fds {
@ -2387,7 +2387,7 @@ func (q *qemu) cleanup(ctx context.Context) error {
return nil return nil
} }
func (q *qemu) getPids() []int { func (q *qemu) GetPids() []int {
data, err := ioutil.ReadFile(q.qemuConfig.PidFile) data, err := ioutil.ReadFile(q.qemuConfig.PidFile)
if err != nil { if err != nil {
q.Logger().WithError(err).Error("Could not read qemu pid file") q.Logger().WithError(err).Error("Could not read qemu pid file")
@ -2408,7 +2408,7 @@ func (q *qemu) getPids() []int {
return pids return pids
} }
func (q *qemu) getVirtioFsPid() *int { func (q *qemu) GetVirtioFsPid() *int {
return &q.state.VirtiofsdPid return &q.state.VirtiofsdPid
} }
@ -2454,7 +2454,7 @@ func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) { func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
q.qmpShutdown() q.qmpShutdown()
q.cleanup(ctx) q.Cleanup(ctx)
qp := qemuGrpc{ qp := qemuGrpc{
ID: q.id, ID: q.id,
QmpChannelpath: q.qmpMonitorCh.path, QmpChannelpath: q.qmpMonitorCh.path,
@ -2467,14 +2467,14 @@ func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
return json.Marshal(&qp) return json.Marshal(&qp)
} }
func (q *qemu) save() (s persistapi.HypervisorState) { func (q *qemu) Save() (s persistapi.HypervisorState) {
// If QEMU isn't even running, there isn't any state to save // If QEMU isn't even running, there isn't any state to Save
if q.stopped { if q.stopped {
return return
} }
pids := q.getPids() pids := q.GetPids()
if len(pids) != 0 { if len(pids) != 0 {
s.Pid = pids[0] s.Pid = pids[0]
} }
@ -2502,7 +2502,7 @@ func (q *qemu) save() (s persistapi.HypervisorState) {
return return
} }
func (q *qemu) load(s persistapi.HypervisorState) { func (q *qemu) Load(s persistapi.HypervisorState) {
q.state.UUID = s.UUID q.state.UUID = s.UUID
q.state.HotpluggedMemory = s.HotpluggedMemory q.state.HotpluggedMemory = s.HotpluggedMemory
q.state.HotplugVFIOOnRootBus = s.HotplugVFIOOnRootBus q.state.HotplugVFIOOnRootBus = s.HotplugVFIOOnRootBus
@ -2520,7 +2520,7 @@ func (q *qemu) load(s persistapi.HypervisorState) {
} }
} }
func (q *qemu) check() error { func (q *qemu) Check() error {
q.memoryDumpFlag.Lock() q.memoryDumpFlag.Lock()
defer q.memoryDumpFlag.Unlock() defer q.memoryDumpFlag.Unlock()
@ -2540,11 +2540,11 @@ func (q *qemu) check() error {
return nil return nil
} }
func (q *qemu) generateSocket(id string) (interface{}, error) { func (q *qemu) GenerateSocket(id string) (interface{}, error) {
return generateVMSocket(id, q.store.RunVMStoragePath()) return generateVMSocket(id, q.store.RunVMStoragePath())
} }
func (q *qemu) isRateLimiterBuiltin() bool { func (q *qemu) IsRateLimiterBuiltin() bool {
return false return false
} }

@ -119,7 +119,7 @@ func TestQemuAmd64AppendImage(t *testing.T) {
imageStat, err := f.Stat() imageStat, err := f.Stat()
assert.NoError(err) assert.NoError(err)
// save default supportedQemuMachines options // Save default supportedQemuMachines options
machinesCopy := make([]govmmQemu.Machine, len(supportedQemuMachines)) machinesCopy := make([]govmmQemu.Machine, len(supportedQemuMachines))
assert.Equal(len(supportedQemuMachines), copy(machinesCopy, supportedQemuMachines)) assert.Equal(len(supportedQemuMachines), copy(machinesCopy, supportedQemuMachines))

@ -211,7 +211,7 @@ func testQemuAddDevice(t *testing.T, devInfo interface{}, devType DeviceType, ex
arch: &qemuArchBase{}, arch: &qemuArchBase{},
} }
err := q.addDevice(context.Background(), devInfo, devType) err := q.AddDevice(context.Background(), devInfo, devType)
assert.NoError(err) assert.NoError(err)
assert.Exactly(q.qemuConfig.Devices, expected) assert.Exactly(q.qemuConfig.Devices, expected)
} }
@ -332,7 +332,7 @@ func TestQemuGetSandboxConsole(t *testing.T) {
sandboxID := "testSandboxID" sandboxID := "testSandboxID"
expected := filepath.Join(q.store.RunVMStoragePath(), sandboxID, consoleSocket) expected := filepath.Join(q.store.RunVMStoragePath(), sandboxID, consoleSocket)
proto, result, err := q.getSandboxConsole(q.ctx, sandboxID) proto, result, err := q.GetSandboxConsole(q.ctx, sandboxID)
assert.NoError(err) assert.NoError(err)
assert.Equal(result, expected) assert.Equal(result, expected)
assert.Equal(proto, consoleProtoUnix) assert.Equal(proto, consoleProtoUnix)
@ -345,7 +345,7 @@ func TestQemuCapabilities(t *testing.T) {
arch: &qemuArchBase{}, arch: &qemuArchBase{},
} }
caps := q.capabilities(q.ctx) caps := q.Capabilities(q.ctx)
assert.True(caps.IsBlockDeviceHotplugSupported()) assert.True(caps.IsBlockDeviceHotplugSupported())
} }
@ -401,9 +401,9 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) {
config: qemuConfig, config: qemuConfig,
} }
_, err := q.hotplugAddDevice(context.Background(), &MemoryDevice{0, 128, uint64(0), false}, FsDev) _, err := q.HotplugAddDevice(context.Background(), &MemoryDevice{0, 128, uint64(0), false}, FsDev)
assert.Error(err) assert.Error(err)
_, err = q.hotplugRemoveDevice(context.Background(), &MemoryDevice{0, 128, uint64(0), false}, FsDev) _, err = q.HotplugRemoveDevice(context.Background(), &MemoryDevice{0, 128, uint64(0), false}, FsDev)
assert.Error(err) assert.Error(err)
} }
@ -430,7 +430,7 @@ func TestQemuCleanup(t *testing.T) {
config: newQemuConfig(), config: newQemuConfig(),
} }
err := q.cleanup(q.ctx) err := q.Cleanup(q.ctx)
assert.Nil(err) assert.Nil(err)
} }
@ -554,7 +554,7 @@ func TestQemuGetpids(t *testing.T) {
qemuConfig := newQemuConfig() qemuConfig := newQemuConfig()
q := &qemu{} q := &qemu{}
pids := q.getPids() pids := q.GetPids()
assert.NotNil(pids) assert.NotNil(pids)
assert.True(len(pids) == 1) assert.True(len(pids) == 1)
assert.True(pids[0] == 0) assert.True(pids[0] == 0)
@ -569,18 +569,18 @@ func TestQemuGetpids(t *testing.T) {
defer os.Remove(tmpfile) defer os.Remove(tmpfile)
q.qemuConfig.PidFile = tmpfile q.qemuConfig.PidFile = tmpfile
pids = q.getPids() pids = q.GetPids()
assert.True(len(pids) == 1) assert.True(len(pids) == 1)
assert.True(pids[0] == 0) assert.True(pids[0] == 0)
err = ioutil.WriteFile(tmpfile, []byte("100"), 0) err = ioutil.WriteFile(tmpfile, []byte("100"), 0)
assert.Nil(err) assert.Nil(err)
pids = q.getPids() pids = q.GetPids()
assert.True(len(pids) == 1) assert.True(len(pids) == 1)
assert.True(pids[0] == 100) assert.True(pids[0] == 100)
q.state.VirtiofsdPid = 200 q.state.VirtiofsdPid = 200
pids = q.getPids() pids = q.GetPids()
assert.True(len(pids) == 2) assert.True(len(pids) == 2)
assert.True(pids[0] == 100) assert.True(pids[0] == 100)
assert.True(pids[1] == 200) assert.True(pids[1] == 200)

@ -259,7 +259,7 @@ func (s *Sandbox) GetNetNs() string {
// GetHypervisorPid returns the hypervisor's pid. // GetHypervisorPid returns the hypervisor's pid.
func (s *Sandbox) GetHypervisorPid() (int, error) { func (s *Sandbox) GetHypervisorPid() (int, error) {
pids := s.hypervisor.getPids() pids := s.hypervisor.GetPids()
if len(pids) == 0 || pids[0] == 0 { if len(pids) == 0 || pids[0] == 0 {
return -1, fmt.Errorf("Invalid hypervisor PID: %+v", pids) return -1, fmt.Errorf("Invalid hypervisor PID: %+v", pids)
} }
@ -294,7 +294,7 @@ func (s *Sandbox) Release(ctx context.Context) error {
if s.monitor != nil { if s.monitor != nil {
s.monitor.stop() s.monitor.stop()
} }
s.hypervisor.disconnect(ctx) s.hypervisor.Disconnect(ctx)
return s.agent.disconnect(ctx) return s.agent.disconnect(ctx)
} }
@ -474,7 +474,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
return s, nil return s, nil
} }
// Below code path is called only during create, because of earlier check. // Below code path is called only during create, because of earlier Check.
if err := s.agent.createSandbox(ctx, s); err != nil { if err := s.agent.createSandbox(ctx, s); err != nil {
return nil, err return nil, err
} }
@ -755,7 +755,7 @@ func (s *Sandbox) Delete(ctx context.Context) error {
if !rootless.IsRootless() { if !rootless.IsRootless() {
if err := s.cgroupsDelete(); err != nil { if err := s.cgroupsDelete(); err != nil {
s.Logger().WithError(err).Error("failed to cleanup cgroups") s.Logger().WithError(err).Error("failed to Cleanup cgroups")
} }
} }
@ -763,8 +763,8 @@ func (s *Sandbox) Delete(ctx context.Context) error {
s.monitor.stop() s.monitor.stop()
} }
if err := s.hypervisor.cleanup(ctx); err != nil { if err := s.hypervisor.Cleanup(ctx); err != nil {
s.Logger().WithError(err).Error("failed to cleanup hypervisor") s.Logger().WithError(err).Error("failed to Cleanup hypervisor")
} }
s.agent.cleanup(ctx, s) s.agent.cleanup(ctx, s)
@ -979,7 +979,7 @@ func newConsoleWatcher(ctx context.Context, s *Sandbox) (*consoleWatcher, error)
cw consoleWatcher cw consoleWatcher
) )
cw.proto, cw.consoleURL, err = s.hypervisor.getSandboxConsole(ctx, s.id) cw.proto, cw.consoleURL, err = s.hypervisor.GetSandboxConsole(ctx, s.id)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1036,7 +1036,7 @@ func (cw *consoleWatcher) start(s *Sandbox) (err error) {
return nil return nil
} }
// check if the console watcher has already watched the vm console. // Check if the console watcher has already watched the vm console.
func (cw *consoleWatcher) consoleWatched() bool { func (cw *consoleWatcher) consoleWatched() bool {
return cw.conn != nil || cw.ptyConsole != nil return cw.conn != nil || cw.ptyConsole != nil
} }
@ -1101,7 +1101,7 @@ func (s *Sandbox) addSwap(ctx context.Context, swapID string, size int64) (*conf
ID: swapID, ID: swapID,
Swap: true, Swap: true,
} }
_, err = s.hypervisor.hotplugAddDevice(ctx, blockDevice, BlockDev) _, err = s.hypervisor.HotplugAddDevice(ctx, blockDevice, BlockDev)
if err != nil { if err != nil {
err = fmt.Errorf("add swapfile %s device to VM fail %s", swapFile, err.Error()) err = fmt.Errorf("add swapfile %s device to VM fail %s", swapFile, err.Error())
s.Logger().WithError(err).Error("addSwap") s.Logger().WithError(err).Error("addSwap")
@ -1109,7 +1109,7 @@ func (s *Sandbox) addSwap(ctx context.Context, swapID string, size int64) (*conf
} }
defer func() { defer func() {
if err != nil { if err != nil {
_, e := s.hypervisor.hotplugRemoveDevice(ctx, blockDevice, BlockDev) _, e := s.hypervisor.HotplugRemoveDevice(ctx, blockDevice, BlockDev)
if e != nil { if e != nil {
s.Logger().Errorf("remove swapfile %s to VM fail %s", swapFile, e.Error()) s.Logger().Errorf("remove swapfile %s to VM fail %s", swapFile, e.Error())
} }
@ -1539,7 +1539,7 @@ func (s *Sandbox) Stats(ctx context.Context) (SandboxStats, error) {
// TODO Do we want to aggregate the overhead cgroup stats to the sandbox ones? // TODO Do we want to aggregate the overhead cgroup stats to the sandbox ones?
stats.CgroupStats.CPUStats.CPUUsage.TotalUsage = metrics.CPU.Usage.Total stats.CgroupStats.CPUStats.CPUUsage.TotalUsage = metrics.CPU.Usage.Total
stats.CgroupStats.MemoryStats.Usage.Usage = metrics.Memory.Usage.Usage stats.CgroupStats.MemoryStats.Usage.Usage = metrics.Memory.Usage.Usage
tids, err := s.hypervisor.getThreadIDs(ctx) tids, err := s.hypervisor.GetThreadIDs(ctx)
if err != nil { if err != nil {
return stats, err return stats, err
} }
@ -1780,7 +1780,7 @@ func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devTy
// adding a group of VFIO devices // adding a group of VFIO devices
for _, dev := range vfioDevices { for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugAddDevice(ctx, dev, VfioDev); err != nil { if _, err := s.hypervisor.HotplugAddDevice(ctx, dev, VfioDev); err != nil {
s.Logger(). s.Logger().
WithFields(logrus.Fields{ WithFields(logrus.Fields{
"sandbox": s.id, "sandbox": s.id,
@ -1796,14 +1796,14 @@ func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devTy
if !ok { if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType) return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
} }
_, err := s.hypervisor.hotplugAddDevice(ctx, blockDevice.BlockDrive, BlockDev) _, err := s.hypervisor.HotplugAddDevice(ctx, blockDevice.BlockDrive, BlockDev)
return err return err
case config.VhostUserBlk: case config.VhostUserBlk:
vhostUserBlkDevice, ok := device.(*drivers.VhostUserBlkDevice) vhostUserBlkDevice, ok := device.(*drivers.VhostUserBlkDevice)
if !ok { if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType) return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
} }
_, err := s.hypervisor.hotplugAddDevice(ctx, vhostUserBlkDevice.VhostUserDeviceAttrs, VhostuserDev) _, err := s.hypervisor.HotplugAddDevice(ctx, vhostUserBlkDevice.VhostUserDeviceAttrs, VhostuserDev)
return err return err
case config.DeviceGeneric: case config.DeviceGeneric:
// TODO: what? // TODO: what?
@ -1831,7 +1831,7 @@ func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, de
// remove a group of VFIO devices // remove a group of VFIO devices
for _, dev := range vfioDevices { for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugRemoveDevice(ctx, dev, VfioDev); err != nil { if _, err := s.hypervisor.HotplugRemoveDevice(ctx, dev, VfioDev); err != nil {
s.Logger().WithError(err). s.Logger().WithError(err).
WithFields(logrus.Fields{ WithFields(logrus.Fields{
"sandbox": s.id, "sandbox": s.id,
@ -1852,14 +1852,14 @@ func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, de
s.Logger().WithField("path", blockDrive.File).Infof("Skip device: cannot hot remove PMEM devices") s.Logger().WithField("path", blockDrive.File).Infof("Skip device: cannot hot remove PMEM devices")
return nil return nil
} }
_, err := s.hypervisor.hotplugRemoveDevice(ctx, blockDrive, BlockDev) _, err := s.hypervisor.HotplugRemoveDevice(ctx, blockDrive, BlockDev)
return err return err
case config.VhostUserBlk: case config.VhostUserBlk:
vhostUserDeviceAttrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs) vhostUserDeviceAttrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
if !ok { if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType) return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
} }
_, err := s.hypervisor.hotplugRemoveDevice(ctx, vhostUserDeviceAttrs, VhostuserDev) _, err := s.hypervisor.HotplugRemoveDevice(ctx, vhostUserDeviceAttrs, VhostuserDev)
return err return err
case config.DeviceGeneric: case config.DeviceGeneric:
// TODO: what? // TODO: what?
@ -1886,11 +1886,11 @@ func (s *Sandbox) UnsetSandboxBlockIndex(index int) error {
func (s *Sandbox) AppendDevice(ctx context.Context, device api.Device) error { func (s *Sandbox) AppendDevice(ctx context.Context, device api.Device) error {
switch device.DeviceType() { switch device.DeviceType() {
case config.VhostUserSCSI, config.VhostUserNet, config.VhostUserBlk, config.VhostUserFS: case config.VhostUserSCSI, config.VhostUserNet, config.VhostUserBlk, config.VhostUserFS:
return s.hypervisor.addDevice(ctx, device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), VhostuserDev) return s.hypervisor.AddDevice(ctx, device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), VhostuserDev)
case config.DeviceVFIO: case config.DeviceVFIO:
vfioDevs := device.GetDeviceInfo().([]*config.VFIODev) vfioDevs := device.GetDeviceInfo().([]*config.VFIODev)
for _, d := range vfioDevs { for _, d := range vfioDevs {
return s.hypervisor.addDevice(ctx, *d, VfioDev) return s.hypervisor.AddDevice(ctx, *d, VfioDev)
} }
default: default:
s.Logger().WithField("device-type", device.DeviceType()). s.Logger().WithField("device-type", device.DeviceType()).
@ -1949,11 +1949,11 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
return err return err
} }
// Add default vcpus for sandbox // Add default vcpus for sandbox
sandboxVCPUs += s.hypervisor.hypervisorConfig().NumVCPUs sandboxVCPUs += s.hypervisor.HypervisorConfig().NumVCPUs
sandboxMemoryByte, sandboxneedPodSwap, sandboxSwapByte := s.calculateSandboxMemory() sandboxMemoryByte, sandboxneedPodSwap, sandboxSwapByte := s.calculateSandboxMemory()
// Add default / reserved memory for sandbox. // Add default / reserved memory for sandbox.
hypervisorMemoryByte := int64(s.hypervisor.hypervisorConfig().MemorySize) << utils.MibToBytesShift hypervisorMemoryByte := int64(s.hypervisor.HypervisorConfig().MemorySize) << utils.MibToBytesShift
sandboxMemoryByte += hypervisorMemoryByte sandboxMemoryByte += hypervisorMemoryByte
if sandboxneedPodSwap { if sandboxneedPodSwap {
sandboxSwapByte += hypervisorMemoryByte sandboxSwapByte += hypervisorMemoryByte
@ -1970,7 +1970,7 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
// Update VCPUs // Update VCPUs
s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs") s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs")
oldCPUs, newCPUs, err := s.hypervisor.resizeVCPUs(ctx, sandboxVCPUs) oldCPUs, newCPUs, err := s.hypervisor.ResizeVCPUs(ctx, sandboxVCPUs)
if err != nil { if err != nil {
return err return err
} }
@ -1988,7 +1988,7 @@ func (s *Sandbox) updateResources(ctx context.Context) error {
// Update Memory // Update Memory
s.Logger().WithField("memory-sandbox-size-byte", sandboxMemoryByte).Debugf("Request to hypervisor to update memory") s.Logger().WithField("memory-sandbox-size-byte", sandboxMemoryByte).Debugf("Request to hypervisor to update memory")
newMemory, updatedMemoryDevice, err := s.hypervisor.resizeMemory(ctx, uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe) newMemory, updatedMemoryDevice, err := s.hypervisor.ResizeMemory(ctx, uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
if err != nil { if err != nil {
if err == noGuestMemHotplugErr { if err == noGuestMemHotplugErr {
s.Logger().Warnf("%s, memory specifications cannot be guaranteed", err) s.Logger().Warnf("%s, memory specifications cannot be guaranteed", err)
@ -2157,7 +2157,7 @@ func (s *Sandbox) cgroupsDelete() error {
// constrainHypervisor will place the VMM and vCPU threads into cgroups. // constrainHypervisor will place the VMM and vCPU threads into cgroups.
func (s *Sandbox) constrainHypervisor(ctx context.Context) error { func (s *Sandbox) constrainHypervisor(ctx context.Context) error {
tids, err := s.hypervisor.getThreadIDs(ctx) tids, err := s.hypervisor.GetThreadIDs(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to get thread ids from hypervisor: %v", err) return fmt.Errorf("failed to get thread ids from hypervisor: %v", err)
} }
@ -2197,7 +2197,7 @@ func (s *Sandbox) setupCgroups() error {
// This OCI specification was patched when the sandbox was created // This OCI specification was patched when the sandbox was created
// by containerCapabilities(), SetEphemeralStorageType() and others // by containerCapabilities(), SetEphemeralStorageType() and others
// in order to support: // in order to support:
// * capabilities // * capabilities
// * Ephemeral storage // * Ephemeral storage
// * k8s empty dir // * k8s empty dir
// If you need the original (vanilla) OCI spec, // If you need the original (vanilla) OCI spec,
@ -2264,7 +2264,7 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
var config SandboxConfig var config SandboxConfig
// load sandbox config fromld store. // Load sandbox config from the store.
c, err := loadSandboxConfig(sandboxID) c, err := loadSandboxConfig(sandboxID)
if err != nil { if err != nil {
virtLog.WithError(err).Warning("failed to get sandbox config from store") virtLog.WithError(err).Warning("failed to get sandbox config from store")
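// Aside: taken together, the sandbox.go call sites above imply the
// exported method set sketched below. This is a compilable reference
// sketch only; the placeholder types are assumptions standing in for
// the real virtcontainers definitions, not the actual interface.
package hypervisor

import "context"

type (
	Capabilities     struct{}
	HypervisorConfig struct{ NumVCPUs, MemorySize uint32 }
	DeviceType       int
	MemoryDevice     struct{}
	VcpuThreadIDs    struct{}
)

type Hypervisor interface {
	Capabilities(ctx context.Context) Capabilities
	HypervisorConfig() HypervisorConfig
	GetSandboxConsole(ctx context.Context, id string) (proto, consoleURL string, err error)
	GetThreadIDs(ctx context.Context) (VcpuThreadIDs, error)
	HotplugAddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error)
	HotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType DeviceType) (interface{}, error)
	AddDevice(ctx context.Context, devInfo interface{}, devType DeviceType) error
	ResizeVCPUs(ctx context.Context, reqVCPUs uint32) (oldVCPUs, newVCPUs uint32, err error)
	ResizeMemory(ctx context.Context, reqMemMB, blockSizeMB uint32, probe bool) (uint32, MemoryDevice, error)
	Cleanup(ctx context.Context) error
}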

View File

@ -132,7 +132,7 @@ func RegisterMetrics() {
// UpdateRuntimeMetrics update shim/hypervisor's metrics // UpdateRuntimeMetrics update shim/hypervisor's metrics
func (s *Sandbox) UpdateRuntimeMetrics() error { func (s *Sandbox) UpdateRuntimeMetrics() error {
pids := s.hypervisor.getPids() pids := s.hypervisor.GetPids()
if len(pids) == 0 { if len(pids) == 0 {
return nil return nil
} }
@ -183,7 +183,7 @@ func (s *Sandbox) UpdateRuntimeMetrics() error {
} }
func (s *Sandbox) UpdateVirtiofsdMetrics() error { func (s *Sandbox) UpdateVirtiofsdMetrics() error {
vfsPid := s.hypervisor.getVirtioFsPid() vfsPid := s.hypervisor.GetVirtioFsPid()
if vfsPid == nil { if vfsPid == nil {
// virtiofsd is not mandatory for a VMM. // virtiofsd is not mandatory for a VMM.
return nil return nil
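// Aside: both metrics updaters above follow the same defensive
// pattern, i.e. fetch PIDs from the hypervisor and return early when
// there is nothing to scrape. A rough, self-contained sketch of that
// pattern follows; reading /proc/<pid>/stat directly is a
// simplification and an assumption, not the real collector.
package main

import (
	"fmt"
	"os"
)

func updateRuntimeMetrics(pids []int) error {
	if len(pids) == 0 {
		return nil // no hypervisor process yet, nothing to scrape
	}
	for _, pid := range pids {
		stat, err := os.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
		if err != nil {
			return err
		}
		fmt.Printf("pid %d: %d bytes of stat data\n", pid, len(stat))
	}
	return nil
}

func main() {
	_ = updateRuntimeMetrics([]int{os.Getpid()})
}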

View File

@ -203,7 +203,7 @@ func testForceSandboxStateChangeAndCheck(t *testing.T, p *Sandbox, newSandboxSta
// force sandbox state change // force sandbox state change
err := p.setSandboxState(newSandboxState.State) err := p.setSandboxState(newSandboxState.State)
assert.NoError(t, err) assert.NoError(t, err)
// check the in-memory state is correct // Check the in-memory state is correct
if p.state.State != newSandboxState.State { if p.state.State != newSandboxState.State {
return fmt.Errorf("Expected state %v, got %v", newSandboxState.State, p.state.State) return fmt.Errorf("Expected state %v, got %v", newSandboxState.State, p.state.State)
} }
@ -216,7 +216,7 @@ func testForceContainerStateChangeAndCheck(t *testing.T, p *Sandbox, c *Containe
err := c.setContainerState(newContainerState.State) err := c.setContainerState(newContainerState.State)
assert.NoError(t, err) assert.NoError(t, err)
// check the in-memory state is correct // Check the in-memory state is correct
if c.state.State != newContainerState.State { if c.state.State != newContainerState.State {
return fmt.Errorf("Expected state %v, got %v", newContainerState.State, c.state.State) return fmt.Errorf("Expected state %v, got %v", newContainerState.State, c.state.State)
} }
@ -225,7 +225,7 @@ func testForceContainerStateChangeAndCheck(t *testing.T, p *Sandbox, c *Containe
} }
func testCheckSandboxOnDiskState(p *Sandbox, sandboxState types.SandboxState) error { func testCheckSandboxOnDiskState(p *Sandbox, sandboxState types.SandboxState) error {
// check on-disk state is correct // Check on-disk state is correct
if p.state.State != sandboxState.State { if p.state.State != sandboxState.State {
return fmt.Errorf("Expected state %v, got %v", sandboxState.State, p.state.State) return fmt.Errorf("Expected state %v, got %v", sandboxState.State, p.state.State)
} }
@ -234,7 +234,7 @@ func testCheckSandboxOnDiskState(p *Sandbox, sandboxState types.SandboxState) er
} }
func testCheckContainerOnDiskState(c *Container, containerState types.ContainerState) error { func testCheckContainerOnDiskState(c *Container, containerState types.ContainerState) error {
// check on-disk state is correct // Check on-disk state is correct
if c.state.State != containerState.State { if c.state.State != containerState.State {
return fmt.Errorf("Expected state %v, got %v", containerState.State, c.state.State) return fmt.Errorf("Expected state %v, got %v", containerState.State, c.state.State)
} }
@ -251,7 +251,7 @@ func writeContainerConfig() (string, error) {
{ {
"ociVersion": "1.0.0-rc2-dev", "ociVersion": "1.0.0-rc2-dev",
"process": { "process": {
"capabilities": [ "Capabilities": [
] ]
} }
}` }`
@ -311,7 +311,7 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
c, err := p.findContainer(contID) c, err := p.findContainer(contID)
assert.NoError(err) assert.NoError(err)
// check initial sandbox and container states // Check initial sandbox and container states
if err := testCheckInitSandboxAndContainerStates(p, initialSandboxState, c, initialContainerState); err != nil { if err := testCheckInitSandboxAndContainerStates(p, initialSandboxState, c, initialContainerState); err != nil {
t.Error(err) t.Error(err)
} }
@ -1377,7 +1377,7 @@ func TestSandboxCreationFromConfigRollbackFromCreateSandbox(t *testing.T) {
// Fail at createSandbox: the QEMU path does not exist, which is expected; rollback is then called // Fail at createSandbox: the QEMU path does not exist, which is expected; rollback is then called
assert.Error(err) assert.Error(err)
// check dirs // Check dirs
err = checkSandboxRemains() err = checkSandboxRemains()
assert.NoError(err) assert.NoError(err)
} }

View File

@ -96,12 +96,12 @@ func (endpoint *TapEndpoint) HotAttach(ctx context.Context, h hypervisor) error
span, ctx := tapTrace(ctx, "HotAttach", endpoint) span, ctx := tapTrace(ctx, "HotAttach", endpoint)
defer span.End() defer span.End()
if err := tapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil { if err := tapNetwork(endpoint, h.HypervisorConfig().NumVCPUs, h.HypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tap ep") networkLogger().WithError(err).Error("Error bridging tap ep")
return err return err
} }
if _, err := h.hotplugAddDevice(ctx, endpoint, NetDev); err != nil { if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error attach tap ep") networkLogger().WithError(err).Error("Error attach tap ep")
return err return err
} }
@ -121,7 +121,7 @@ func (endpoint *TapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsC
networkLogger().WithError(err).Warn("Error un-bridging tap ep") networkLogger().WithError(err).Warn("Error un-bridging tap ep")
} }
if _, err := h.hotplugRemoveDevice(ctx, endpoint, NetDev); err != nil { if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach tap ep") networkLogger().WithError(err).Error("Error detach tap ep")
return err return err
} }
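// Aside: the tap endpoint above (and the tun/tap and veth endpoints
// below) follow the same two-step hot-attach, i.e. host-side plumbing
// first, then a guest hotplug; HotDetach reverses the order. A minimal
// sketch of that ordering, where setupHostTap is a hypothetical
// stand-in for tapNetwork and the bridging helpers:
package main

import (
	"context"
	"fmt"
)

type DeviceType int

const NetDev DeviceType = iota

type hotplugger interface {
	HotplugAddDevice(ctx context.Context, dev interface{}, t DeviceType) (interface{}, error)
}

func setupHostTap(ep interface{}) error { return nil } // placeholder

func hotAttach(ctx context.Context, h hotplugger, ep interface{}) error {
	// The host side must be ready before the guest sees the device.
	if err := setupHostTap(ep); err != nil {
		return err
	}
	if _, err := h.HotplugAddDevice(ctx, ep, NetDev); err != nil {
		return fmt.Errorf("error attaching endpoint: %w", err)
	}
	return nil
}

func main() {}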

View File

@ -82,7 +82,7 @@ func (endpoint *TuntapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return err return err
} }
return h.addDevice(ctx, endpoint, NetDev) return h.AddDevice(ctx, endpoint, NetDev)
} }
// Detach for the tun/tap endpoint tears down the tap // Detach for the tun/tap endpoint tears down the tap
@ -107,12 +107,12 @@ func (endpoint *TuntapEndpoint) HotAttach(ctx context.Context, h hypervisor) err
span, ctx := tuntapTrace(ctx, "HotAttach", endpoint) span, ctx := tuntapTrace(ctx, "HotAttach", endpoint)
defer span.End() defer span.End()
if err := tuntapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil { if err := tuntapNetwork(endpoint, h.HypervisorConfig().NumVCPUs, h.HypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tun/tap ep") networkLogger().WithError(err).Error("Error bridging tun/tap ep")
return err return err
} }
if _, err := h.hotplugAddDevice(ctx, endpoint, NetDev); err != nil { if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error attach tun/tap ep") networkLogger().WithError(err).Error("Error attach tun/tap ep")
return err return err
} }
@ -132,7 +132,7 @@ func (endpoint *TuntapEndpoint) HotDetach(ctx context.Context, h hypervisor, net
networkLogger().WithError(err).Warn("Error un-bridging tun/tap ep") networkLogger().WithError(err).Warn("Error un-bridging tun/tap ep")
} }
if _, err := h.hotplugRemoveDevice(ctx, endpoint, NetDev); err != nil { if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach tun/tap ep") networkLogger().WithError(err).Error("Error detach tun/tap ep")
return err return err
} }

View File

@ -103,7 +103,7 @@ func (endpoint *VethEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return err return err
} }
return h.addDevice(ctx, endpoint, NetDev) return h.AddDevice(ctx, endpoint, NetDev)
} }
// Detach for the veth endpoint tears down the tap and bridge // Detach for the veth endpoint tears down the tap and bridge
@ -133,7 +133,7 @@ func (endpoint *VethEndpoint) HotAttach(ctx context.Context, h hypervisor) error
return err return err
} }
if _, err := h.hotplugAddDevice(ctx, endpoint, NetDev); err != nil { if _, err := h.HotplugAddDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error attach virtual ep") networkLogger().WithError(err).Error("Error attach virtual ep")
return err return err
} }
@ -155,7 +155,7 @@ func (endpoint *VethEndpoint) HotDetach(ctx context.Context, h hypervisor, netNs
networkLogger().WithError(err).Warn("Error un-bridging virtual ep") networkLogger().WithError(err).Warn("Error un-bridging virtual ep")
} }
if _, err := h.hotplugRemoveDevice(ctx, endpoint, NetDev); err != nil { if _, err := h.HotplugRemoveDevice(ctx, endpoint, NetDev); err != nil {
networkLogger().WithError(err).Error("Error detach virtual ep") networkLogger().WithError(err).Error("Error detach virtual ep")
return err return err
} }

View File

@ -96,7 +96,7 @@ func (endpoint *VhostUserEndpoint) Attach(ctx context.Context, s *Sandbox) error
Type: config.VhostUserNet, Type: config.VhostUserNet,
} }
return s.hypervisor.addDevice(ctx, d, VhostuserDev) return s.hypervisor.AddDevice(ctx, d, VhostuserDev)
} }
// Detach for vhostuser endpoint // Detach for vhostuser endpoint
@ -133,7 +133,7 @@ func findVhostUserNetSocketPath(netInfo NetworkInfo) (string, error) {
return "", nil return "", nil
} }
// check for socket file existence at known location. // Check for socket file existence at known location.
for _, addr := range netInfo.Addrs { for _, addr := range netInfo.Addrs {
socketPath := fmt.Sprintf(hostSocketSearchPath, addr.IPNet.IP) socketPath := fmt.Sprintf(hostSocketSearchPath, addr.IPNet.IP)
if _, err := os.Stat(socketPath); err == nil { if _, err := os.Stat(socketPath); err == nil {
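// Aside: unlike the hotplug endpoints, the vhost-user attach above is
// a cold plug, i.e. the backend socket is probed on the host and the
// device is registered with AddDevice before the VM boots. A minimal
// sketch; the VhostUserDeviceAttrs field names are assumptions, since
// the diff only shows the Type field.
package main

import (
	"context"
	"fmt"
	"os"
)

type DeviceType int

const VhostuserDev DeviceType = iota

type VhostUserDeviceAttrs struct {
	SocketPath string // assumed field name
	Type       string
}

type deviceAdder interface {
	AddDevice(ctx context.Context, dev interface{}, t DeviceType) error
}

func attachVhostUserNet(ctx context.Context, h deviceAdder, socketPath string) error {
	// Check for socket file existence at the known location, as above.
	if _, err := os.Stat(socketPath); err != nil {
		return fmt.Errorf("no vhost-user socket at %s: %w", socketPath, err)
	}
	d := &VhostUserDeviceAttrs{SocketPath: socketPath, Type: "vhost-user-net"}
	return h.AddDevice(ctx, d, VhostuserDev)
}

func main() {}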

View File

@ -134,7 +134,7 @@ func TestMain(m *testing.M) {
} }
utils.StartCmd = func(c *exec.Cmd) error { utils.StartCmd = func(c *exec.Cmd) error {
// startSandbox will check if the hypervisor is alive and // startSandbox will check if the hypervisor is alive and
// checks that the PID is running; let's fake it using our // checks that the PID is running; let's fake it using our
// own PID // own PID
c.Process = &os.Process{Pid: os.Getpid()} c.Process = &os.Process{Pid: os.Getpid()}

View File

@ -42,7 +42,7 @@ type VMConfig struct {
HypervisorConfig HypervisorConfig HypervisorConfig HypervisorConfig
} }
// Valid check VMConfig validity. // Valid checks VMConfig validity.
func (c *VMConfig) Valid() error { func (c *VMConfig) Valid() error {
return c.HypervisorConfig.Valid() return c.HypervisorConfig.Valid()
} }
@ -141,10 +141,10 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
} }
}() }()
// 4. check agent aliveness // 4. Check agent aliveness
// VMs booted from template are paused, do not check // VMs booted from template are paused, do not check
if !config.HypervisorConfig.BootFromTemplate { if !config.HypervisorConfig.BootFromTemplate {
virtLog.WithField("vm", id).Info("check agent status") virtLog.WithField("vm", id).Info("Check agent status")
err = agent.check(ctx) err = agent.check(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
@ -220,7 +220,7 @@ func (v *VM) Pause(ctx context.Context) error {
// Save saves a VM to persistent disk. // Save saves a VM to persistent disk.
func (v *VM) Save() error { func (v *VM) Save() error {
v.logger().Info("save vm") v.logger().Info("Save vm")
return v.hypervisor.saveSandbox() return v.hypervisor.saveSandbox()
} }
@ -241,7 +241,7 @@ func (v *VM) Disconnect(ctx context.Context) error {
v.logger().Info("kill vm") v.logger().Info("kill vm")
if err := v.agent.disconnect(ctx); err != nil { if err := v.agent.disconnect(ctx); err != nil {
v.logger().WithError(err).Error("failed to disconnect agent") v.logger().WithError(err).Error("failed to Disconnect agent")
} }
return nil return nil
@ -262,7 +262,7 @@ func (v *VM) Stop(ctx context.Context) error {
func (v *VM) AddCPUs(ctx context.Context, num uint32) error { func (v *VM) AddCPUs(ctx context.Context, num uint32) error {
if num > 0 { if num > 0 {
v.logger().Infof("hot adding %d vCPUs", num) v.logger().Infof("hot adding %d vCPUs", num)
if _, err := v.hypervisor.hotplugAddDevice(ctx, num, CpuDev); err != nil { if _, err := v.hypervisor.HotplugAddDevice(ctx, num, CpuDev); err != nil {
return err return err
} }
v.cpuDelta += num v.cpuDelta += num
@ -277,7 +277,7 @@ func (v *VM) AddMemory(ctx context.Context, numMB uint32) error {
if numMB > 0 { if numMB > 0 {
v.logger().Infof("hot adding %d MB memory", numMB) v.logger().Infof("hot adding %d MB memory", numMB)
dev := &MemoryDevice{1, int(numMB), 0, false} dev := &MemoryDevice{1, int(numMB), 0, false}
if _, err := v.hypervisor.hotplugAddDevice(ctx, dev, MemoryDev); err != nil { if _, err := v.hypervisor.HotplugAddDevice(ctx, dev, MemoryDev); err != nil {
return err return err
} }
} }
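// Aside: a sketch of the AddCPUs/AddMemory flow above. Both resources
// go through the same HotplugAddDevice entry point, and the caller
// tracks the vCPU delta the way v.cpuDelta does above. MemoryDevice's
// field names are assumptions; the diff only shows a positional literal.
package main

import (
	"context"
	"fmt"
)

type DeviceType int

const (
	CpuDev DeviceType = iota
	MemoryDev
)

type MemoryDevice struct {
	Slot   int
	SizeMB int
	Addr   uint64
	Probe  bool
}

type hotplugger interface {
	HotplugAddDevice(ctx context.Context, dev interface{}, t DeviceType) (interface{}, error)
}

func resizeVM(ctx context.Context, h hotplugger, vcpus uint32, memMB int) (cpuDelta uint32, err error) {
	if vcpus > 0 {
		if _, err = h.HotplugAddDevice(ctx, vcpus, CpuDev); err != nil {
			return 0, err
		}
		cpuDelta += vcpus // remember how many vCPUs were hot-added
	}
	if memMB > 0 {
		dev := &MemoryDevice{Slot: 1, SizeMB: memMB}
		if _, err = h.HotplugAddDevice(ctx, dev, MemoryDev); err != nil {
			return cpuDelta, err
		}
	}
	return cpuDelta, nil
}

func main() { fmt.Println("sketch only") }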