vc: hypervisor: remove dependency on persist API

Today the hypervisor code in vc relies on the persist pkg for two things:
1. To get the VM/run store paths on the host filesystem,
2. For the type definition of the Load/Save functions of the hypervisor
   interface.

For (1), we can simply remove the store interface from the hypervisor
config and replace it with just the paths, since this is all we really
need. When we create the hypervisor, the caller (outside of the
hypervisor code) can populate these paths; see the sketch below.
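
A condensed sketch of (1), using the field names from this patch
(surrounding code elided):

    // virtcontainers/hypervisor.go: HypervisorConfig carries plain
    // paths instead of a persistapi.PersistDriver handle.
    type HypervisorConfig struct {
        // ...

        // VMStorePath is the location on disk where VM information will persist
        VMStorePath string

        // RunStorePath is the location on disk where runtime information will persist
        RunStorePath string
    }

    // virtcontainers/sandbox.go: the sandbox still owns the persist
    // driver, and fills the paths in before handing the config over.
    sandboxConfig.HypervisorConfig.VMStorePath = s.store.RunVMStoragePath()
    sandboxConfig.HypervisorConfig.RunStorePath = s.store.RunStoragePath()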

For (2), rather than have the persist pkg define the structure, let the
hypervisor code (soon to be its own pkg) define it. The persist API
already needs to call into the hypervisor anyway, so letting the
hypervisor side own the type is the natural direction of dependency.
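
A condensed sketch of (2): the new pkg/hypervisors types mirror what
persistapi used to define (the full field list is carried over from the
deleted persist/api/hypervisor.go shown below), and the Hypervisor
interface now speaks them:

    // pkg/hypervisors (imported as "hv"); fields condensed here.
    package hypervisors

    type HypervisorState struct {
        Pid  int
        Type string
        // ...Bridges, HotpluggedVCPUs, APISocket, etc., carried over
        // unchanged from the old persistapi definitions...
    }

    // The virtcontainers Hypervisor interface then becomes:
    Save() hv.HypervisorState
    Load(hv.HypervisorState)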

We'll probably want to look at following a similar pattern for other
parts of vc that we want to make independent of the persist API.

In doing this, we started an initial hypervisors pkg to hold these
types (avoiding a circular dependency between the virtcontainers and
persist pkgs). The next step will be to remove all other dependencies
and move the hypervisor-specific code into this pkg, and out of
virtcontainers.
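
The resulting dependency direction, illustrated with the
persist/api/sandbox.go change from this patch:

    // Before: persist/api defined HypervisorState and the hypervisor
    // drivers imported persist/api. After: pkg/hypervisors owns the
    // types and both sides import it.
    import (
        hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
    )

    type SandboxState struct {
        // ...

        // HypervisorState saves hypervisor specific data
        HypervisorState hv.HypervisorState
    }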

Signed-off-by: Eric Ernst <eric_ernst@apple.com>
Eric Ernst 2021-11-11 15:26:10 -08:00
parent 34f23de512
commit 4c2883f7e2
16 changed files with 133 additions and 141 deletions

View File

@@ -1052,11 +1052,13 @@ func TestGetHypervisorInfoSocket(t *testing.T) {
 		{vc.QemuHypervisor, false},
 	}
+	config.HypervisorConfig.VMStorePath = "/foo"
+	config.HypervisorConfig.RunStorePath = "/bar"
 	for i, details := range hypervisors {
 		msg := fmt.Sprintf("hypervisor[%d]: %+v", i, details)
 		config.HypervisorType = details.hType
 
 		info, err := getHypervisorInfo(config)
 		assert.NoError(err, msg)

View File

@@ -199,11 +199,15 @@ func TestAcrnGetSandboxConsole(t *testing.T) {
 	assert.NoError(err)
 	a := &Acrn{
-		ctx:   context.Background(),
+		ctx: context.Background(),
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 		store: store,
 	}
 	sandboxID := "testSandboxID"
-	expected := filepath.Join(a.store.RunVMStoragePath(), sandboxID, consoleSocket)
+	expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket)
 
 	proto, result, err := a.GetVMConsole(a.ctx, sandboxID)
 	assert.NoError(err)
@@ -219,6 +223,10 @@ func TestAcrnCreateVM(t *testing.T) {
 	a := &Acrn{
 		store: store,
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 
 	sandbox := &Sandbox{

View File

@@ -20,12 +20,12 @@ import (
 	"time"
 
 	"github.com/containerd/console"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	chclient "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cloud-hypervisor/client"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
@@ -157,7 +157,6 @@ func (s *CloudHypervisorState) reset() {
 }
 
 type cloudHypervisor struct {
-	store     persistapi.PersistDriver
 	console   console.Console
 	virtiofsd Virtiofsd
 	APIClient clhClient
@@ -342,7 +341,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, networkNS N
 	clh.virtiofsd = &virtiofsd{
 		path:       clh.config.VirtioFSDaemon,
-		sourcePath: filepath.Join(getSharePath(clh.id)),
+		sourcePath: filepath.Join(GetSharePath(clh.id)),
 		socketPath: virtiofsdSocketPath,
 		extraArgs:  clh.config.VirtioFSExtraArgs,
 		debug:      clh.config.Debug,
@@ -374,7 +373,7 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
 	clh.Logger().WithField("function", "StartVM").Info("starting Sandbox")
 
-	vmPath := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
+	vmPath := filepath.Join(clh.config.VMStorePath, clh.id)
 	err := os.MkdirAll(vmPath, DirMode)
 	if err != nil {
 		return err
@@ -747,7 +746,7 @@ func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("cloudHypervisor is not supported by VM cache")
 }
 
-func (clh *cloudHypervisor) Save() (s persistapi.HypervisorState) {
+func (clh *cloudHypervisor) Save() (s hv.HypervisorState) {
 	s.Pid = clh.state.PID
 	s.Type = string(ClhHypervisor)
 	s.VirtiofsdPid = clh.state.VirtiofsdPID
@@ -755,7 +754,7 @@ func (clh *cloudHypervisor) Save() (s persistapi.HypervisorState) {
 	return
 }
 
-func (clh *cloudHypervisor) Load(s persistapi.HypervisorState) {
+func (clh *cloudHypervisor) Load(s hv.HypervisorState) {
 	clh.state.PID = s.Pid
 	clh.state.VirtiofsdPID = s.VirtiofsdPid
 	clh.state.apiSocket = s.APISocket
@@ -893,15 +892,15 @@ func (clh *cloudHypervisor) GenerateSocket(id string) (interface{}, error) {
 }
 
 func (clh *cloudHypervisor) virtioFsSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, virtioFsSocket)
+	return utils.BuildSocketPath(clh.config.VMStorePath, id, virtioFsSocket)
 }
 
 func (clh *cloudHypervisor) vsockSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhSocket)
+	return utils.BuildSocketPath(clh.config.VMStorePath, id, clhSocket)
 }
 
 func (clh *cloudHypervisor) apiSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhAPISocket)
+	return utils.BuildSocketPath(clh.config.VMStorePath, id, clhAPISocket)
 }
 
 func (clh *cloudHypervisor) waitVMM(timeout uint) error {
@@ -1213,7 +1212,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
 	}
 
 	// Cleanup vm path
-	dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
+	dir := filepath.Join(clh.config.VMStorePath, clh.id)
 
 	// If it's a symlink, remove both dir and the target.
 	link, err := filepath.EvalSymlinks(dir)
@@ -1242,7 +1241,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
 	}
 
 	if clh.config.VMid != "" {
-		dir = filepath.Join(clh.store.RunStoragePath(), clh.config.VMid)
+		dir = filepath.Join(clh.config.VMStorePath, clh.config.VMid)
 		if err := os.RemoveAll(dir); err != nil {
 			if !force {
 				return err

View File

@@ -203,7 +203,10 @@ func TestCloudHypervisorCleanupVM(t *testing.T) {
 	assert.NoError(err, "persist.GetDriver() unexpected error")
 
 	clh := &cloudHypervisor{
-		store: store,
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 
 	err = clh.cleanupVM(true)
@@ -214,7 +217,7 @@ func TestCloudHypervisorCleanupVM(t *testing.T) {
 	err = clh.cleanupVM(true)
 	assert.NoError(err, "persist.GetDriver() unexpected error")
 
-	dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
+	dir := filepath.Join(store.RunVMStoragePath(), clh.id)
 	os.MkdirAll(dir, os.ModePerm)
 
 	err = clh.cleanupVM(false)
@@ -235,9 +238,11 @@ func TestClhCreateVM(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 
+	clhConfig.VMStorePath = store.RunVMStoragePath()
+	clhConfig.RunStorePath = store.RunStoragePath()
+
 	clh := &cloudHypervisor{
 		config: clhConfig,
-		store:  store,
 	}
 
 	sandbox := &Sandbox{
@@ -261,11 +266,13 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 
+	clhConfig.VMStorePath = store.RunVMStoragePath()
+	clhConfig.RunStorePath = store.RunStoragePath()
+
 	clh := &cloudHypervisor{
 		config:    clhConfig,
 		APIClient: &clhClientMock{},
 		virtiofsd: &virtiofsdMock{},
-		store:     store,
 	}
 
 	err = clh.StartVM(context.Background(), 10)
@@ -379,6 +386,11 @@ func TestClhGenerateSocket(t *testing.T) {
 	clh, ok := hypervisor.(*cloudHypervisor)
 	assert.True(ok)
 
+	clh.config = HypervisorConfig{
+		VMStorePath:  "/foo",
+		RunStorePath: "/bar",
+	}
+
 	clh.addVSock(1, "path")
 	s, err := clh.GenerateSocket("c")
@@ -391,7 +403,7 @@ func TestClhGenerateSocket(t *testing.T) {
 	assert.NotEmpty(hvsock.UdsPath)
 
 	// Path must be absolute
-	assert.True(strings.HasPrefix(hvsock.UdsPath, "/"))
+	assert.True(strings.HasPrefix(hvsock.UdsPath, "/"), "failed: socket path: %s", hvsock.UdsPath)
 
 	assert.NotZero(hvsock.Port)
 }

View File

@@ -22,9 +22,9 @@ import (
 	"syscall"
 	"time"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/firecracker/client"
 	models "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/firecracker/client/models"
 	ops "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/firecracker/client/operations"
@@ -1226,13 +1226,13 @@ func (fc *firecracker) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("firecracker is not supported by VM cache")
 }
 
-func (fc *firecracker) Save() (s persistapi.HypervisorState) {
+func (fc *firecracker) Save() (s hv.HypervisorState) {
 	s.Pid = fc.info.PID
 	s.Type = string(FirecrackerHypervisor)
 	return
 }
 
-func (fc *firecracker) Load(s persistapi.HypervisorState) {
+func (fc *firecracker) Load(s hv.HypervisorState) {
 	fc.info.PID = s.Pid
 }

View File

@@ -14,9 +14,8 @@ import (
 	"strconv"
 	"strings"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
 )
@@ -185,28 +184,18 @@ func (hType *HypervisorType) String() string {
 	}
 }
 
-// NewHypervisor returns an hypervisor from and hypervisor type.
+// NewHypervisor returns an hypervisor from a hypervisor type.
 func NewHypervisor(hType HypervisorType) (Hypervisor, error) {
-	store, err := persist.GetDriver()
-	if err != nil {
-		return nil, err
-	}
-
 	switch hType {
 	case QemuHypervisor:
-		return &qemu{
-			store: store,
-		}, nil
+		return &qemu{}, nil
 	case FirecrackerHypervisor:
 		return &firecracker{}, nil
 	case AcrnHypervisor:
-		return &Acrn{
-			store: store,
-		}, nil
+		return &Acrn{}, nil
 	case ClhHypervisor:
-		return &cloudHypervisor{
-			store: store,
-		}, nil
+		return &cloudHypervisor{}, nil
 	case MockHypervisor:
 		return &mockHypervisor{}, nil
 	default:
@@ -345,6 +334,12 @@ type HypervisorConfig struct {
 	// VMid is "" if the hypervisor is not created by the factory.
 	VMid string
 
+	// VMStorePath is the location on disk where VM information will persist
+	VMStorePath string
+
+	// RunStorePath is the location on disk where runtime information will persist
+	RunStorePath string
+
 	// SELinux label for the VM
 	SELinuxProcessLabel string
@@ -934,8 +929,8 @@ type Hypervisor interface {
 	toGrpc(ctx context.Context) ([]byte, error)
 	Check() error
 
-	Save() persistapi.HypervisorState
-	Load(persistapi.HypervisorState)
+	Save() hv.HypervisorState
+	Load(hv.HypervisorState)
 
 	// generate the socket to communicate the host and guest
 	GenerateSocket(id string) (interface{}, error)

View File

@@ -356,7 +356,7 @@ func (k *kataAgent) setupSandboxBindMounts(ctx context.Context, sandbox *Sandbox
 	// Create subdirectory in host shared path for sandbox mounts
 	sandboxMountDir := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir)
-	sandboxShareDir := filepath.Join(getSharePath(sandbox.id), sandboxMountsDir)
+	sandboxShareDir := filepath.Join(GetSharePath(sandbox.id), sandboxMountsDir)
 	if err := os.MkdirAll(sandboxMountDir, DirMode); err != nil {
 		return fmt.Errorf("Creating sandbox shared mount directory: %v: %w", sandboxMountDir, err)
 	}
@@ -473,7 +473,7 @@ func (k *kataAgent) setupSharedPath(ctx context.Context, sandbox *Sandbox) (err
 	defer span.End()
 
 	// create shared path structure
-	sharePath := getSharePath(sandbox.id)
+	sharePath := GetSharePath(sandbox.id)
 	mountPath := getMountPath(sandbox.id)
 	if err := os.MkdirAll(sharePath, sharedDirMode); err != nil {
 		return err
@@ -509,7 +509,7 @@ func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
 	if err := k.setupSharedPath(ctx, sandbox); err != nil {
 		return err
 	}
-	return k.configure(ctx, sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), sandbox.config.AgentConfig)
+	return k.configure(ctx, sandbox.hypervisor, sandbox.id, GetSharePath(sandbox.id), sandbox.config.AgentConfig)
 }
 
 func cmdToKataProcess(cmd types.Cmd) (process *grpc.Process, err error) {
@@ -2198,7 +2198,7 @@ func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
 	}
 
 	// Unmount shared path
-	path := getSharePath(s.id)
+	path := GetSharePath(s.id)
 	k.Logger().WithField("path", path).Infof("Cleanup agent")
 	if err := syscall.Unmount(path, syscall.MNT_DETACH|UmountNoFollow); err != nil {
 		k.Logger().WithError(err).Errorf("failed to unmount vm share path %s", path)

View File

@@ -1158,7 +1158,7 @@ func TestSandboxBindMount(t *testing.T) {
 	assert.Nil(err)
 	defer os.RemoveAll(dir)
 
-	sharePath := getSharePath(sandbox.id)
+	sharePath := GetSharePath(sandbox.id)
 	mountPath := getMountPath(sandbox.id)
 
 	err = os.MkdirAll(sharePath, DirMode)

View File

@@ -10,7 +10,7 @@ import (
 	"errors"
 	"os"
 
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 )
@@ -130,11 +130,11 @@ func (m *mockHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("mockHypervisor is not supported by VM cache")
 }
 
-func (m *mockHypervisor) Save() (s persistapi.HypervisorState) {
+func (m *mockHypervisor) Save() (s hv.HypervisorState) {
 	return
 }
 
-func (m *mockHypervisor) Load(s persistapi.HypervisorState) {}
+func (m *mockHypervisor) Load(s hv.HypervisorState) {}
 
 func (m *mockHypervisor) Check() error {
 	return nil

View File

@@ -8,6 +8,7 @@ package virtcontainers
 import (
 	"errors"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
 	exp "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/experimental"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
@@ -315,7 +316,7 @@ func (c *Container) loadContState(cs persistapi.ContainerState) {
 	}
 }
 
-func (s *Sandbox) loadHypervisor(hs persistapi.HypervisorState) {
+func (s *Sandbox) loadHypervisor(hs hv.HypervisorState) {
 	s.hypervisor.Load(hs)
 }

View File

@@ -1,50 +0,0 @@
-// Copyright (c) 2019 Huawei Corporation
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-package persistapi
-
-// Bridge is a bridge where devices can be hot plugged
-type Bridge struct {
-	// DeviceAddr contains information about devices plugged and its address in the bridge
-	DeviceAddr map[uint32]string
-
-	// Type is the type of the bridge (pci, pcie, etc)
-	Type string
-
-	//ID is used to identify the bridge in the hypervisor
-	ID string
-
-	// Addr is the PCI/e slot of the bridge
-	Addr int
-}
-
-// CPUDevice represents a CPU device which was hot-added in a running VM
-type CPUDevice struct {
-	// ID is used to identify this CPU in the hypervisor options.
-	ID string
-}
-
-type HypervisorState struct {
-	BlockIndexMap map[int]struct{}
-
-	// Type of hypervisor, E.g. qemu/firecracker/acrn.
-	Type string
-
-	UUID string
-
-	// clh sepcific: refer to 'virtcontainers/clh.go:CloudHypervisorState'
-	APISocket string
-
-	// Belows are qemu specific
-	// Refs: virtcontainers/qemu.go:QemuState
-	Bridges []Bridge
-
-	// HotpluggedCPUs is the list of CPUs that were hot-added
-	HotpluggedVCPUs      []CPUDevice
-	HotpluggedMemory     int
-	VirtiofsdPid         int
-	Pid                  int
-	PCIeRootPort         int
-	HotplugVFIOOnRootBus bool
-}

View File

@@ -6,6 +6,10 @@
 
 package persistapi
 
+import (
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
+)
+
 // ============= sandbox level resources =============
 
 // AgentState save agent state data
@@ -38,7 +42,7 @@ type SandboxState struct {
 	OverheadCgroupPath string
 
 	// HypervisorState saves hypervisor specific data
-	HypervisorState HypervisorState
+	HypervisorState hv.HypervisorState
 
 	// AgentState saves state data of agent
 	AgentState AgentState

View File

@@ -31,11 +31,11 @@ import (
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	pkgUtils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/uuid"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
@@ -67,18 +67,12 @@ type qmpChannel struct {
 	sync.Mutex
 }
 
-// CPUDevice represents a CPU device which was hot-added in a running VM
-type CPUDevice struct {
-	// ID is used to identify this CPU in the hypervisor options.
-	ID string
-}
-
 // QemuState keeps Qemu's state
 type QemuState struct {
 	UUID    string
 	Bridges []types.Bridge
 	// HotpluggedCPUs is the list of CPUs that were hot-added
-	HotpluggedVCPUs  []CPUDevice
+	HotpluggedVCPUs  []hv.CPUDevice
 	HotpluggedMemory int
 	VirtiofsdPid     int
 	PCIeRootPort     int
@@ -92,8 +86,6 @@ type qemu struct {
 	virtiofsd Virtiofsd
 
-	store persistapi.PersistDriver
-
 	ctx context.Context
 
 	// fds is a list of file descriptors inherited by QEMU process
@@ -276,7 +268,7 @@ func (q *qemu) setup(ctx context.Context, id string, hypervisorConfig *Hyperviso
 		// The path might already exist, but in case of VM templating,
 		// we have to create it since the sandbox has not created it yet.
-		if err = utils.MkdirAllWithInheritedOwner(filepath.Join(q.store.RunStoragePath(), id), DirMode); err != nil {
+		if err = utils.MkdirAllWithInheritedOwner(filepath.Join(q.config.RunStorePath, id), DirMode); err != nil {
 			return err
 		}
 	}
@@ -331,7 +323,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
 }
 
 func (q *qemu) qmpSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(q.store.RunVMStoragePath(), id, qmpSocket)
+	return utils.BuildSocketPath(q.config.VMStorePath, id, qmpSocket)
 }
 
 func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) {
@@ -618,7 +610,7 @@ func (q *qemu) CreateVM(ctx context.Context, id string, networkNS NetworkNamespa
 		GlobalParam: "kvm-pit.lost_tick_policy=discard",
 		Bios:        firmwarePath,
 		PFlash:      pflash,
-		PidFile:     filepath.Join(q.store.RunVMStoragePath(), q.id, "pid"),
+		PidFile:     filepath.Join(q.config.VMStorePath, q.id, "pid"),
 	}
 
 	qemuConfig.Devices, qemuConfig.Bios, err = q.arch.appendProtectionDevice(qemuConfig.Devices, firmwarePath)
@@ -666,7 +658,7 @@ func (q *qemu) CreateVM(ctx context.Context, id string, networkNS NetworkNamespa
 }
 
 func (q *qemu) vhostFSSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(q.store.RunVMStoragePath(), id, vhostFSSocket)
+	return utils.BuildSocketPath(q.config.VMStorePath, id, vhostFSSocket)
 }
 
 func (q *qemu) setupVirtiofsd(ctx context.Context) (err error) {
@@ -795,7 +787,7 @@ func (q *qemu) StartVM(ctx context.Context, timeout int) error {
 		q.fds = []*os.File{}
 	}()
 
-	vmPath := filepath.Join(q.store.RunVMStoragePath(), q.id)
+	vmPath := filepath.Join(q.config.VMStorePath, q.id)
 	err := utils.MkdirAllWithInheritedOwner(vmPath, DirMode)
 	if err != nil {
 		return err
@@ -1002,7 +994,7 @@ func (q *qemu) StopVM(ctx context.Context, waitOnly bool) error {
 func (q *qemu) cleanupVM() error {
 
 	// Cleanup vm path
-	dir := filepath.Join(q.store.RunVMStoragePath(), q.id)
+	dir := filepath.Join(q.config.VMStorePath, q.id)
 
 	// If it's a symlink, remove both dir and the target.
 	// This can happen when vm template links a sandbox to a vm.
@@ -1023,7 +1015,7 @@ func (q *qemu) cleanupVM() error {
 	}
 
 	if q.config.VMid != "" {
-		dir = filepath.Join(q.store.RunStoragePath(), q.config.VMid)
+		dir = filepath.Join(q.config.RunStorePath, q.config.VMid)
 		if err := os.RemoveAll(dir); err != nil {
 			q.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path")
 		}
@@ -1149,7 +1141,7 @@ func (q *qemu) dumpSandboxMetaInfo(dumpSavePath string) {
 	dumpStatePath := filepath.Join(dumpSavePath, "state")
 
 	// copy state from /run/vc/sbs to memory dump directory
-	statePath := filepath.Join(q.store.RunStoragePath(), q.id)
+	statePath := filepath.Join(q.config.RunStorePath, q.id)
 	command := []string{"/bin/cp", "-ar", statePath, dumpStatePath}
 	q.Logger().WithField("command", command).Info("try to Save sandbox state")
 	if output, err := pkgUtils.RunCommandFull(command, true); err != nil {
@@ -1822,7 +1814,7 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
 		}
 
 		// a new vCPU was added, update list of hotplugged vCPUs and Check if all vCPUs were added
-		q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, CPUDevice{cpuID})
+		q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, hv.CPUDevice{ID: cpuID})
 		hotpluggedVCPUs++
 		if hotpluggedVCPUs == amount {
 			// All vCPUs were hotplugged
@@ -2030,7 +2022,7 @@ func (q *qemu) GetVMConsole(ctx context.Context, id string) (string, string, err
 	span, _ := katatrace.Trace(ctx, q.Logger(), "GetVMConsole", qemuTracingTags, map[string]string{"sandbox_id": q.id})
 	defer span.End()
 
-	consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket)
+	consoleURL, err := utils.BuildSocketPath(q.config.VMStorePath, id, consoleSocket)
 	if err != nil {
 		return consoleProtoUnix, "", err
 	}
@@ -2469,7 +2461,7 @@ func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
 	return json.Marshal(&qp)
 }
 
-func (q *qemu) Save() (s persistapi.HypervisorState) {
+func (q *qemu) Save() (s hv.HypervisorState) {
 
 	// If QEMU isn't even running, there isn't any state to Save
 	if q.stopped {
@@ -2488,7 +2480,7 @@ func (q *qemu) Save() (s persistapi.HypervisorState) {
 	s.PCIeRootPort = q.state.PCIeRootPort
 
 	for _, bridge := range q.arch.getBridges() {
-		s.Bridges = append(s.Bridges, persistapi.Bridge{
+		s.Bridges = append(s.Bridges, hv.Bridge{
 			DeviceAddr: bridge.Devices,
 			Type:       string(bridge.Type),
 			ID:         bridge.ID,
@@ -2497,14 +2489,14 @@ func (q *qemu) Save() (s persistapi.HypervisorState) {
 	}
 
 	for _, cpu := range q.state.HotpluggedVCPUs {
-		s.HotpluggedVCPUs = append(s.HotpluggedVCPUs, persistapi.CPUDevice{
+		s.HotpluggedVCPUs = append(s.HotpluggedVCPUs, hv.CPUDevice{
 			ID: cpu.ID,
 		})
 	}
 	return
 }
 
-func (q *qemu) Load(s persistapi.HypervisorState) {
+func (q *qemu) Load(s hv.HypervisorState) {
 	q.state.UUID = s.UUID
 	q.state.HotpluggedMemory = s.HotpluggedMemory
 	q.state.HotplugVFIOOnRootBus = s.HotplugVFIOOnRootBus
@@ -2516,7 +2508,7 @@ func (q *qemu) Load(s persistapi.HypervisorState) {
 	}
 
 	for _, cpu := range s.HotpluggedVCPUs {
-		q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, CPUDevice{
+		q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, hv.CPUDevice{
 			ID: cpu.ID,
 		})
 	}
@@ -2543,7 +2535,7 @@ func (q *qemu) Check() error {
 }
 
 func (q *qemu) GenerateSocket(id string) (interface{}, error) {
-	return generateVMSocket(id, q.store.RunVMStoragePath())
+	return generateVMSocket(id, q.config.VMStorePath)
 }
 
 func (q *qemu) IsRateLimiterBuiltin() bool {

View File

@@ -78,7 +78,10 @@ func TestQemuCreateVM(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 
 	q := &qemu{
-		store: store,
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 	sandbox := &Sandbox{
 		ctx: context.Background(),
@@ -94,7 +97,7 @@ func TestQemuCreateVM(t *testing.T) {
 	assert.NoError(err)
 
 	// Create parent dir path for hypervisor.json
-	parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
+	parentDir := filepath.Join(store.RunStoragePath(), sandbox.id)
 	assert.NoError(os.MkdirAll(parentDir, DirMode))
 
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -110,7 +113,10 @@ func TestQemuCreateVMMissingParentDirFail(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 
 	q := &qemu{
-		store: store,
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 	sandbox := &Sandbox{
 		ctx: context.Background(),
@@ -126,7 +132,7 @@ func TestQemuCreateVMMissingParentDirFail(t *testing.T) {
 	assert.NoError(err)
 
 	// Ensure parent dir path for hypervisor.json does not exist.
-	parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
+	parentDir := filepath.Join(store.RunStoragePath(), sandbox.id)
 	assert.NoError(os.RemoveAll(parentDir))
 
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -192,7 +198,10 @@ func TestQemuKnobs(t *testing.T) {
 	assert.NoError(err)
 
 	q := &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
 	assert.NoError(err)
@@ -325,11 +334,14 @@ func TestQemuGetSandboxConsole(t *testing.T) {
 	store, err := persist.GetDriver()
	assert.NoError(err)
 
 	q := &qemu{
-		ctx:   context.Background(),
-		store: store,
+		ctx: context.Background(),
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 	sandboxID := "testSandboxID"
-	expected := filepath.Join(q.store.RunVMStoragePath(), sandboxID, consoleSocket)
+	expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket)
 
 	proto, result, err := q.GetVMConsole(q.ctx, sandboxID)
 	assert.NoError(err)
@@ -460,7 +472,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q := &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -475,7 +490,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.BootToBeTemplate = true
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
@@ -491,7 +509,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.FileBackedMemRootDir = "/tmp/xyzabc"
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -505,7 +526,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.EnableVhostUserStore = true
 	sandbox.config.HypervisorConfig.HugePages = true
@@ -518,7 +542,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.EnableVhostUserStore = true
 	sandbox.config.HypervisorConfig.HugePages = false

View File

@@ -530,7 +530,6 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 	if s.store, err = persist.GetDriver(); err != nil || s.store == nil {
 		return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
 	}
-
 	defer func() {
 		if retErr != nil {
 			s.Logger().WithError(retErr).Error("Create new sandbox failed")
@@ -538,6 +537,9 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 		}
 	}()
 
+	sandboxConfig.HypervisorConfig.VMStorePath = s.store.RunVMStoragePath()
+	sandboxConfig.HypervisorConfig.RunStorePath = s.store.RunStoragePath()
+
 	spec := s.GetPatchedOCISpec()
 	if spec != nil && spec.Process.SelinuxLabel != "" {
 		sandboxConfig.HypervisorConfig.SELinuxProcessLabel = spec.Process.SelinuxLabel

View File

@@ -60,7 +60,7 @@ var testHyperstartTtySocket = ""
 
 func cleanUp() {
 	os.RemoveAll(fs.MockRunStoragePath())
 	os.RemoveAll(fs.MockRunVMStoragePath())
-	syscall.Unmount(getSharePath(testSandboxID), syscall.MNT_DETACH|UmountNoFollow)
+	syscall.Unmount(GetSharePath(testSandboxID), syscall.MNT_DETACH|UmountNoFollow)
 	os.RemoveAll(testDir)
 	os.MkdirAll(testDir, DirMode)