Mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-06-25 06:52:13 +00:00)
virtcontainers/hypervisors: support new persist API

Fix hypervisor implementations and unit tests to support the new persist API.

Signed-off-by: Julio Montes <julio.montes@intel.com>
Parent: 00307a70ee
Commit: 9585bc929a
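In short, the hypervisor implementations stop calling the package-level path helpers in virtcontainers/persist/fs (fs.RunStoragePath(), fs.RunVMStoragePath()) and instead call the same accessors on a persistapi.PersistDriver instance held by each hypervisor, injected once in newHypervisor() via persist.GetDriver(). A minimal sketch of that pattern, assuming a trimmed-down driver interface paraphrased from the calls in this diff (the real persistapi.PersistDriver has more methods, and the default paths below are illustrative; only /run/vc/vm appears in this commit's comments):

package main

import (
	"fmt"
	"path/filepath"
)

// PersistDriver is a trimmed-down stand-in for persistapi.PersistDriver,
// limited to the path accessors this commit exercises.
type PersistDriver interface {
	RunStoragePath() string
	RunVMStoragePath() string
}

// fsDriver is a hypothetical "fs" driver; /run/vc/vm matches the comments
// in the vm.go hunks below, /run/vc/sbs is an assumption.
type fsDriver struct{}

func (fsDriver) RunStoragePath() string   { return "/run/vc/sbs" }
func (fsDriver) RunVMStoragePath() string { return "/run/vc/vm" }

// hypervisor mirrors the new `store persistapi.PersistDriver` field that
// this commit adds to qemu, Acrn and cloudHypervisor.
type hypervisor struct {
	id    string
	store PersistDriver
}

// vmPath derives the per-VM directory from the injected driver instead of
// the old package-level fs helpers.
func (h *hypervisor) vmPath() string {
	return filepath.Join(h.store.RunVMStoragePath(), h.id)
}

func main() {
	// newHypervisor() performs the equivalent injection with persist.GetDriver().
	h := &hypervisor{id: "sandbox1", store: fsDriver{}}
	fmt.Println(h.vmPath()) // /run/vc/vm/sandbox1
}

Because the driver is a field rather than a package-level function, tests can inject a driver (see fs.MockRunStoragePath() in the test hunks below) without touching global state.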
virtcontainers/acrn.go

@@ -22,10 +22,7 @@ import (
 	"github.com/sirupsen/logrus"
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
-	"github.com/kata-containers/runtime/virtcontainers/persist"
 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
-	"github.com/kata-containers/runtime/virtcontainers/pkg/rootless"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/kata-containers/runtime/virtcontainers/utils"
@@ -39,17 +36,6 @@ const (
 	uuidFile = "uuid.json"
 )
 
-// VMUUIDStoragePath is the uuid directory.
-// It will contain all uuid info used by guest vm.
-var VMUUIDStoragePath = func() string {
-	path := filepath.Join(fs.StorageRootPath(), UUIDPathSuffix)
-	if rootless.IsRootless() {
-		return filepath.Join(rootless.GetRootlessDir(), path)
-	}
-	return path
-
-}
-
 // ACRN currently supports only known UUIDs for security
 // reasons (FuSa). When launching VM, only these pre-defined
 // UUID should be used else VM launch will fail. The main
@@ -101,6 +87,7 @@ type Acrn struct {
 	info  AcrnInfo
 	arch  acrnArch
 	ctx   context.Context
+	store persistapi.PersistDriver
 }
 
 type acrnPlatformInfo struct {
@@ -328,7 +315,7 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
 
 		// The path might already exist, but in case of VM templating,
 		// we have to create it since the sandbox has not created it yet.
-		if err = os.MkdirAll(filepath.Join(fs.RunStoragePath(), id), DirMode); err != nil {
+		if err = os.MkdirAll(filepath.Join(a.store.RunStoragePath(), id), DirMode); err != nil {
 			return err
 		}
 
@@ -444,7 +431,7 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
 		a.Logger().WithField("default-kernel-parameters", formatted).Debug()
 	}
 
-	vmPath := filepath.Join(fs.RunVMStoragePath(), a.id)
+	vmPath := filepath.Join(a.store.RunVMStoragePath(), a.id)
 	err := os.MkdirAll(vmPath, DirMode)
 	if err != nil {
 		return err
@@ -658,7 +645,7 @@ func (a *Acrn) getSandboxConsole(id string) (string, error) {
 	span, _ := a.trace("getSandboxConsole")
 	defer span.Finish()
 
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, acrnConsoleSocket)
+	return utils.BuildSocketPath(a.store.RunVMStoragePath(), id, acrnConsoleSocket)
 }
 
 func (a *Acrn) saveSandbox() error {
@@ -734,7 +721,7 @@ func (a *Acrn) check() error {
 }
 
 func (a *Acrn) generateSocket(id string, useVsock bool) (interface{}, error) {
-	return generateVMSocket(id, useVsock)
+	return generateVMSocket(id, useVsock, a.store.RunVMStoragePath())
 }
 
 // GetACRNUUIDBytes returns UUID bytes that is used for VM creation
@@ -797,10 +784,6 @@ func (a *Acrn) GetMaxSupportedACRNVM() (uint8, error) {
 }
 
 func (a *Acrn) storeInfo() error {
-	store, err := persist.GetDriver("fs")
-	if err != nil {
-		return err
-	}
 	relPath := filepath.Join(UUIDPathSuffix, uuidFile)
 
 	jsonOut, err := json.Marshal(a.info)
@@ -808,7 +791,7 @@ func (a *Acrn) storeInfo() error {
 		return fmt.Errorf("Could not marshal data: %s", err)
 	}
 
-	if err := store.GlobalWrite(relPath, jsonOut); err != nil {
+	if err := a.store.GlobalWrite(relPath, jsonOut); err != nil {
 		return fmt.Errorf("failed to write uuid to file: %v", err)
 	}
 
@@ -816,13 +799,9 @@ func (a *Acrn) storeInfo() error {
 }
 
 func (a *Acrn) loadInfo() error {
-	store, err := persist.GetDriver("fs")
-	if err != nil {
-		return err
-	}
 	relPath := filepath.Join(UUIDPathSuffix, uuidFile)
 
-	data, err := store.GlobalRead(relPath)
+	data, err := a.store.GlobalRead(relPath)
 	if err != nil {
 		return fmt.Errorf("failed to read uuid from file: %v", err)
 	}
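With the driver injected, storeInfo() and loadInfo() above no longer build their own "fs" driver; they persist the ACRN UUID info through the injected driver's GlobalWrite()/GlobalRead(), keyed by a path relative to the driver's global storage. A sketch of that round trip with an in-memory stand-in driver (memStore and the uuidInfo payload are hypothetical, for illustration only):

package main

import (
	"encoding/json"
	"fmt"
)

// globalStore is the subset of persistapi.PersistDriver that
// storeInfo/loadInfo use: write and read a blob at a relative path.
type globalStore interface {
	GlobalWrite(relPath string, data []byte) error
	GlobalRead(relPath string) ([]byte, error)
}

// memStore is a hypothetical in-memory driver; the real "fs" driver
// persists under its storage root on disk.
type memStore map[string][]byte

func (m memStore) GlobalWrite(relPath string, data []byte) error {
	m[relPath] = data
	return nil
}

func (m memStore) GlobalRead(relPath string) ([]byte, error) {
	data, ok := m[relPath]
	if !ok {
		return nil, fmt.Errorf("no entry for %q", relPath)
	}
	return data, nil
}

// uuidInfo stands in for AcrnInfo; the real field layout is not shown here.
type uuidInfo struct {
	Available [8]bool
}

func main() {
	var store globalStore = memStore{}
	relPath := "uuid/uuid.json" // mirrors filepath.Join(UUIDPathSuffix, uuidFile)

	// storeInfo: marshal and write through the driver.
	in := uuidInfo{}
	in.Available[0] = true
	jsonOut, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	if err := store.GlobalWrite(relPath, jsonOut); err != nil {
		panic(err)
	}

	// loadInfo: read back and unmarshal.
	data, err := store.GlobalRead(relPath)
	if err != nil {
		panic(err)
	}
	var out uuidInfo
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Available[0]) // true
}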
virtcontainers/acrn_arch_base_test.go

@@ -107,7 +107,7 @@ func TestAcrnArchBaseAppendConsoles(t *testing.T) {
 	assert := assert.New(t)
 	acrnArchBase := newAcrnArchBase()
 
-	path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), consoleSocket)
+	path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), sandboxID), consoleSocket)
 
 	expectedOut := []Device{
 		ConsoleDevice{
virtcontainers/acrn_test.go

@@ -12,7 +12,7 @@ import (
 	"testing"
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
+	"github.com/kata-containers/runtime/virtcontainers/persist"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/stretchr/testify/assert"
 )
@@ -194,11 +194,16 @@ func TestAcrnUpdateBlockDeviceInvalidIdx(t *testing.T) {
 
 func TestAcrnGetSandboxConsole(t *testing.T) {
 	assert := assert.New(t)
 
+	store, err := persist.GetDriver()
+	assert.NoError(err)
+
 	a := &Acrn{
-		ctx: context.Background(),
+		ctx:   context.Background(),
+		store: store,
 	}
 	sandboxID := "testSandboxID"
-	expected := filepath.Join(fs.RunVMStoragePath(), sandboxID, consoleSocket)
+	expected := filepath.Join(a.store.RunVMStoragePath(), sandboxID, consoleSocket)
 
 	result, err := a.getSandboxConsole(sandboxID)
 	assert.NoError(err)
@@ -208,7 +213,12 @@ func TestAcrnGetSandboxConsole(t *testing.T) {
 func TestAcrnCreateSandbox(t *testing.T) {
 	assert := assert.New(t)
 	acrnConfig := newAcrnConfig()
-	a := &Acrn{}
+	store, err := persist.GetDriver()
+	assert.NoError(err)
+
+	a := &Acrn{
+		store: store,
+	}
 
 	sandbox := &Sandbox{
 		ctx: context.Background(),
@@ -218,7 +228,7 @@ func TestAcrnCreateSandbox(t *testing.T) {
 		},
 	}
 
-	err := globalSandboxList.addSandbox(sandbox)
+	err = globalSandboxList.addSandbox(sandbox)
 	assert.NoError(err)
 
 	defer globalSandboxList.removeSandbox(sandbox.id)
virtcontainers/clh.go

@@ -21,7 +21,6 @@ import (
 	"time"
 
 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
 	chclient "github.com/kata-containers/runtime/virtcontainers/pkg/cloud-hypervisor/client"
 	opentracing "github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
@@ -111,6 +110,7 @@ type cloudHypervisor struct {
 	vmconfig  chclient.VmConfig
 	cmdOutput bytes.Buffer
 	virtiofsd Virtiofsd
+	store     persistapi.PersistDriver
 }
 
 var clhKernelParams = []Param{
@@ -303,7 +303,7 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
 
 	clh.Logger().WithField("function", "startSandbox").Info("starting Sandbox")
 
-	vmPath := filepath.Join(fs.RunVMStoragePath(), clh.id)
+	vmPath := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
 	err := os.MkdirAll(vmPath, DirMode)
 	if err != nil {
 		return err
@@ -604,23 +604,23 @@ func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{
 }
 
 func (clh *cloudHypervisor) virtioFsSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, virtioFsSocket)
+	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, virtioFsSocket)
 }
 
 func (clh *cloudHypervisor) vsockSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhSocket)
+	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhSocket)
 }
 
 func (clh *cloudHypervisor) serialPath(id string) (string, error) {
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhSerial)
+	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhSerial)
 }
 
 func (clh *cloudHypervisor) apiSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhAPISocket)
+	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhAPISocket)
 }
 
 func (clh *cloudHypervisor) logFilePath(id string) (string, error) {
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, clhLogFile)
+	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhLogFile)
 }
 
 func (clh *cloudHypervisor) waitVMM(timeout uint) error {
@@ -999,7 +999,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
 	}
 
 	// cleanup vm path
-	dir := filepath.Join(fs.RunVMStoragePath(), clh.id)
+	dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
 
 	// If it's a symlink, remove both dir and the target.
 	link, err := filepath.EvalSymlinks(dir)
@@ -1028,7 +1028,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
 	}
 
 	if clh.config.VMid != "" {
-		dir = filepath.Join(fs.RunStoragePath(), clh.config.VMid)
+		dir = filepath.Join(clh.store.RunStoragePath(), clh.config.VMid)
 		if err := os.RemoveAll(dir); err != nil {
 			if !force {
 				return err
virtcontainers/clh_test.go

@@ -13,7 +13,7 @@ import (
 	"testing"
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
+	"github.com/kata-containers/runtime/virtcontainers/persist"
 	chclient "github.com/kata-containers/runtime/virtcontainers/pkg/cloud-hypervisor/client"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
@@ -172,33 +172,32 @@ func TestCloudHypervisorBootVM(t *testing.T) {
 }
 
 func TestCloudHypervisorCleanupVM(t *testing.T) {
-	clh := &cloudHypervisor{}
+	assert := assert.New(t)
+	store, err := persist.GetDriver()
+	assert.NoError(err, "persist.GetDriver() unexpected error")
 
-	if err := clh.cleanupVM(true); err == nil {
-		t.Errorf("cloudHypervisor.cleanupVM() expected error != %v", err)
+	clh := &cloudHypervisor{
+		store: store,
 	}
 
+	err = clh.cleanupVM(true)
+	assert.Error(err, "persist.GetDriver() expected error")
+
 	clh.id = "cleanVMID"
 
-	if err := clh.cleanupVM(true); err != nil {
-		t.Errorf("cloudHypervisor.cleanupVM() expected error != %v", err)
-	}
+	err = clh.cleanupVM(true)
+	assert.NoError(err, "persist.GetDriver() unexpected error")
 
-	dir := filepath.Join(fs.RunVMStoragePath(), clh.id)
+	dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
 	os.MkdirAll(dir, os.ModePerm)
 
-	if err := clh.cleanupVM(false); err != nil {
-		t.Errorf("cloudHypervisor.cleanupVM() expected error != %v", err)
-	}
-	_, err := os.Stat(dir)
+	err = clh.cleanupVM(false)
+	assert.NoError(err, "persist.GetDriver() unexpected error")
 
-	if err == nil {
-		t.Errorf("dir should not exist %s", dir)
-	}
+	_, err = os.Stat(dir)
+	assert.Error(err, "dir should not exist %s", dir)
 
-	if !os.IsNotExist(err) {
-		t.Errorf("Unexpected error = %v", err)
-	}
+	assert.True(os.IsNotExist(err), "persist.GetDriver() unexpected error")
 }
 
 func TestClhCreateSandbox(t *testing.T) {
@@ -207,8 +206,12 @@ func TestClhCreateSandbox(t *testing.T) {
 	clhConfig, err := newClhConfig()
 	assert.NoError(err)
 
+	store, err := persist.GetDriver()
+	assert.NoError(err)
+
 	clh := &cloudHypervisor{
 		config: clhConfig,
+		store:  store,
 	}
 
 	sandbox := &Sandbox{
@@ -229,10 +232,14 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
 	clhConfig, err := newClhConfig()
 	assert.NoError(err)
 
+	store, err := persist.GetDriver()
+	assert.NoError(err)
+
 	clh := &cloudHypervisor{
 		config:    clhConfig,
 		APIClient: &clhClientMock{},
 		virtiofsd: &virtiofsdMock{},
+		store:     store,
 	}
 
 	err = clh.startSandbox(10)
virtcontainers/hypervisor.go

@@ -16,8 +16,8 @@ import (
 	"strings"
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
+	"github.com/kata-containers/runtime/virtcontainers/persist"
 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/kata-containers/runtime/virtcontainers/utils"
 )
@@ -193,15 +193,26 @@ func (hType *HypervisorType) String() string {
 
 // newHypervisor returns an hypervisor from and hypervisor type.
 func newHypervisor(hType HypervisorType) (hypervisor, error) {
+	store, err := persist.GetDriver()
+	if err != nil {
+		return nil, err
+	}
+
 	switch hType {
 	case QemuHypervisor:
-		return &qemu{}, nil
+		return &qemu{
+			store: store,
+		}, nil
 	case FirecrackerHypervisor:
 		return &firecracker{}, nil
 	case AcrnHypervisor:
-		return &Acrn{}, nil
+		return &Acrn{
+			store: store,
+		}, nil
 	case ClhHypervisor:
-		return &cloudHypervisor{}, nil
+		return &cloudHypervisor{
+			store: store,
+		}, nil
 	case MockHypervisor:
 		return &mockHypervisor{}, nil
 	default:
@@ -713,7 +724,7 @@ func getHypervisorPid(h hypervisor) int {
 	return pids[0]
 }
 
-func generateVMSocket(id string, useVsock bool) (interface{}, error) {
+func generateVMSocket(id string, useVsock bool, vmStogarePath string) (interface{}, error) {
 	if useVsock {
 		vhostFd, contextID, err := utils.FindContextID()
 		if err != nil {
@@ -727,7 +738,7 @@ func generateVMSocket(id string, useVsock bool) (interface{}, error) {
 		}, nil
 	}
 
-	path, err := utils.BuildSocketPath(filepath.Join(fs.RunVMStoragePath(), id), defaultSocketName)
+	path, err := utils.BuildSocketPath(filepath.Join(vmStogarePath, id), defaultSocketName)
 	if err != nil {
 		return nil, err
 	}
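Threading the storage path into generateVMSocket() as a parameter, rather than reading fs.RunVMStoragePath() inside the function, also decouples the helper from the global filesystem layout; the updated TestGenerateVMSocket below exploits this by passing an empty string. A sketch of the non-vsock path construction under the new signature (the "kata.sock" name is taken from the vm.go comments later in this diff; treat the exact constant behind defaultSocketName as an assumption):

package main

import (
	"fmt"
	"path/filepath"
)

// vmSocketPath mirrors the non-vsock branch of generateVMSocket after this
// commit: the VM storage root arrives as an argument, so callers (and tests)
// control it. The real code routes through utils.BuildSocketPath, which can
// also return an error; this sketch skips that check.
func vmSocketPath(vmStoragePath, id, socketName string) string {
	return filepath.Join(vmStoragePath, id, socketName)
}

func main() {
	// As the runtime would call it, with a real storage root ...
	fmt.Println(vmSocketPath("/run/vc/vm", "sandbox1", "kata.sock"))
	// ... and as the updated test does, with an empty root.
	fmt.Println(vmSocketPath("", "sandbox1", "kata.sock"))
}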
virtcontainers/hypervisor_test.go

@@ -72,12 +72,6 @@ func testNewHypervisorFromHypervisorType(t *testing.T, hypervisorType Hypervisor
 	assert.Exactly(hy, expected)
 }
 
-func TestNewHypervisorFromQemuHypervisorType(t *testing.T) {
-	hypervisorType := QemuHypervisor
-	expectedHypervisor := &qemu{}
-	testNewHypervisorFromHypervisorType(t, hypervisorType, expectedHypervisor)
-}
-
 func TestNewHypervisorFromMockHypervisorType(t *testing.T) {
 	hypervisorType := MockHypervisor
 	expectedHypervisor := &mockHypervisor{}
@@ -441,7 +435,7 @@ func genericTestRunningOnVMM(t *testing.T, data []testNestedVMMData) {
 func TestGenerateVMSocket(t *testing.T) {
 	assert := assert.New(t)
 
-	s, err := generateVMSocket("a", false)
+	s, err := generateVMSocket("a", false, "")
 	assert.NoError(err)
 	socket, ok := s.(types.Socket)
 	assert.True(ok)
@@ -453,7 +447,7 @@ func TestGenerateVMSocket(t *testing.T) {
 	if tc.NotValid(ktu.NeedRoot()) {
 		t.Skip(testDisabledAsNonRoot)
 	}
-	s, err = generateVMSocket("a", true)
+	s, err = generateVMSocket("a", true, "")
 	assert.NoError(err)
 	vsock, ok := s.(types.VSock)
 	assert.True(ok)
virtcontainers/qemu.go

@@ -32,7 +32,6 @@ import (
 
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/kata-containers/runtime/virtcontainers/utils"
@@ -97,6 +96,8 @@ type qemu struct {
 	nvdimmCount int
 
 	stopped bool
+
+	store persistapi.PersistDriver
 }
 
 const (
@@ -271,7 +272,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig) error {
 
 		// The path might already exist, but in case of VM templating,
 		// we have to create it since the sandbox has not created it yet.
-		if err = os.MkdirAll(filepath.Join(fs.RunStoragePath(), id), DirMode); err != nil {
+		if err = os.MkdirAll(filepath.Join(q.store.RunStoragePath(), id), DirMode); err != nil {
 			return err
 		}
 	}
@@ -326,7 +327,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
 }
 
 func (q *qemu) qmpSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, qmpSocket)
+	return utils.BuildSocketPath(q.store.RunVMStoragePath(), id, qmpSocket)
 }
 
 func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) {
@@ -570,7 +571,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
 		VGA:         "none",
 		GlobalParam: "kvm-pit.lost_tick_policy=discard",
 		Bios:        firmwarePath,
-		PidFile:     filepath.Join(fs.RunVMStoragePath(), q.id, "pid"),
+		PidFile:     filepath.Join(q.store.RunVMStoragePath(), q.id, "pid"),
 	}
 
 	if ioThread != nil {
@@ -599,7 +600,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
 }
 
 func (q *qemu) vhostFSSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, vhostFSSocket)
+	return utils.BuildSocketPath(q.store.RunVMStoragePath(), id, vhostFSSocket)
 }
 
 func (q *qemu) virtiofsdArgs(fd uintptr) []string {
@@ -753,7 +754,7 @@ func (q *qemu) startSandbox(timeout int) error {
 		q.fds = []*os.File{}
 	}()
 
-	vmPath := filepath.Join(fs.RunVMStoragePath(), q.id)
+	vmPath := filepath.Join(q.store.RunVMStoragePath(), q.id)
 	err := os.MkdirAll(vmPath, DirMode)
 	if err != nil {
 		return err
@@ -930,7 +931,7 @@ func (q *qemu) stopSandbox() error {
 func (q *qemu) cleanupVM() error {
 
 	// cleanup vm path
-	dir := filepath.Join(fs.RunVMStoragePath(), q.id)
+	dir := filepath.Join(q.store.RunVMStoragePath(), q.id)
 
 	// If it's a symlink, remove both dir and the target.
 	// This can happen when vm template links a sandbox to a vm.
@@ -951,7 +952,7 @@ func (q *qemu) cleanupVM() error {
 	}
 
 	if q.config.VMid != "" {
-		dir = filepath.Join(fs.RunStoragePath(), q.config.VMid)
+		dir = filepath.Join(q.store.RunStoragePath(), q.config.VMid)
 		if err := os.RemoveAll(dir); err != nil {
 			q.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path")
 		}
@@ -1651,7 +1652,7 @@ func (q *qemu) getSandboxConsole(id string) (string, error) {
 	span, _ := q.trace("getSandboxConsole")
 	defer span.Finish()
 
-	return utils.BuildSocketPath(fs.RunVMStoragePath(), id, consoleSocket)
+	return utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket)
 }
 
 func (q *qemu) saveSandbox() error {
@@ -2135,5 +2136,5 @@ func (q *qemu) check() error {
 }
 
 func (q *qemu) generateSocket(id string, useVsock bool) (interface{}, error) {
-	return generateVMSocket(id, useVsock)
+	return generateVMSocket(id, useVsock, q.store.RunVMStoragePath())
 }
virtcontainers/qemu_arch_base_test.go

@@ -260,7 +260,7 @@ func TestQemuArchBaseAppendConsoles(t *testing.T) {
 	assert := assert.New(t)
 	qemuArchBase := newQemuArchBase()
 
-	path := filepath.Join(filepath.Join(fs.RunStoragePath(), sandboxID), consoleSocket)
+	path := filepath.Join(filepath.Join(fs.MockRunStoragePath(), sandboxID), consoleSocket)
 
 	expectedOut := []govmmQemu.Device{
 		govmmQemu.SerialDevice{
virtcontainers/qemu_test.go

@@ -17,7 +17,6 @@ import (
 	govmmQemu "github.com/intel/govmm/qemu"
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/runtime/virtcontainers/persist"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
@@ -75,9 +74,13 @@ func TestQemuKernelParameters(t *testing.T) {
 
 func TestQemuCreateSandbox(t *testing.T) {
 	qemuConfig := newQemuConfig()
-	q := &qemu{}
 	assert := assert.New(t)
 
+	store, err := persist.GetDriver()
+	assert.NoError(err)
+	q := &qemu{
+		store: store,
+	}
 	sandbox := &Sandbox{
 		ctx: context.Background(),
 		id:  "testSandbox",
@@ -88,11 +91,11 @@ func TestQemuCreateSandbox(t *testing.T) {
 
 	// Create the hypervisor fake binary
 	testQemuPath := filepath.Join(testDir, testHypervisor)
-	_, err := os.Create(testQemuPath)
+	_, err = os.Create(testQemuPath)
 	assert.NoError(err)
 
 	// Create parent dir path for hypervisor.json
-	parentDir := filepath.Join(fs.RunStoragePath(), sandbox.id)
+	parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
 	assert.NoError(os.MkdirAll(parentDir, DirMode))
 
 	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
@@ -103,9 +106,13 @@ func TestQemuCreateSandbox(t *testing.T) {
 
 func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) {
 	qemuConfig := newQemuConfig()
-	q := &qemu{}
 	assert := assert.New(t)
 
+	store, err := persist.GetDriver()
+	assert.NoError(err)
+	q := &qemu{
+		store: store,
+	}
 	sandbox := &Sandbox{
 		ctx: context.Background(),
 		id:  "testSandbox",
@@ -116,11 +123,11 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) {
 
 	// Create the hypervisor fake binary
 	testQemuPath := filepath.Join(testDir, testHypervisor)
-	_, err := os.Create(testQemuPath)
+	_, err = os.Create(testQemuPath)
 	assert.NoError(err)
 
 	// Ensure parent dir path for hypervisor.json does not exist.
-	parentDir := filepath.Join(fs.RunStoragePath(), sandbox.id)
+	parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
 	assert.NoError(os.RemoveAll(parentDir))
 
 	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
@@ -276,11 +283,14 @@ func TestQemuAddDeviceKataVSOCK(t *testing.T) {
 
 func TestQemuGetSandboxConsole(t *testing.T) {
 	assert := assert.New(t)
+	store, err := persist.GetDriver()
+	assert.NoError(err)
 	q := &qemu{
-		ctx: context.Background(),
+		ctx:   context.Background(),
+		store: store,
 	}
 	sandboxID := "testSandboxID"
-	expected := filepath.Join(fs.RunVMStoragePath(), sandboxID, consoleSocket)
+	expected := filepath.Join(q.store.RunVMStoragePath(), sandboxID, consoleSocket)
 
 	result, err := q.getSandboxConsole(sandboxID)
 	assert.NoError(err)
@@ -415,7 +425,9 @@ func TestQemuFileBackedMem(t *testing.T) {
 	sandbox, err := createQemuSandboxConfig()
 	assert.NoError(err)
 
-	q := &qemu{}
+	q := &qemu{
+		store: sandbox.newStore,
+	}
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
 	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
@@ -428,7 +440,9 @@ func TestQemuFileBackedMem(t *testing.T) {
 	sandbox, err = createQemuSandboxConfig()
 	assert.NoError(err)
 
-	q = &qemu{}
+	q = &qemu{
+		store: sandbox.newStore,
+	}
 	sandbox.config.HypervisorConfig.BootToBeTemplate = true
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
 	sandbox.config.HypervisorConfig.MemoryPath = fallbackFileBackedMemDir
@@ -442,7 +456,9 @@ func TestQemuFileBackedMem(t *testing.T) {
 	sandbox, err = createQemuSandboxConfig()
 	assert.NoError(err)
 
-	q = &qemu{}
+	q = &qemu{
+		store: sandbox.newStore,
+	}
 	sandbox.config.HypervisorConfig.FileBackedMemRootDir = "/tmp/xyzabc"
 	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
@@ -462,7 +478,7 @@ func createQemuSandboxConfig() (*Sandbox, error) {
 		},
 	}
 
-	newStore, err := persist.GetDriver("fs")
+	newStore, err := persist.GetDriver()
 	if err != nil {
 		return &Sandbox{}, err
 	}
virtcontainers/vm.go

@@ -16,7 +16,6 @@ import (
 	pb "github.com/kata-containers/runtime/protocols/cache"
 	"github.com/kata-containers/runtime/virtcontainers/persist"
 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
-	"github.com/kata-containers/runtime/virtcontainers/persist/fs"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
 	"github.com/sirupsen/logrus"
 )
@@ -159,7 +158,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
 
 	virtLog.WithField("vm", id).WithField("config", config).Info("create new vm")
 
-	store, err := persist.GetDriver("fs")
+	store, err := persist.GetDriver()
 	if err != nil {
 		return nil, err
 	}
@@ -178,7 +177,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
 
 	// 2. setup agent
 	agent := newAgent(config.AgentType)
-	vmSharePath := buildVMSharePath(id)
+	vmSharePath := buildVMSharePath(id, store.RunVMStoragePath())
 	err = agent.configure(hypervisor, id, vmSharePath, isProxyBuiltIn(config.ProxyType), config.AgentConfig)
 	if err != nil {
 		return nil, err
@@ -243,7 +242,7 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err
 		return nil, err
 	}
 
-	store, err := persist.GetDriver("fs")
+	store, err := persist.GetDriver()
 	if err != nil {
 		return nil, err
 	}
@@ -284,8 +283,8 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err
 	}, nil
 }
 
-func buildVMSharePath(id string) string {
-	return filepath.Join(fs.RunVMStoragePath(), id, "shared")
+func buildVMSharePath(id string, vmStoragePath string) string {
+	return filepath.Join(vmStoragePath, id, "shared")
 }
 
 func (v *VM) logger() logrus.FieldLogger {
@@ -411,7 +410,7 @@ func (v *VM) assignSandbox(s *Sandbox) error {
 	// - link vm socket from sandbox dir (/run/vc/vm/sbid/<kata.sock>) to vm dir (/run/vc/vm/vmid/<kata.sock>)
 	// - link 9pfs share path from sandbox dir (/run/kata-containers/shared/sandboxes/sbid/) to vm dir (/run/vc/vm/vmid/shared/)
 
-	vmSharePath := buildVMSharePath(v.id)
+	vmSharePath := buildVMSharePath(v.id, v.store.RunVMStoragePath())
 	vmSockDir := filepath.Join(v.store.RunVMStoragePath(), v.id)
 	sbSharePath := s.agent.getSharePath(s.id)
 	sbSockDir := filepath.Join(v.store.RunVMStoragePath(), s.id)
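For reference, buildVMSharePath() now takes the storage root explicitly but yields the same layout the assignSandbox() comments describe. A small usage sketch (the function body is copied from the diff; the /run/vc/vm root comes from those comments):

package main

import (
	"fmt"
	"path/filepath"
)

// buildVMSharePath, post-commit: the caller supplies the VM storage root
// instead of the function reading fs.RunVMStoragePath() itself.
func buildVMSharePath(id string, vmStoragePath string) string {
	return filepath.Join(vmStoragePath, id, "shared")
}

func main() {
	fmt.Println(buildVMSharePath("vmid", "/run/vc/vm"))
	// Output: /run/vc/vm/vmid/shared
}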