persist: remove VCStore from hypervisor

Remove usage of VCStore from hypervisors.

Signed-off-by: Wei Zhang <weizhang555.zw@gmail.com>

parent 687f2dbe84
commit 633748aa76
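The substance of the change is easiest to see in the Acrn hunks below: the ACRN UUID bookkeeping, which previously went through VCStore, is now persisted directly as JSON under a flock-guarded directory. What follows is a minimal, self-contained Go sketch of that read/write pattern; the names AcrnInfo, storeInfo and loadInfo mirror the diff, but the struct layout, paths and permissions here are illustrative stand-ins, not the runtime's exact ones.

// Sketch of the flock-guarded JSON persistence pattern this commit
// introduces in place of VCStore. Assumptions: AcrnInfo is a stand-in
// for the runtime's UUID-availability state; paths and modes are
// illustrative. Linux-only (syscall.Flock).
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"
)

// AcrnInfo stands in for the UUID-availability state the runtime persists.
type AcrnInfo struct {
	UUIDAvailability [8]uint8 `json:"uuid_availability"`
}

// storeInfo writes info to <dir>/uuid.json while holding an exclusive,
// non-blocking lock on the directory, mirroring the commit's storeInfo.
func storeInfo(dir string, info *AcrnInfo) error {
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	dirf, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer dirf.Close()
	// LOCK_EX|LOCK_NB: fail immediately if another process holds the lock.
	if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		return err
	}
	defer syscall.Flock(int(dirf.Fd()), syscall.LOCK_UN)

	data, err := json.Marshal(info)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(dir, "uuid.json"), data, 0640)
}

// loadInfo reads the state back under a shared, non-blocking lock,
// mirroring the commit's loadInfo.
func loadInfo(dir string, info *AcrnInfo) error {
	dirf, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer dirf.Close()
	if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_SH|syscall.LOCK_NB); err != nil {
		return err
	}
	defer syscall.Flock(int(dirf.Fd()), syscall.LOCK_UN)

	data, err := ioutil.ReadFile(filepath.Join(dir, "uuid.json"))
	if err != nil {
		return err
	}
	return json.Unmarshal(data, info)
}

func main() {
	dir, err := ioutil.TempDir("", "uuid-store")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	in := AcrnInfo{UUIDAvailability: [8]uint8{1, 1, 0, 1, 0, 0, 0, 0}}
	if err := storeInfo(dir, &in); err != nil {
		panic(err)
	}
	var out AcrnInfo
	if err := loadInfo(dir, &out); err != nil {
		panic(err)
	}
	fmt.Printf("round-trip: %+v\n", out)
}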
@@ -6,7 +6,6 @@
 package main

 import (
-	"context"
 	"fmt"
 	"io/ioutil"
 	"strings"
@@ -14,7 +13,6 @@ import (
 	"unsafe"

 	vc "github.com/kata-containers/runtime/virtcontainers"
-	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/sirupsen/logrus"
 )

@@ -238,12 +236,7 @@ func acrnIsUsable() error {
 	kataLog.WithField("device", acrnDevice).Info("device available")

 	acrnInst := vc.Acrn{}
-	vcStore, err := store.NewVCSandboxStore(context.Background(), "kata-check")
-	if err != nil {
-		return err
-	}
-
-	uuidStr, err := acrnInst.GetNextAvailableUUID(vcStore)
+	uuidStr, err := acrnInst.GetNextAvailableUUID()
 	if err != nil {
 		return err
 	}
@@ -7,6 +7,7 @@ package virtcontainers

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"os"
 	"os/exec"
@@ -20,6 +21,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"

+	"github.com/kata-containers/runtime/pkg/rootless"
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
 	"github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
@@ -28,6 +30,25 @@ import (
 	"github.com/kata-containers/runtime/virtcontainers/utils"
 )

+// Since ACRN is using the store in a quite abnormal way, let's first draw it back from store to here
+
+// UUIDPathSuffix is the suffix used for uuid storage
+const (
+	UUIDPathSuffix = "uuid"
+	uuidFile       = "uuid.json"
+)
+
+// VMUUIDStoragePath is the uuid directory.
+// It will contain all uuid info used by guest vm.
+var VMUUIDStoragePath = func() string {
+	path := filepath.Join("/run/vc", UUIDPathSuffix)
+	if rootless.IsRootless() {
+		return filepath.Join(rootless.GetRootlessDir(), path)
+	}
+	return path
+}
+
 // ACRN currently supports only known UUIDs for security
 // reasons (FuSa). When launching VM, only these pre-defined
 // UUID should be used else VM launch will fail. The main
@@ -73,7 +94,6 @@ type AcrnState struct {
 // Acrn is a Hypervisor interface implementation for the Linux acrn hypervisor.
 type Acrn struct {
 	id         string
-	store      *store.VCStore
 	config     HypervisorConfig
 	acrnConfig Config
 	state      AcrnState
@@ -276,7 +296,7 @@ func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
 }

 // setup sets the Acrn structure up.
-func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error {
+func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
 	span, _ := a.trace("setup")
 	defer span.Finish()

@@ -286,24 +306,19 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
 	}

 	a.id = id
-	a.store = vcStore
 	a.config = *hypervisorConfig
 	a.arch = newAcrnArch(a.config)

 	var create bool
 	var uuid string

-	if a.store != nil { //use old store
-		if err = a.store.Load(store.Hypervisor, &a.state); err != nil {
-			create = true
-		}
-	} else if a.state.UUID == "" { // new store
+	if a.state.UUID == "" {
 		create = true
 	}

 	if create {
 		a.Logger().Debug("Setting UUID")
-		if uuid, err = a.GetNextAvailableUUID(nil); err != nil {
+		if uuid, err = a.GetNextAvailableUUID(); err != nil {
 			return err
 		}
 		a.state.UUID = uuid
@@ -316,10 +331,6 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
 		return err
 	}

-	if err = a.storeState(); err != nil {
-		return err
-	}
-
 	if err = a.storeInfo(); err != nil {
 		return err
 	}
@@ -348,14 +359,14 @@ func (a *Acrn) createDummyVirtioBlkDev(devices []Device) ([]Device, error) {
 }

 // createSandbox is the Hypervisor sandbox creation.
-func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error {
+func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
 	// Save the tracing context
 	a.ctx = ctx

 	span, _ := a.trace("createSandbox")
 	defer span.Finish()

-	if err := a.setup(id, hypervisorConfig, store); err != nil {
+	if err := a.setup(id, hypervisorConfig); err != nil {
 		return err
 	}

@@ -458,11 +469,6 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
 		return err
 	}

-	//Store VMM information
-	if err = a.storeState(); err != nil {
-		return err
-	}
-
 	return nil
 }

@@ -499,7 +505,7 @@ func (a *Acrn) stopSandbox() (err error) {
 	uuid := a.state.UUID
 	Idx := acrnUUIDsToIdx[uuid]

-	if err = a.store.Load(store.UUID, &a.info); err != nil {
+	if err = a.loadInfo(); err != nil {
 		a.Logger().Info("Failed to load UUID availability info")
 		return err
 	}
@@ -698,7 +704,7 @@ func (a *Acrn) getPids() []int {
 	return []int{a.state.PID}
 }

-func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
+func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
 	return errors.New("acrn is not supported by VM cache")
 }
@@ -737,20 +743,14 @@ func (a *Acrn) GetACRNUUIDBytes(uid string) (uuid.UUID, error) {

 // GetNextAvailableUUID returns the next available UUID for VM creation.
 // If no valid UUIDs are available it returns err.
-func (a *Acrn) GetNextAvailableUUID(uuidstore *store.VCStore) (string, error) {
+func (a *Acrn) GetNextAvailableUUID() (string, error) {
 	var MaxVMSupported uint8
 	var Idx uint8
 	var uuidStr string
 	var err error

-	if uuidstore == nil {
-		uuidstore = a.store
-	}
-
-	if uuidstore != nil { //use old store
-		if err = uuidstore.Load(store.UUID, &a.info); err != nil {
-			a.Logger().Infof("Load UUID store failed")
-		}
+	if err = a.loadInfo(); err != nil {
+		a.Logger().Infof("Load UUID store failed")
 	}

 	if MaxVMSupported, err = a.GetMaxSupportedACRNVM(); err != nil {
@@ -795,22 +795,79 @@ func (a *Acrn) GetMaxSupportedACRNVM() (uint8, error) {
 	return platformInfo.maxKataContainers, nil
 }

-func (a *Acrn) storeState() error {
-	if a.store != nil {
-		if err := a.store.Store(store.Hypervisor, a.state); err != nil {
-			a.Logger().WithError(err).Error("failed to store acrn state")
-			return err
-		}
-	}
-	return nil
-}
-
 func (a *Acrn) storeInfo() error {
-	if a.store != nil {
-		if err := a.store.Store(store.UUID, a.info); err != nil {
-			a.Logger().WithError(err).Error("failed to store acrn info")
-			return err
-		}
-	}
+	dirPath := VMUUIDStoragePath()
+
+	_, err := os.Stat(dirPath)
+	if os.IsNotExist(err) {
+		// Root directory
+		a.Logger().WithField("path", dirPath).Debugf("Creating UUID directory")
+		if err := os.MkdirAll(dirPath, store.DirMode); err != nil {
+			return err
+		}
+	} else if err != nil {
+		return err
+	}
+
+	dirf, err := os.Open(dirPath)
+	if err != nil {
+		return err
+	}
+	defer dirf.Close()
+
+	if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+		return err
+	}
+
+	// write data
+	f, err := os.OpenFile(filepath.Join(dirPath, uuidFile), os.O_RDWR|os.O_CREATE, 0755)
+	if err != nil {
+		return fmt.Errorf("failed to store information into uuid.json: %v", err)
+	}
+	defer f.Close()
+
+	jsonOut, err := json.Marshal(a.info)
+	if err != nil {
+		return fmt.Errorf("Could not marshal data: %s", err)
+	}
+	f.Write(jsonOut)
+	return nil
+}
+
+func (a *Acrn) loadInfo() error {
+	dirPath := VMUUIDStoragePath()
+
+	_, err := os.Stat(dirPath)
+	if err != nil {
+		return fmt.Errorf("failed to load ACRN information: %v", err)
+	}
+
+	dirf, err := os.Open(dirPath)
+	if err != nil {
+		return err
+	}
+
+	if err := syscall.Flock(int(dirf.Fd()), syscall.LOCK_SH|syscall.LOCK_NB); err != nil {
+		dirf.Close()
+		return err
+	}
+
+	defer dirf.Close()
+
+	// read data
+	f, err := os.Open(filepath.Join(dirPath, uuidFile))
+	if err != nil {
+		return fmt.Errorf("failed to load information from uuid.json: %v", err)
+	}
+
+	dec := json.NewDecoder(f)
+	if dec == nil {
+		return fmt.Errorf("failed to create json decoder")
+	}
+
+	err = dec.Decode(&a.info)
+	if err != nil {
+		return fmt.Errorf("could not decode data: %v", err)
+	}
 	return nil
 }
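A note on the locking in the new storeInfo/loadInfo pair above: the writer takes an exclusive flock (LOCK_EX) on the UUID directory before rewriting uuid.json, while the reader takes a shared one (LOCK_SH), so concurrent readers do not exclude each other but a writer excludes everyone. Both pass LOCK_NB, which presumably is meant to make a contended lock fail fast rather than stall the runtime, and because the lock is held on the directory rather than the file, it also serializes creation of uuid.json itself.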
@@ -230,7 +230,7 @@ func TestAcrnCreateSandbox(t *testing.T) {
 	//set PID to 1 to ignore hypercall to get UUID and set a random UUID
 	a.state.PID = 1
 	a.state.UUID = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"
-	err = a.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false)
+	err = a.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
 	assert.Exactly(acrnConfig, a.config)
 }
@@ -104,7 +104,6 @@ func (s *CloudHypervisorState) reset() {
 type cloudHypervisor struct {
 	id        string
 	state     CloudHypervisorState
-	store     *store.VCStore
 	config    HypervisorConfig
 	ctx       context.Context
 	APIClient clhClient
@@ -139,7 +138,7 @@ var clhDebugKernelParams = []Param{

 // For cloudHypervisor this call only sets the internal structure up.
 // The VM will be created and started through startSandbox().
-func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error {
+func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
 	clh.ctx = ctx

 	span, _ := clh.trace("createSandbox")
@@ -151,7 +150,6 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
 	}

 	clh.id = id
-	clh.store = vcStore
 	clh.config = *hypervisorConfig
 	clh.state.state = clhNotReady

@@ -187,12 +185,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ

 	}

-	// No need to return an error from there since there might be nothing
-	// to fetch if this is the first time the hypervisor is created.
-	err = clh.store.Load(store.Hypervisor, &clh.state)
-	if err != nil {
-		clh.Logger().WithField("function", "createSandbox").WithError(err).Info("Sandbox not found, creating")
-	} else {
-		if clh.state.PID > 0 {
+	if clh.state.PID > 0 {
 		clh.Logger().WithField("function", "createSandbox").Info("Sandbox already exists, loading from state")
 		clh.virtiofsd = &virtiofsd{
 			PID: clh.state.VirtiofsdPID,
@@ -203,6 +196,10 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
 		return nil
 	}

+	// No need to return an error from there since there might be nothing
+	// to fetch if this is the first time the hypervisor is created.
+	clh.Logger().WithField("function", "createSandbox").WithError(err).Info("Sandbox not found, creating")
+
 	// Set initial memory size of the virtual machine
 	clh.vmconfig.Memory.Size = int64(clh.config.MemorySize) << utils.MibToBytesShift
 	clh.vmconfig.Memory.File = "/dev/shm"
@@ -323,9 +320,6 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
 			return err
 		}
 		clh.state.VirtiofsdPID = pid
-		if err = clh.storeState(); err != nil {
-			return err
-		}
 	} else {
 		return errors.New("cloud-hypervisor only supports virtio based file sharing")
 	}
@@ -350,10 +344,6 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
 	}

 	clh.state.state = clhReady
-	if err = clh.storeState(); err != nil {
-		return err
-	}
-
 	return nil
 }

@@ -431,7 +421,7 @@ func (clh *cloudHypervisor) stopSandbox() (err error) {
 	return clh.terminate()
 }

-func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
+func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
 	return errors.New("cloudHypervisor is not supported by VM cache")
 }

@@ -442,6 +432,7 @@ func (clh *cloudHypervisor) toGrpc() ([]byte, error) {
 func (clh *cloudHypervisor) save() (s persistapi.HypervisorState) {
 	s.Pid = clh.state.PID
 	s.Type = string(ClhHypervisor)
+	s.VirtiofsdPid = clh.state.VirtiofsdPID
 	return
 }

@@ -589,7 +580,6 @@ func (clh *cloudHypervisor) terminate() (err error) {

 func (clh *cloudHypervisor) reset() {
 	clh.state.reset()
-	clh.storeState()
 }

 func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{}, error) {
@@ -633,17 +623,7 @@ func (clh *cloudHypervisor) logFilePath(id string) (string, error) {
 	return utils.BuildSocketPath(store.RunVMStoragePath(), id, clhLogFile)
 }

-func (clh *cloudHypervisor) storeState() error {
-	if clh.store != nil {
-		if err := clh.store.Store(store.Hypervisor, clh.state); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func (clh *cloudHypervisor) waitVMM(timeout uint) error {

 	clhRunning, err := clh.isClhRunning(timeout)

 	if err != nil {
@@ -143,7 +143,6 @@ type firecracker struct {
 	firecrackerd *exec.Cmd           //Tracks the firecracker process itself
 	connection   *client.Firecracker //Tracks the current active connection

-	store          *store.VCStore
 	ctx            context.Context
 	config         HypervisorConfig
 	pendingDevices []firecrackerDevice // Devices to be added before the FC VM ready
@@ -222,7 +221,7 @@ func (fc *firecracker) bindMount(ctx context.Context, source, destination string

 // For firecracker this call only sets the internal structure up.
 // The sandbox will be created and started through startSandbox().
-func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error {
+func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
 	fc.ctx = ctx

 	span, _ := fc.trace("createSandbox")
@@ -231,7 +230,6 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
 	//TODO: check validity of the hypervisor config provided
 	//https://github.com/kata-containers/runtime/issues/1065
 	fc.id = id
-	fc.store = vcStore
 	fc.state.set(notReady)
 	fc.config = *hypervisorConfig
 	fc.stateful = stateful
@@ -263,15 +261,6 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N

 	fc.fcConfig = &types.FcConfig{}
 	fc.fcConfigPath = filepath.Join(fc.vmPath, defaultFcConfig)

-	// No need to return an error from there since there might be nothing
-	// to fetch if this is the first time the hypervisor is created.
-	if fc.store != nil {
-		if err := fc.store.Load(store.Hypervisor, &fc.info); err != nil {
-			fc.Logger().WithField("function", "init").WithError(err).Info("No info could be fetched")
-		}
-	}
-
 	return nil
 }

@@ -385,16 +374,6 @@ func (fc *firecracker) fcInit(timeout int) error {
 	}

-	// Fetch sandbox network to be able to access it from the sandbox structure.
-	var networkNS NetworkNamespace
-	if fc.store != nil {
-		if err := fc.store.Load(store.Network, &networkNS); err == nil {
-			if networkNS.NetNsPath == "" {
-				fc.Logger().WithField("NETWORK NAMESPACE NULL", networkNS).Warn()
-			}
-			fc.netNSPath = networkNS.NetNsPath
-		}
-	}
-
 	err := os.MkdirAll(fc.jailerRoot, store.DirMode)
 	if err != nil {
 		return err
@@ -476,11 +455,6 @@ func (fc *firecracker) fcInit(timeout int) error {
 		fc.Logger().WithField("fcInit failed:", err).Debug()
 		return err
 	}

-	// Store VMM information
-	if fc.store != nil {
-		return fc.store.Store(store.Hypervisor, fc.info)
-	}
 	return nil
 }

@@ -1155,7 +1129,7 @@ func (fc *firecracker) getPids() []int {
 	return []int{fc.info.PID}
 }

-func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
+func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
 	return errors.New("firecracker is not supported by VM cache")
 }
@@ -736,7 +736,7 @@ func generateVMSocket(id string, useVsock bool) (interface{}, error) {
 // hypervisor is the virtcontainers hypervisor interface.
 // The default hypervisor implementation is Qemu.
 type hypervisor interface {
-	createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error
+	createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error
 	startSandbox(timeout int) error
 	stopSandbox() error
 	pauseSandbox() error
@@ -756,7 +756,7 @@ type hypervisor interface {
 	// getPids returns a slice of hypervisor related process ids.
 	// The hypervisor pid must be put at index 0.
 	getPids() []int
-	fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error
+	fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error
 	toGrpc() ([]byte, error)
 	check() error
@@ -11,7 +11,6 @@ import (
 	"os"

 	persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
-	"github.com/kata-containers/runtime/virtcontainers/store"
 	"github.com/kata-containers/runtime/virtcontainers/types"
 )

@@ -27,7 +26,7 @@ func (m *mockHypervisor) hypervisorConfig() HypervisorConfig {
 	return HypervisorConfig{}
 }

-func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, store *store.VCStore, stateful bool) error {
+func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
 	err := hypervisorConfig.valid()
 	if err != nil {
 		return err
@@ -108,7 +107,7 @@ func (m *mockHypervisor) getPids() []int {
 	return []int{m.mockPid}
 }

-func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
+func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
 	return errors.New("mockHypervisor is not supported by VM cache")
 }
@@ -31,7 +31,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
 	ctx := context.Background()

 	// wrong config
-	err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false)
+	err := m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.Error(err)

 	sandbox.config.HypervisorConfig = HypervisorConfig{
@@ -40,7 +40,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
 		HypervisorPath: fmt.Sprintf("%s/%s", testDir, testHypervisor),
 	}

-	err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, nil, false)
+	err = m.createSandbox(ctx, sandbox.config.ID, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
 }
@@ -77,8 +77,6 @@ type QemuState struct {
 type qemu struct {
 	id string

-	store *store.VCStore
-
 	config HypervisorConfig

 	qmpMonitorCh qmpChannel
@@ -226,7 +224,7 @@ func (q *qemu) trace(name string) (opentracing.Span, context.Context) {
 }

 // setup sets the Qemu structure up.
-func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore) error {
+func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig) error {
 	span, _ := q.trace("setup")
 	defer span.Finish()

@@ -236,7 +234,6 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
 	}

 	q.id = id
-	q.store = vcStore
 	q.config = *hypervisorConfig
 	q.arch = newQemuArch(q.config)

@@ -255,12 +252,7 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
 	}

 	var create bool
-	if q.store != nil { //use old store
-		if err := q.store.Load(store.Hypervisor, &q.state); err != nil {
-			// hypervisor doesn't exist, create new one
-			create = true
-		}
-	} else if q.state.UUID == "" { // new store
+	if q.state.UUID == "" {
 		create = true
 	}

@@ -280,10 +272,6 @@ func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig, vcStore *sto
 		if err = os.MkdirAll(store.SandboxRuntimeRootPath(id), store.DirMode); err != nil {
 			return err
 		}
-
-		if err = q.storeState(); err != nil {
-			return err
-		}
 	}

 	nested, err := RunningOnVMM(procCPUInfo)
@@ -463,14 +451,14 @@ func (q *qemu) setupFileBackedMem(knobs *govmmQemu.Knobs, memory *govmmQemu.Memo
 }

 // createSandbox is the Hypervisor sandbox creation implementation for govmmQemu.
-func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, vcStore *store.VCStore, stateful bool) error {
+func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig, stateful bool) error {
 	// Save the tracing context
 	q.ctx = ctx

 	span, _ := q.trace("createSandbox")
 	defer span.Finish()

-	if err := q.setup(id, hypervisorConfig, vcStore); err != nil {
+	if err := q.setup(id, hypervisorConfig); err != nil {
 		return err
 	}

@@ -729,9 +717,6 @@ func (q *qemu) startSandbox(timeout int) error {
 		if err != nil {
 			return err
 		}
-		if err = q.storeState(); err != nil {
-			return err
-		}
 	}

 	var strErr string
@@ -1289,7 +1274,7 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
 		return data, err
 	}

-	return data, q.storeState()
+	return data, nil
 }

 func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
@@ -1301,7 +1286,7 @@ func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int
 		return data, err
 	}

-	return data, q.storeState()
+	return data, nil
 }

 func (q *qemu) hotplugCPUs(vcpus uint32, op operation) (uint32, error) {
@@ -1383,15 +1368,10 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
 		hotpluggedVCPUs++
 		if hotpluggedVCPUs == amount {
 			// All vCPUs were hotplugged
-			return amount, q.storeState()
+			return amount, nil
 		}
 	}

 	// All vCPUs were NOT hotplugged
-	if err := q.storeState(); err != nil {
-		q.Logger().Errorf("failed to save hypervisor state after hotplug %d vCPUs: %v", hotpluggedVCPUs, err)
-	}
-
 	return hotpluggedVCPUs, fmt.Errorf("failed to hot add vCPUs: only %d vCPUs of %d were added", hotpluggedVCPUs, amount)
 }

@@ -1408,7 +1388,6 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
 		// get the last vCPUs and try to remove it
 		cpu := q.state.HotpluggedVCPUs[len(q.state.HotpluggedVCPUs)-1]
 		if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, cpu.ID); err != nil {
-			q.storeState()
 			return i, fmt.Errorf("failed to hotunplug CPUs, only %d CPUs were hotunplugged: %v", i, err)
 		}

@@ -1416,7 +1395,7 @@ func (q *qemu) hotplugRemoveCPUs(amount uint32) (uint32, error) {
 		q.state.HotpluggedVCPUs = q.state.HotpluggedVCPUs[:len(q.state.HotpluggedVCPUs)-1]
 	}

-	return amount, q.storeState()
+	return amount, nil
 }

 func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) (int, error) {
@@ -1522,7 +1501,7 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
 		}
 	}
 	q.state.HotpluggedMemory += memDev.sizeMB
-	return memDev.sizeMB, q.storeState()
+	return memDev.sizeMB, nil
 }

 func (q *qemu) pauseSandbox() error {
@@ -1938,7 +1917,7 @@ type qemuGrpc struct {
 	QemuSMP govmmQemu.SMP
 }

-func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, store *store.VCStore, j []byte) error {
+func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
 	var qp qemuGrpc
 	err := json.Unmarshal(j, &qp)
 	if err != nil {
@@ -1946,7 +1925,6 @@ func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
 	}

 	q.id = qp.ID
-	q.store = store
 	q.config = *hypervisorConfig
 	q.qmpMonitorCh.ctx = ctx
 	q.qmpMonitorCh.path = qp.QmpChannelpath
@@ -1978,16 +1956,6 @@ func (q *qemu) toGrpc() ([]byte, error) {
 	return json.Marshal(&qp)
 }

-func (q *qemu) storeState() error {
-	if q.store != nil {
-		q.state.Bridges = q.arch.getBridges()
-		if err := q.store.Store(store.Hypervisor, q.state); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func (q *qemu) save() (s persistapi.HypervisorState) {
 	pids := q.getPids()
 	if len(pids) != 0 {
@@ -99,7 +99,7 @@ func TestQemuCreateSandbox(t *testing.T) {
 	parentDir := store.SandboxConfigurationRootPath(sandbox.id)
 	assert.NoError(os.MkdirAll(parentDir, store.DirMode))

-	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
+	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
 	assert.NoError(os.RemoveAll(parentDir))
 	assert.Exactly(qemuConfig, q.config)
@@ -131,7 +131,7 @@ func TestQemuCreateSandboxMissingParentDirFail(t *testing.T) {
 	parentDir := store.SandboxConfigurationRootPath(sandbox.id)
 	assert.NoError(os.RemoveAll(parentDir))

-	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
+	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
 }

@@ -364,11 +364,7 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) {
 		config: qemuConfig,
 	}

-	vcStore, err := store.NewVCSandboxStore(q.ctx, q.id)
-	assert.NoError(err)
-	q.store = vcStore
-
-	_, err = q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
+	_, err := q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
 	assert.Error(err)
 	_, err = q.hotplugRemoveDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
 	assert.Error(err)
@@ -414,7 +410,7 @@ func TestQemuGrpc(t *testing.T) {
 	assert.Nil(err)

 	var q2 qemu
-	err = q2.fromGrpc(context.Background(), &config, nil, json)
+	err = q2.fromGrpc(context.Background(), &config, json)
 	assert.Nil(err)

 	assert.True(q.id == q2.id)
@@ -429,7 +425,7 @@ func TestQemuFileBackedMem(t *testing.T) {

 	q := &qemu{}
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
-	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
+	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)

 	assert.Equal(q.qemuConfig.Knobs.FileBackedMem, true)
@@ -445,7 +441,7 @@ func TestQemuFileBackedMem(t *testing.T) {
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
 	sandbox.config.HypervisorConfig.MemoryPath = fallbackFileBackedMemDir

-	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
+	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)

 	expectErr := errors.New("VM templating has been enabled with either virtio-fs or file backed memory and this configuration will not work")
 	assert.Equal(expectErr.Error(), err.Error())
@@ -456,7 +452,7 @@ func TestQemuFileBackedMem(t *testing.T) {

 	q = &qemu{}
 	sandbox.config.HypervisorConfig.FileBackedMemRootDir = "/tmp/xyzabc"
-	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, sandbox.store, false)
+	err = q.createSandbox(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig, false)
 	assert.NoError(err)
 	assert.Equal(q.qemuConfig.Knobs.FileBackedMem, false)
 	assert.Equal(q.qemuConfig.Knobs.MemShared, false)
@@ -566,38 +566,14 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 		}
 	}()

-	if s.supportNewStore() {
-		s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil)
+	s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil)

-		// Ignore the error. Restore can fail for a new sandbox
-		s.Restore()
+	// Ignore the error. Restore can fail for a new sandbox
+	s.Restore()

-		// new store doesn't require hypervisor to be stored immediately
-		if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, nil, s.stateful); err != nil {
-			return nil, err
-		}
-	} else {
-		// Fetch sandbox network to be able to access it from the sandbox structure.
-		var networkNS NetworkNamespace
-		if err = s.store.Load(store.Network, &networkNS); err == nil {
-			s.networkNS = networkNS
-		}
-
-		devices, err := s.store.LoadDevices()
-		if err != nil {
-			s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("load sandbox devices failed")
-		}
-		s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, devices)
-
-		// Load sandbox state. The hypervisor.createSandbox call may need to access state.
-		state, err := s.store.LoadState()
-		if err == nil {
-			s.state = state
-		}
-
-		if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.store, s.stateful); err != nil {
-			return nil, err
-		}
-	}
+	// new store doesn't require hypervisor to be stored immediately
+	if err = s.hypervisor.createSandbox(ctx, s.id, s.networkNS, &sandboxConfig.HypervisorConfig, s.stateful); err != nil {
+		return nil, err
+	}

 	agentConfig, err := newAgentConfig(sandboxConfig.AgentType, sandboxConfig.AgentConfig)
@@ -34,8 +34,6 @@ type VM struct {
 	memory uint32

 	cpuDelta uint32
-
-	store *store.VCStore
 }

 // VMConfig is a collection of all info that a new blackbox VM needs.
@@ -157,22 +155,13 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {

 	virtLog.WithField("vm", id).WithField("config", config).Info("create new vm")

-	vcStore, err := store.NewVCStore(ctx,
-		store.SandboxConfigurationRoot(id),
-		store.SandboxRuntimeRoot(id))
-	if err != nil {
-		return nil, err
-	}
-
 	defer func() {
 		if err != nil {
 			virtLog.WithField("vm", id).WithError(err).Error("failed to create new vm")
-			virtLog.WithField("vm", id).Errorf("Deleting store for %s", id)
-			vcStore.Delete()
 		}
 	}()

-	if err = hypervisor.createSandbox(ctx, id, NetworkNamespace{}, &config.HypervisorConfig, vcStore, false); err != nil {
+	if err = hypervisor.createSandbox(ctx, id, NetworkNamespace{}, &config.HypervisorConfig, false); err != nil {
 		return nil, err
 	}

@@ -230,7 +219,6 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
 		proxyURL: url,
 		cpu:      config.HypervisorConfig.NumVCPUs,
 		memory:   config.HypervisorConfig.MemorySize,
-		store:    vcStore,
 	}, nil
 }

@@ -243,22 +231,13 @@ func NewVMFromGrpc(ctx context.Context, v *pb.GrpcVM, config VMConfig) (*VM, err
 		return nil, err
 	}

-	vcStore, err := store.NewVCStore(ctx,
-		store.SandboxConfigurationRoot(v.Id),
-		store.SandboxRuntimeRoot(v.Id))
-	if err != nil {
-		return nil, err
-	}
-
 	defer func() {
 		if err != nil {
 			virtLog.WithField("vm", v.Id).WithError(err).Error("failed to create new vm from Grpc")
-			virtLog.WithField("vm", v.Id).Errorf("Deleting store for %s", v.Id)
-			vcStore.Delete()
 		}
 	}()

-	err = hypervisor.fromGrpc(ctx, &config.HypervisorConfig, vcStore, v.Hypervisor)
+	err = hypervisor.fromGrpc(ctx, &config.HypervisorConfig, v.Hypervisor)
 	if err != nil {
 		return nil, err
 	}
@@ -339,7 +318,7 @@ func (v *VM) Stop() error {
 		return err
 	}

-	return v.store.Delete()
+	return nil
 }

 // AddCPUs adds num of CPUs to the VM.