Mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-08-15 06:34:03 +00:00)

Merge pull request #3028 from egernst/hypervisor-hacking

Hypervisor cleanup, refactoring

Commit: 01b6ffc0a4
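Overview of the diff below: the persist driver is no longer injected into each hypervisor implementation (qemu, cloud-hypervisor, ACRN, firecracker, mock); instead, VMStorePath and RunStorePath travel in HypervisorConfig and are filled in once by the sandbox. HypervisorState, Bridge, and CPUDevice move from virtcontainers/persist/api into the new pkg/hypervisors package (imported as hv), hypervisor logging switches from the shared virtLog to a dedicated hvLogger configured through SetHypervisorLogger, getSharePath is exported as GetSharePath so katautils can inject HypervisorConfig.SharedPath, and the now-unused setSandbox method is dropped from the Hypervisor interface. The short Go sketches interspersed between hunks are editorial illustrations of these patterns, not part of the commit.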
@@ -983,11 +983,13 @@ func TestGetHypervisorInfoSocket(t *testing.T) {
 		{vc.QemuHypervisor, false},
 	}
 
+	config.HypervisorConfig.VMStorePath = "/foo"
+	config.HypervisorConfig.RunStorePath = "/bar"
+
 	for i, details := range hypervisors {
 		msg := fmt.Sprintf("hypervisor[%d]: %+v", i, details)
 
 		config.HypervisorType = details.hType
 
 		info, err := getHypervisorInfo(config)
 		assert.NoError(err, msg)
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-package persistapi
+package hypervisors
 
 // Bridge is a bridge where devices can be hot plugged
 type Bridge struct {
@@ -120,6 +120,9 @@ func CreateSandbox(ctx context.Context, vci vc.VC, ociSpec specs.Spec, runtimeCo
 		return nil, vc.Process{}, err
 	}
 
+	// setup shared path in hypervisor config:
+	sandboxConfig.HypervisorConfig.SharedPath = vc.GetSharePath(containerID)
+
 	if err := checkForFIPS(&sandboxConfig); err != nil {
 		return nil, vc.Process{}, err
 	}
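The hunk above is where path knowledge starts flowing top-down: katautils resolves the host/guest share directory once per sandbox and injects it, so hypervisors later in this diff read hypervisorConfig.SharedPath instead of calling an unexported helper. A minimal, self-contained sketch of the pattern (the constant and the demo scaffolding are assumptions; GetSharePath and SharedPath are names from this diff):

package main

import (
	"fmt"
	"path/filepath"
)

// Assumed default; the real value comes from kataHostSharedDir in kata_agent.go.
const kataHostSharedDir = "/run/kata-containers/shared/sandboxes"

// GetSharePath mirrors the helper exported by this diff.
func GetSharePath(id string) string {
	return filepath.Join(kataHostSharedDir, id, "shared")
}

// HypervisorConfig is reduced to the one field this sketch needs.
type HypervisorConfig struct {
	SharedPath string
}

func main() {
	var cfg HypervisorConfig
	// Resolved once by the runtime...
	cfg.SharedPath = GetSharePath("sandbox-123")
	// ...then consumed by e.g. the virtiofsd setup instead of being recomputed.
	fmt.Println(cfg.SharedPath)
}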
@@ -7,7 +7,6 @@ package virtcontainers
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"os"
 	"os/exec"
@@ -20,6 +19,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/uuid"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
@@ -39,11 +39,13 @@ var acrnTracingTags = map[string]string{
 
 // Since ACRN is using the store in a quite abnormal way, let's first draw it back from store to here
 
+/*
 // UUIDPathSuffix is the suffix used for uuid storage
 const (
 	UUIDPathSuffix = "uuid"
 	uuidFile       = "uuid.json"
 )
+*/
 
 // ACRN currently supports only known UUIDs for security
 // reasons (FuSa). When launching VM, only these pre-defined
|
||||
|
||||
// The path might already exist, but in case of VM templating,
|
||||
// we have to create it since the sandbox has not created it yet.
|
||||
if err = os.MkdirAll(filepath.Join(a.store.RunStoragePath(), id), DirMode); err != nil {
|
||||
if err = os.MkdirAll(filepath.Join(a.config.RunStorePath, id), DirMode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -438,7 +440,7 @@ func (a *Acrn) StartVM(ctx context.Context, timeoutSecs int) error {
|
||||
a.Logger().WithField("default-kernel-parameters", formatted).Debug()
|
||||
}
|
||||
|
||||
vmPath := filepath.Join(a.store.RunVMStoragePath(), a.id)
|
||||
vmPath := filepath.Join(a.config.VMStorePath, a.id)
|
||||
err := os.MkdirAll(vmPath, DirMode)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -634,7 +636,7 @@ func (a *Acrn) GetVMConsole(ctx context.Context, id string) (string, string, err
|
||||
span, _ := katatrace.Trace(ctx, a.Logger(), "GetVMConsole", acrnTracingTags, map[string]string{"sandbox_id": a.id})
|
||||
defer span.End()
|
||||
|
||||
consoleURL, err := utils.BuildSocketPath(a.store.RunVMStoragePath(), id, acrnConsoleSocket)
|
||||
consoleURL, err := utils.BuildSocketPath(a.config.VMStorePath, id, acrnConsoleSocket)
|
||||
if err != nil {
|
||||
return consoleProtoUnix, "", err
|
||||
}
|
||||
@@ -698,14 +700,14 @@ func (a *Acrn) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("acrn is not supported by VM cache")
 }
 
-func (a *Acrn) Save() (s persistapi.HypervisorState) {
+func (a *Acrn) Save() (s hv.HypervisorState) {
 	s.Pid = a.state.PID
 	s.Type = string(AcrnHypervisor)
 	s.UUID = a.state.UUID
 	return
 }
 
-func (a *Acrn) Load(s persistapi.HypervisorState) {
+func (a *Acrn) Load(s hv.HypervisorState) {
 	a.state.PID = s.Pid
 	a.state.UUID = s.UUID
 }
@@ -719,7 +721,7 @@ func (a *Acrn) Check() error {
 }
 
 func (a *Acrn) GenerateSocket(id string) (interface{}, error) {
-	return generateVMSocket(id, a.store.RunVMStoragePath())
+	return generateVMSocket(id, a.config.VMStorePath)
 }
 
 // GetACRNUUIDBytes returns UUID bytes that is used for VM creation
@@ -782,38 +784,36 @@ func (a *Acrn) GetMaxSupportedACRNVM() (uint8, error) {
 }
 
 func (a *Acrn) storeInfo() error {
-	relPath := filepath.Join(UUIDPathSuffix, uuidFile)
-
-	jsonOut, err := json.Marshal(a.info)
-	if err != nil {
-		return fmt.Errorf("Could not marshal data: %s", err)
-	}
-
-	if err := a.store.GlobalWrite(relPath, jsonOut); err != nil {
-		return fmt.Errorf("failed to write uuid to file: %v", err)
-	}
+	/*
+		relPath := filepath.Join(UUIDPathSuffix, uuidFile)
+
+		jsonOut, err := json.Marshal(a.info)
+		if err != nil {
+			return fmt.Errorf("Could not marshal data: %s", err)
+		}
+
+		if err := a.store.GlobalWrite(relPath, jsonOut); err != nil {
+			return fmt.Errorf("failed to write uuid to file: %v", err)
+		}*/
 
 	return nil
 }
 
 func (a *Acrn) loadInfo() error {
-	relPath := filepath.Join(UUIDPathSuffix, uuidFile)
-
-	data, err := a.store.GlobalRead(relPath)
-	if err != nil {
-		return fmt.Errorf("failed to read uuid from file: %v", err)
-	}
-
-	if err := json.Unmarshal(data, &a.info); err != nil {
-		return fmt.Errorf("failed to unmarshal uuid info: %v", err)
-	}
+	/*
+		relPath := filepath.Join(UUIDPathSuffix, uuidFile)
+		data, err := a.store.GlobalRead(relPath)
+		if err != nil {
+			return fmt.Errorf("failed to read uuid from file: %v", err)
+		}
+
+		if err := json.Unmarshal(data, &a.info); err != nil {
+			return fmt.Errorf("failed to unmarshal uuid info: %v", err)
+		}*/
 
 	return nil
 }
 
 func (a *Acrn) IsRateLimiterBuiltin() bool {
 	return false
 }
-
-func (a *Acrn) setSandbox(sandbox *Sandbox) {
-	a.sandbox = sandbox
-}
@@ -199,11 +199,15 @@ func TestAcrnGetSandboxConsole(t *testing.T) {
 	assert.NoError(err)
 
 	a := &Acrn{
-		ctx:   context.Background(),
+		ctx: context.Background(),
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 		store: store,
 	}
 	sandboxID := "testSandboxID"
-	expected := filepath.Join(a.store.RunVMStoragePath(), sandboxID, consoleSocket)
+	expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket)
 
 	proto, result, err := a.GetVMConsole(a.ctx, sandboxID)
 	assert.NoError(err)
@@ -219,6 +223,10 @@ func TestAcrnCreateVM(t *testing.T) {
 
 	a := &Acrn{
 		store: store,
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 
 	sandbox := &Sandbox{
@@ -35,7 +35,7 @@ var virtLog = logrus.WithField("source", "virtcontainers")
 func SetLogger(ctx context.Context, logger *logrus.Entry) {
 	fields := virtLog.Data
 	virtLog = logger.WithFields(fields)
 
+	SetHypervisorLogger(virtLog) // TODO: this will move to hypervisors pkg
 	deviceApi.SetLogger(virtLog)
 	compatoci.SetLogger(virtLog)
 	deviceConfig.SetLogger(virtLog)
@@ -20,12 +20,12 @@ import (
 	"time"
 
 	"github.com/containerd/console"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	chclient "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cloud-hypervisor/client"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
@@ -157,7 +157,6 @@ func (s *CloudHypervisorState) reset() {
 }
 
 type cloudHypervisor struct {
-	store     persistapi.PersistDriver
 	console   console.Console
 	virtiofsd Virtiofsd
 	APIClient clhClient
@@ -226,7 +225,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, networkNS N
 		clh.Logger().WithField("function", "CreateVM").Info("Sandbox already exist, loading from state")
 		clh.virtiofsd = &virtiofsd{
 			PID:        clh.state.VirtiofsdPID,
-			sourcePath: filepath.Join(getSharePath(clh.id)),
+			sourcePath: hypervisorConfig.SharedPath,
 			debug:      clh.config.Debug,
 			socketPath: virtiofsdSocketPath,
 		}
@@ -342,7 +341,7 @@ func (clh *cloudHypervisor) CreateVM(ctx context.Context, id string, networkNS N
 
 	clh.virtiofsd = &virtiofsd{
 		path:       clh.config.VirtioFSDaemon,
-		sourcePath: filepath.Join(getSharePath(clh.id)),
+		sourcePath: filepath.Join(GetSharePath(clh.id)),
 		socketPath: virtiofsdSocketPath,
 		extraArgs:  clh.config.VirtioFSExtraArgs,
 		debug:      clh.config.Debug,
@@ -374,7 +373,7 @@ func (clh *cloudHypervisor) StartVM(ctx context.Context, timeout int) error {
 
 	clh.Logger().WithField("function", "StartVM").Info("starting Sandbox")
 
-	vmPath := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
+	vmPath := filepath.Join(clh.config.VMStorePath, clh.id)
 	err := os.MkdirAll(vmPath, DirMode)
 	if err != nil {
 		return err
@@ -747,7 +746,7 @@ func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("cloudHypervisor is not supported by VM cache")
 }
 
-func (clh *cloudHypervisor) Save() (s persistapi.HypervisorState) {
+func (clh *cloudHypervisor) Save() (s hv.HypervisorState) {
 	s.Pid = clh.state.PID
 	s.Type = string(ClhHypervisor)
 	s.VirtiofsdPid = clh.state.VirtiofsdPID
@@ -755,7 +754,7 @@ func (clh *cloudHypervisor) Save() (s hv.HypervisorState) {
 	return
 }
 
-func (clh *cloudHypervisor) Load(s persistapi.HypervisorState) {
+func (clh *cloudHypervisor) Load(s hv.HypervisorState) {
 	clh.state.PID = s.Pid
 	clh.state.VirtiofsdPID = s.VirtiofsdPid
 	clh.state.apiSocket = s.APISocket
@@ -814,7 +813,7 @@ func (clh *cloudHypervisor) AddDevice(ctx context.Context, devInfo interface{},
 //###########################################################################
 
 func (clh *cloudHypervisor) Logger() *log.Entry {
-	return virtLog.WithField("subsystem", "cloudHypervisor")
+	return hvLogger.WithField("subsystem", "cloudHypervisor")
 }
 
 // Adds all capabilities supported by cloudHypervisor implementation of hypervisor interface
@@ -893,15 +892,15 @@ func (clh *cloudHypervisor) GenerateSocket(id string) (interface{}, error) {
 }
 
 func (clh *cloudHypervisor) virtioFsSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, virtioFsSocket)
+	return utils.BuildSocketPath(clh.config.VMStorePath, id, virtioFsSocket)
 }
 
 func (clh *cloudHypervisor) vsockSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhSocket)
+	return utils.BuildSocketPath(clh.config.VMStorePath, id, clhSocket)
 }
 
 func (clh *cloudHypervisor) apiSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(clh.store.RunVMStoragePath(), id, clhAPISocket)
+	return utils.BuildSocketPath(clh.config.VMStorePath, id, clhAPISocket)
 }
 
 func (clh *cloudHypervisor) waitVMM(timeout uint) error {
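All three cloud-hypervisor sockets now hang off config.VMStorePath. utils.BuildSocketPath, called above, joins the elements into a socket path that has to respect the tight length budget UNIX sockets impose. The sketch below is a hypothetical stand-in with that one guard, assuming a 107-byte sun_path limit; it is illustrative, not the real helper:

package main

import (
	"fmt"
	"path/filepath"
)

// maxSocketPathLen approximates the Linux sun_path budget (assumption).
const maxSocketPathLen = 107

// buildSocketPath is a hypothetical stand-in for utils.BuildSocketPath.
func buildSocketPath(elems ...string) (string, error) {
	p := filepath.Join(elems...)
	if len(p) > maxSocketPathLen {
		return "", fmt.Errorf("socket path %q exceeds %d bytes", p, maxSocketPathLen)
	}
	return p, nil
}

func main() {
	vmStorePath := "/run/vc/vm" // would come from HypervisorConfig.VMStorePath
	// The socket file name here is illustrative, not the real clhAPISocket value.
	p, err := buildSocketPath(vmStorePath, "sandbox-123", "clh-api.sock")
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
}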
func (clh *cloudHypervisor) waitVMM(timeout uint) error {
|
||||
@ -1213,7 +1212,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
|
||||
}
|
||||
|
||||
// Cleanup vm path
|
||||
dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
|
||||
dir := filepath.Join(clh.config.VMStorePath, clh.id)
|
||||
|
||||
// If it's a symlink, remove both dir and the target.
|
||||
link, err := filepath.EvalSymlinks(dir)
|
||||
@ -1242,7 +1241,7 @@ func (clh *cloudHypervisor) cleanupVM(force bool) error {
|
||||
}
|
||||
|
||||
if clh.config.VMid != "" {
|
||||
dir = filepath.Join(clh.store.RunStoragePath(), clh.config.VMid)
|
||||
dir = filepath.Join(clh.config.VMStorePath, clh.config.VMid)
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
if !force {
|
||||
return err
|
||||
@ -1272,6 +1271,3 @@ func (clh *cloudHypervisor) vmInfo() (chclient.VmInfo, error) {
|
||||
func (clh *cloudHypervisor) IsRateLimiterBuiltin() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) setSandbox(sandbox *Sandbox) {
|
||||
}
|
||||
|
@ -203,7 +203,10 @@ func TestCloudHypervisorCleanupVM(t *testing.T) {
|
||||
assert.NoError(err, "persist.GetDriver() unexpected error")
|
||||
|
||||
clh := &cloudHypervisor{
|
||||
store: store,
|
||||
config: HypervisorConfig{
|
||||
VMStorePath: store.RunVMStoragePath(),
|
||||
RunStorePath: store.RunStoragePath(),
|
||||
},
|
||||
}
|
||||
|
||||
err = clh.cleanupVM(true)
|
||||
@ -214,7 +217,7 @@ func TestCloudHypervisorCleanupVM(t *testing.T) {
|
||||
err = clh.cleanupVM(true)
|
||||
assert.NoError(err, "persist.GetDriver() unexpected error")
|
||||
|
||||
dir := filepath.Join(clh.store.RunVMStoragePath(), clh.id)
|
||||
dir := filepath.Join(store.RunVMStoragePath(), clh.id)
|
||||
os.MkdirAll(dir, os.ModePerm)
|
||||
|
||||
err = clh.cleanupVM(false)
|
||||
@ -235,9 +238,11 @@ func TestClhCreateVM(t *testing.T) {
|
||||
store, err := persist.GetDriver()
|
||||
assert.NoError(err)
|
||||
|
||||
clhConfig.VMStorePath = store.RunVMStoragePath()
|
||||
clhConfig.RunStorePath = store.RunStoragePath()
|
||||
|
||||
clh := &cloudHypervisor{
|
||||
config: clhConfig,
|
||||
store: store,
|
||||
}
|
||||
|
||||
sandbox := &Sandbox{
|
||||
@@ -261,11 +266,13 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 
+	clhConfig.VMStorePath = store.RunVMStoragePath()
+	clhConfig.RunStorePath = store.RunStoragePath()
+
 	clh := &cloudHypervisor{
 		config:    clhConfig,
 		APIClient: &clhClientMock{},
 		virtiofsd: &virtiofsdMock{},
-		store:     store,
 	}
 
 	err = clh.StartVM(context.Background(), 10)
@@ -379,6 +386,11 @@ func TestClhGenerateSocket(t *testing.T) {
 	clh, ok := hypervisor.(*cloudHypervisor)
 	assert.True(ok)
 
+	clh.config = HypervisorConfig{
+		VMStorePath:  "/foo",
+		RunStorePath: "/bar",
+	}
+
 	clh.addVSock(1, "path")
 
 	s, err := clh.GenerateSocket("c")
@@ -391,7 +403,7 @@ func TestClhGenerateSocket(t *testing.T) {
 	assert.NotEmpty(hvsock.UdsPath)
 
 	// Path must be absolute
-	assert.True(strings.HasPrefix(hvsock.UdsPath, "/"))
+	assert.True(strings.HasPrefix(hvsock.UdsPath, "/"), "failed: socket path: %s", hvsock.UdsPath)
 
 	assert.NotZero(hvsock.Port)
 }
@@ -22,9 +22,9 @@ import (
 	"syscall"
 	"time"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/firecracker/client"
 	models "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/firecracker/client/models"
 	ops "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/firecracker/client/operations"
@@ -1226,13 +1226,13 @@ func (fc *firecracker) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("firecracker is not supported by VM cache")
 }
 
-func (fc *firecracker) Save() (s persistapi.HypervisorState) {
+func (fc *firecracker) Save() (s hv.HypervisorState) {
 	s.Pid = fc.info.PID
 	s.Type = string(FirecrackerHypervisor)
 	return
 }
 
-func (fc *firecracker) Load(s persistapi.HypervisorState) {
+func (fc *firecracker) Load(s hv.HypervisorState) {
 	fc.info.PID = s.Pid
 }
 
@@ -1274,6 +1274,3 @@ func revertBytes(num uint64) uint64 {
 	}
 	return 1024*revertBytes(a) + b
 }
-
-func (fc *firecracker) setSandbox(sandbox *Sandbox) {
-}
@@ -14,11 +14,12 @@ import (
 	"strconv"
 	"strings"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
+
+	"github.com/sirupsen/logrus"
 )
 
 // HypervisorType describes an hypervisor type.
@@ -46,14 +47,10 @@ const (
 
 	// MockHypervisor is a mock hypervisor for testing purposes
 	MockHypervisor HypervisorType = "mock"
 )
 
-const (
-	procMemInfo = "/proc/meminfo"
-	procCPUInfo = "/proc/cpuinfo"
-)
-
 const (
 	defaultVCPUs = 1
 	// 2 GiB
 	defaultMemSzMiB = 2048
|
||||
MinHypervisorMemory = 256
|
||||
)
|
||||
|
||||
var (
|
||||
hvLogger = logrus.WithField("source", "virtcontainers/hypervisor")
|
||||
)
|
||||
|
||||
// In some architectures the maximum number of vCPUs depends on the number of physical cores.
|
||||
var defaultMaxQemuVCPUs = MaxQemuVCPUs()
|
||||
|
||||
@@ -144,6 +145,12 @@ type MemoryDevice struct {
 	Probe  bool
 }
 
+// SetHypervisorLogger sets up a logger for the hypervisor part of this pkg
+func SetHypervisorLogger(logger *logrus.Entry) {
+	fields := hvLogger.Data
+	hvLogger = logger.WithFields(fields)
+}
+
 // Set sets an hypervisor type based on the input string.
 func (hType *HypervisorType) Set(value string) error {
 	switch value {
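hvLogger starts as a bare logrus entry, and SetHypervisorLogger lets the caller swap in its own logger while keeping any fields already attached (this is what the SetLogger hunk earlier forwards to). A toy sketch of the same pattern, standalone; the names match the diff but the demo in main is an assumption:

package main

import "github.com/sirupsen/logrus"

// hvLogger and SetHypervisorLogger mirror this diff.
var hvLogger = logrus.WithField("source", "virtcontainers/hypervisor")

func SetHypervisorLogger(logger *logrus.Entry) {
	fields := hvLogger.Data // preserve fields set so far (e.g. "source")
	hvLogger = logger.WithFields(fields)
}

func main() {
	base := logrus.New()
	base.SetLevel(logrus.DebugLevel)
	// The caller-provided logger takes over, but the hypervisor code keeps
	// its identifying fields on every record.
	SetHypervisorLogger(base.WithField("sandbox", "demo"))
	hvLogger.WithField("subsystem", "qemu").Debug("hello")
}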
@@ -185,28 +192,18 @@ func (hType *HypervisorType) String() string {
 	}
 }
 
-// NewHypervisor returns an hypervisor from and hypervisor type.
+// NewHypervisor returns an hypervisor from a hypervisor type.
 func NewHypervisor(hType HypervisorType) (Hypervisor, error) {
-	store, err := persist.GetDriver()
-	if err != nil {
-		return nil, err
-	}
-
 	switch hType {
 	case QemuHypervisor:
-		return &qemu{
-			store: store,
-		}, nil
+		return &qemu{}, nil
 	case FirecrackerHypervisor:
 		return &firecracker{}, nil
 	case AcrnHypervisor:
-		return &Acrn{
-			store: store,
-		}, nil
+		return &Acrn{}, nil
 	case ClhHypervisor:
-		return &cloudHypervisor{
-			store: store,
-		}, nil
+		return &cloudHypervisor{}, nil
 	case MockHypervisor:
 		return &mockHypervisor{}, nil
 	default:
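This constructor change is the heart of the refactor: hypervisor objects become plain values, and the only stateful dependency (where to put VM and runtime files) arrives later through HypervisorConfig. A reduced before/after sketch, with a hypothetical noopHypervisor standing in for the real implementations:

package main

import "fmt"

type Hypervisor interface{ Name() string }

type noopHypervisor struct{} // hypothetical stand-in for qemu/clh/acrn

func (noopHypervisor) Name() string { return "noop" }

type HypervisorType string

const NoopHypervisor HypervisorType = "noop"

// After this PR: no persist.GetDriver() call and no store field to inject;
// construction cannot fail for known types.
func NewHypervisor(hType HypervisorType) (Hypervisor, error) {
	switch hType {
	case NoopHypervisor:
		return &noopHypervisor{}, nil
	default:
		return nil, fmt.Errorf("unknown hypervisor type %s", hType)
	}
}

func main() {
	h, err := NewHypervisor(NoopHypervisor)
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Name())
}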
@@ -315,13 +312,19 @@ type HypervisorConfig struct {
 	EntropySource string
 
 	// Shared file system type:
-	//   - virtio-9p (default)
-	//   - virtio-fs
+	//   - virtio-9p
+	//   - virtio-fs (default)
 	SharedFS string
 
+	// Path for filesystem sharing
+	SharedPath string
+
 	// VirtioFSDaemon is the virtio-fs vhost-user daemon path
 	VirtioFSDaemon string
 
+	// VirtioFSCache cache mode for fs version cache or "none"
+	VirtioFSCache string
+
 	// File based memory backend root directory
 	FileBackedMemRootDir string
@@ -339,12 +342,15 @@ type HypervisorConfig struct {
 	// VMid is "" if the hypervisor is not created by the factory.
 	VMid string
 
+	// VMStorePath is the location on disk where VM information will persist
+	VMStorePath string
+
+	// RunStorePath is the location on disk where runtime information will persist
+	RunStorePath string
+
 	// SELinux label for the VM
 	SELinuxProcessLabel string
 
-	// VirtioFSCache cache mode for fs version cache or "none"
-	VirtioFSCache string
-
 	// HypervisorPathList is the list of hypervisor paths names allowed in annotations
 	HypervisorPathList []string
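These two fields replace direct use of the persist driver inside hypervisor code; the sandbox fills them in from the driver exactly once (see the newSandbox hunk near the end of this diff). A compact sketch of the wiring, with a stub standing in for persist.GetDriver(); the path values are illustrative:

package main

import "fmt"

type HypervisorConfig struct {
	VMStorePath  string // where VM information persists
	RunStorePath string // where runtime information persists
}

// stubDriver is a hypothetical stand-in for the fs persist driver.
type stubDriver struct{}

func (stubDriver) RunVMStoragePath() string { return "/run/vc/vm" }  // illustrative
func (stubDriver) RunStoragePath() string   { return "/run/vc/sbs" } // matches the qemu.go comment in this diff

func main() {
	store := stubDriver{}
	var cfg HypervisorConfig
	// Mirrors the newSandbox change: paths flow from the store into the
	// config, and hypervisors read only the config.
	cfg.VMStorePath = store.RunVMStoragePath()
	cfg.RunStorePath = store.RunStoragePath()
	fmt.Printf("%+v\n", cfg)
}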
@@ -606,7 +612,7 @@ func (conf *HypervisorConfig) AddCustomAsset(a *types.Asset) error {
 		return fmt.Errorf("Invalid %s at %s", a.Type(), a.Path())
 	}
 
-	virtLog.Debugf("Using custom %v asset %s", a.Type(), a.Path())
+	hvLogger.Debugf("Using custom %v asset %s", a.Type(), a.Path())
 
 	if conf.customAssets == nil {
 		conf.customAssets = make(map[types.AssetType]*types.Asset)
@@ -874,7 +880,7 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
 		return flags["hypervisor"], nil
 	}
 
-	virtLog.WithField("arch", runtime.GOARCH).Info("Unable to know if the system is running inside a VM")
+	hvLogger.WithField("arch", runtime.GOARCH).Info("Unable to know if the system is running inside a VM")
 	return false, nil
 }
@@ -931,14 +937,12 @@ type Hypervisor interface {
 	toGrpc(ctx context.Context) ([]byte, error)
 	Check() error
 
-	Save() persistapi.HypervisorState
-	Load(persistapi.HypervisorState)
+	Save() hv.HypervisorState
+	Load(hv.HypervisorState)
 
 	// generate the socket to communicate the host and guest
 	GenerateSocket(id string) (interface{}, error)
 
 	// check if hypervisor supports built-in rate limiter.
 	IsRateLimiterBuiltin() bool
-
-	setSandbox(sandbox *Sandbox)
 }
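With HypervisorState now defined in the shared pkg/hypervisors package, every implementation persists through the same value type, and setSandbox drops out of the interface entirely. A minimal sketch of a conforming Save/Load pair; the state struct is trimmed to fields visible in this diff and the implementation is hypothetical:

package main

import "fmt"

// HypervisorState mirrors pkg/hypervisors, reduced to fields used in this diff.
type HypervisorState struct {
	Pid  int
	Type string
	UUID string
}

type fakeHypervisor struct { // hypothetical implementation
	pid  int
	uuid string
}

func (f *fakeHypervisor) Save() (s HypervisorState) {
	s.Pid = f.pid
	s.Type = "fake"
	s.UUID = f.uuid
	return
}

func (f *fakeHypervisor) Load(s HypervisorState) {
	f.pid = s.Pid
	f.uuid = s.UUID
}

func main() {
	a := &fakeHypervisor{pid: 42, uuid: "abc"}
	state := a.Save()
	b := &fakeHypervisor{}
	b.Load(state)
	fmt.Println(b.pid, b.uuid) // 42 abc
}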
@@ -162,7 +162,7 @@ var kataHostSharedDir = func() string {
 // 2. /run/kata-containers/shared/sandboxes/$sbx_id/mounts/ is bind mounted readonly to /run/kata-containers/shared/sandboxes/$sbx_id/shared/, so guest cannot modify it
 //
 // 3. host-guest shared files/directories are mounted one-level under /run/kata-containers/shared/sandboxes/$sbx_id/mounts/ and thus present to guest at one level under /run/kata-containers/shared/sandboxes/$sbx_id/shared/
-func getSharePath(id string) string {
+func GetSharePath(id string) string {
 	return filepath.Join(kataHostSharedDir(), id, "shared")
 }
@@ -356,7 +356,7 @@ func (k *kataAgent) setupSandboxBindMounts(ctx context.Context, sandbox *Sandbox
 
 	// Create subdirectory in host shared path for sandbox mounts
 	sandboxMountDir := filepath.Join(getMountPath(sandbox.id), sandboxMountsDir)
-	sandboxShareDir := filepath.Join(getSharePath(sandbox.id), sandboxMountsDir)
+	sandboxShareDir := filepath.Join(GetSharePath(sandbox.id), sandboxMountsDir)
 	if err := os.MkdirAll(sandboxMountDir, DirMode); err != nil {
 		return fmt.Errorf("Creating sandbox shared mount directory: %v: %w", sandboxMountDir, err)
 	}
@@ -473,7 +473,7 @@ func (k *kataAgent) setupSharedPath(ctx context.Context, sandbox *Sandbox) (err
 	defer span.End()
 
 	// create shared path structure
-	sharePath := getSharePath(sandbox.id)
+	sharePath := GetSharePath(sandbox.id)
 	mountPath := getMountPath(sandbox.id)
 	if err := os.MkdirAll(sharePath, sharedDirMode); err != nil {
 		return err
@@ -509,7 +509,7 @@ func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
 	if err := k.setupSharedPath(ctx, sandbox); err != nil {
 		return err
 	}
-	return k.configure(ctx, sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), sandbox.config.AgentConfig)
+	return k.configure(ctx, sandbox.hypervisor, sandbox.id, GetSharePath(sandbox.id), sandbox.config.AgentConfig)
 }
 
 func cmdToKataProcess(cmd types.Cmd) (process *grpc.Process, err error) {
@@ -2198,7 +2198,7 @@ func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
 	}
 
 	// Unmount shared path
-	path := getSharePath(s.id)
+	path := GetSharePath(s.id)
 	k.Logger().WithField("path", path).Infof("Cleanup agent")
 	if err := syscall.Unmount(path, syscall.MNT_DETACH|UmountNoFollow); err != nil {
 		k.Logger().WithError(err).Errorf("failed to unmount vm share path %s", path)
@@ -1158,7 +1158,7 @@ func TestSandboxBindMount(t *testing.T) {
 	assert.Nil(err)
 	defer os.RemoveAll(dir)
 
-	sharePath := getSharePath(sandbox.id)
+	sharePath := GetSharePath(sandbox.id)
 	mountPath := getMountPath(sandbox.id)
 
 	err = os.MkdirAll(sharePath, DirMode)
@@ -10,7 +10,7 @@ import (
 	"errors"
 	"os"
 
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 )
 
@@ -130,11 +130,11 @@ func (m *mockHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("mockHypervisor is not supported by VM cache")
 }
 
-func (m *mockHypervisor) Save() (s persistapi.HypervisorState) {
+func (m *mockHypervisor) Save() (s hv.HypervisorState) {
 	return
 }
 
-func (m *mockHypervisor) Load(s persistapi.HypervisorState) {}
+func (m *mockHypervisor) Load(s hv.HypervisorState) {}
 
 func (m *mockHypervisor) Check() error {
 	return nil
@@ -149,6 +149,3 @@ func (m *mockHypervisor) GenerateSocket(id string) (interface{}, error) {
 func (m *mockHypervisor) IsRateLimiterBuiltin() bool {
 	return false
 }
-
-func (m *mockHypervisor) setSandbox(sandbox *Sandbox) {
-}
@@ -8,6 +8,7 @@ package virtcontainers
 
 import (
 	"errors"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
 	exp "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/experimental"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
@@ -314,7 +315,7 @@ func (c *Container) loadContState(cs persistapi.ContainerState) {
 	}
 }
 
-func (s *Sandbox) loadHypervisor(hs persistapi.HypervisorState) {
+func (s *Sandbox) loadHypervisor(hs hv.HypervisorState) {
 	s.hypervisor.Load(hs)
 }
@@ -6,6 +6,10 @@
 
 package persistapi
 
+import (
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
+)
+
 // ============= sandbox level resources =============
 
 // AgentState save agent state data
@@ -38,7 +42,7 @@ type SandboxState struct {
 	OverheadCgroupPath string
 
 	// HypervisorState saves hypervisor specific data
-	HypervisorState HypervisorState
+	HypervisorState hv.HypervisorState
 
 	// AgentState saves state data of agent
 	AgentState AgentState
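persistapi now embeds the shared state type rather than defining its own, so the on-disk JSON shape is unchanged. A small round-trip sketch, with both structs reduced to fields shown in this diff and the demo values assumed:

package main

import (
	"encoding/json"
	"fmt"
)

// Reduced mirrors of the pkg/hypervisors and persistapi types in this diff.
type HypervisorState struct {
	Pid  int
	Type string
}

type SandboxState struct {
	OverheadCgroupPath string
	HypervisorState    HypervisorState // was persistapi-local, now shared
}

func main() {
	in := SandboxState{
		OverheadCgroupPath: "/kata_overhead/demo", // illustrative value
		HypervisorState:    HypervisorState{Pid: 1234, Type: "qemu"},
	}
	raw, _ := json.Marshal(in)
	var out SandboxState
	_ = json.Unmarshal(raw, &out)
	fmt.Printf("%s\n%+v\n", raw, out)
}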
@@ -31,11 +31,11 @@ import (
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 
+	hv "github.com/kata-containers/kata-containers/src/runtime/pkg/hypervisors"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
 	pkgUtils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
 	"github.com/kata-containers/kata-containers/src/runtime/pkg/uuid"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
-	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// CPUDevice represents a CPU device which was hot-added in a running VM
|
||||
type CPUDevice struct {
|
||||
// ID is used to identify this CPU in the hypervisor options.
|
||||
ID string
|
||||
}
|
||||
|
||||
// QemuState keeps Qemu's state
|
||||
type QemuState struct {
|
||||
UUID string
|
||||
Bridges []types.Bridge
|
||||
// HotpluggedCPUs is the list of CPUs that were hot-added
|
||||
HotpluggedVCPUs []CPUDevice
|
||||
HotpluggedVCPUs []hv.CPUDevice
|
||||
HotpluggedMemory int
|
||||
VirtiofsdPid int
|
||||
PCIeRootPort int
|
||||
@ -92,8 +86,6 @@ type qemu struct {
|
||||
|
||||
virtiofsd Virtiofsd
|
||||
|
||||
store persistapi.PersistDriver
|
||||
|
||||
ctx context.Context
|
||||
|
||||
// fds is a list of file descriptors inherited by QEMU process
|
||||
@ -149,7 +141,7 @@ type qmpLogger struct {
|
||||
|
||||
func newQMPLogger() qmpLogger {
|
||||
return qmpLogger{
|
||||
logger: virtLog.WithField("subsystem", "qmp"),
|
||||
logger: hvLogger.WithField("subsystem", "qmp"),
|
||||
}
|
||||
}
|
||||
|
||||
@ -171,7 +163,7 @@ func (l qmpLogger) Errorf(format string, v ...interface{}) {
|
||||
|
||||
// Logger returns a logrus logger appropriate for logging qemu messages
|
||||
func (q *qemu) Logger() *logrus.Entry {
|
||||
return virtLog.WithField("subsystem", "qemu")
|
||||
return hvLogger.WithField("subsystem", "qemu")
|
||||
}
|
||||
|
||||
func (q *qemu) kernelParameters() string {
|
||||
@@ -276,7 +268,7 @@ func (q *qemu) setup(ctx context.Context, id string, hypervisorConfig *Hyperviso
 
 		// The path might already exist, but in case of VM templating,
 		// we have to create it since the sandbox has not created it yet.
-		if err = utils.MkdirAllWithInheritedOwner(filepath.Join(q.store.RunStoragePath(), id), DirMode); err != nil {
+		if err = utils.MkdirAllWithInheritedOwner(filepath.Join(q.config.RunStorePath, id), DirMode); err != nil {
 			return err
 		}
 	}
@@ -331,7 +323,7 @@ func (q *qemu) memoryTopology() (govmmQemu.Memory, error) {
 }
 
 func (q *qemu) qmpSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(q.store.RunVMStoragePath(), id, qmpSocket)
+	return utils.BuildSocketPath(q.config.VMStorePath, id, qmpSocket)
 }
 
 func (q *qemu) getQemuMachine() (govmmQemu.Machine, error) {
@@ -618,7 +610,7 @@ func (q *qemu) CreateVM(ctx context.Context, id string, networkNS NetworkNamespa
 		GlobalParam: "kvm-pit.lost_tick_policy=discard",
 		Bios:        firmwarePath,
 		PFlash:      pflash,
-		PidFile:     filepath.Join(q.store.RunVMStoragePath(), q.id, "pid"),
+		PidFile:     filepath.Join(q.config.VMStorePath, q.id, "pid"),
 	}
 
 	qemuConfig.Devices, qemuConfig.Bios, err = q.arch.appendProtectionDevice(qemuConfig.Devices, firmwarePath)
@@ -655,7 +647,7 @@ func (q *qemu) CreateVM(ctx context.Context, id string, networkNS NetworkNamespa
 
 	q.virtiofsd = &virtiofsd{
 		path:       q.config.VirtioFSDaemon,
-		sourcePath: filepath.Join(getSharePath(q.id)),
+		sourcePath: hypervisorConfig.SharedPath,
 		socketPath: virtiofsdSocketPath,
 		extraArgs:  q.config.VirtioFSExtraArgs,
 		debug:      q.config.Debug,
@@ -666,7 +658,7 @@ func (q *qemu) CreateVM(ctx context.Context, id string, networkNS NetworkNamespa
 }
 
 func (q *qemu) vhostFSSocketPath(id string) (string, error) {
-	return utils.BuildSocketPath(q.store.RunVMStoragePath(), id, vhostFSSocket)
+	return utils.BuildSocketPath(q.config.VMStorePath, id, vhostFSSocket)
 }
 
 func (q *qemu) setupVirtiofsd(ctx context.Context) (err error) {
@@ -795,7 +787,7 @@ func (q *qemu) StartVM(ctx context.Context, timeout int) error {
 		q.fds = []*os.File{}
 	}()
 
-	vmPath := filepath.Join(q.store.RunVMStoragePath(), q.id)
+	vmPath := filepath.Join(q.config.VMStorePath, q.id)
 	err := utils.MkdirAllWithInheritedOwner(vmPath, DirMode)
 	if err != nil {
 		return err
|
||||
func (q *qemu) cleanupVM() error {
|
||||
|
||||
// Cleanup vm path
|
||||
dir := filepath.Join(q.store.RunVMStoragePath(), q.id)
|
||||
dir := filepath.Join(q.config.VMStorePath, q.id)
|
||||
|
||||
// If it's a symlink, remove both dir and the target.
|
||||
// This can happen when vm template links a sandbox to a vm.
|
||||
@ -1023,7 +1015,7 @@ func (q *qemu) cleanupVM() error {
|
||||
}
|
||||
|
||||
if q.config.VMid != "" {
|
||||
dir = filepath.Join(q.store.RunStoragePath(), q.config.VMid)
|
||||
dir = filepath.Join(q.config.RunStorePath, q.config.VMid)
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
q.Logger().WithError(err).WithField("path", dir).Warnf("failed to remove vm path")
|
||||
}
|
||||
@ -1149,7 +1141,7 @@ func (q *qemu) dumpSandboxMetaInfo(dumpSavePath string) {
|
||||
dumpStatePath := filepath.Join(dumpSavePath, "state")
|
||||
|
||||
// copy state from /run/vc/sbs to memory dump directory
|
||||
statePath := filepath.Join(q.store.RunStoragePath(), q.id)
|
||||
statePath := filepath.Join(q.config.RunStorePath, q.id)
|
||||
command := []string{"/bin/cp", "-ar", statePath, dumpStatePath}
|
||||
q.Logger().WithField("command", command).Info("try to Save sandbox state")
|
||||
if output, err := pkgUtils.RunCommandFull(command, true); err != nil {
|
||||
@ -1822,7 +1814,7 @@ func (q *qemu) hotplugAddCPUs(amount uint32) (uint32, error) {
|
||||
}
|
||||
|
||||
// a new vCPU was added, update list of hotplugged vCPUs and Check if all vCPUs were added
|
||||
q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, CPUDevice{cpuID})
|
||||
q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, hv.CPUDevice{ID: cpuID})
|
||||
hotpluggedVCPUs++
|
||||
if hotpluggedVCPUs == amount {
|
||||
// All vCPUs were hotplugged
|
||||
@ -2030,7 +2022,7 @@ func (q *qemu) GetVMConsole(ctx context.Context, id string) (string, string, err
|
||||
span, _ := katatrace.Trace(ctx, q.Logger(), "GetVMConsole", qemuTracingTags, map[string]string{"sandbox_id": q.id})
|
||||
defer span.End()
|
||||
|
||||
consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket)
|
||||
consoleURL, err := utils.BuildSocketPath(q.config.VMStorePath, id, consoleSocket)
|
||||
if err != nil {
|
||||
return consoleProtoUnix, "", err
|
||||
}
|
||||
@@ -2469,7 +2461,7 @@ func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
 	return json.Marshal(&qp)
 }
 
-func (q *qemu) Save() (s persistapi.HypervisorState) {
+func (q *qemu) Save() (s hv.HypervisorState) {
 
 	// If QEMU isn't even running, there isn't any state to Save
 	if q.stopped {
@@ -2488,7 +2480,7 @@ func (q *qemu) Save() (s hv.HypervisorState) {
 	s.PCIeRootPort = q.state.PCIeRootPort
 
 	for _, bridge := range q.arch.getBridges() {
-		s.Bridges = append(s.Bridges, persistapi.Bridge{
+		s.Bridges = append(s.Bridges, hv.Bridge{
 			DeviceAddr: bridge.Devices,
 			Type:       string(bridge.Type),
 			ID:         bridge.ID,
@@ -2497,14 +2489,14 @@ func (q *qemu) Save() (s hv.HypervisorState) {
 	}
 
 	for _, cpu := range q.state.HotpluggedVCPUs {
-		s.HotpluggedVCPUs = append(s.HotpluggedVCPUs, persistapi.CPUDevice{
+		s.HotpluggedVCPUs = append(s.HotpluggedVCPUs, hv.CPUDevice{
 			ID: cpu.ID,
 		})
 	}
 	return
 }
 
-func (q *qemu) Load(s persistapi.HypervisorState) {
+func (q *qemu) Load(s hv.HypervisorState) {
 	q.state.UUID = s.UUID
 	q.state.HotpluggedMemory = s.HotpluggedMemory
 	q.state.HotplugVFIOOnRootBus = s.HotplugVFIOOnRootBus
@@ -2516,7 +2508,7 @@ func (q *qemu) Load(s hv.HypervisorState) {
 	}
 
 	for _, cpu := range s.HotpluggedVCPUs {
-		q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, CPUDevice{
+		q.state.HotpluggedVCPUs = append(q.state.HotpluggedVCPUs, hv.CPUDevice{
 			ID: cpu.ID,
 		})
 	}
@@ -2543,12 +2535,9 @@ func (q *qemu) Check() error {
 }
 
 func (q *qemu) GenerateSocket(id string) (interface{}, error) {
-	return generateVMSocket(id, q.store.RunVMStoragePath())
+	return generateVMSocket(id, q.config.VMStorePath)
 }
 
 func (q *qemu) IsRateLimiterBuiltin() bool {
 	return false
 }
-
-func (q *qemu) setSandbox(sandbox *Sandbox) {
-}
@@ -169,7 +169,7 @@ func (q *qemuAmd64) cpuModel() string {
 	// VMX is not migratable yet.
 	// issue: https://github.com/kata-containers/runtime/issues/1750
 	if q.vmFactory {
-		virtLog.WithField("subsystem", "qemuAmd64").Warn("VMX is not migratable yet: turning it off")
+		hvLogger.WithField("subsystem", "qemuAmd64").Warn("VMX is not migratable yet: turning it off")
 		cpuModel += ",vmx=off"
 	}
 
@@ -200,7 +200,7 @@ func (q *qemuAmd64) enableProtection() error {
 	if err != nil {
 		return err
 	}
-	logger := virtLog.WithFields(logrus.Fields{
+	logger := hvLogger.WithFields(logrus.Fields{
 		"subsystem":           "qemuAmd64",
 		"machine":             q.qemuMachine,
 		"kernel-params-debug": q.kernelParamsDebug,
@@ -846,6 +846,6 @@ func (q *qemuArchBase) setPFlash(p []string) {
 
 // append protection device
 func (q *qemuArchBase) appendProtectionDevice(devices []govmmQemu.Device, firmware string) ([]govmmQemu.Device, string, error) {
-	virtLog.WithField("arch", runtime.GOARCH).Warnf("Confidential Computing has not been implemented for this architecture")
+	hvLogger.WithField("arch", runtime.GOARCH).Warnf("Confidential Computing has not been implemented for this architecture")
 	return devices, firmware, nil
 }
@@ -171,6 +171,6 @@ func (q *qemuArm64) enableProtection() error {
 
 func (q *qemuArm64) appendProtectionDevice(devices []govmmQemu.Device, firmware string) ([]govmmQemu.Device, string, error) {
 	err := q.enableProtection()
-	virtLog.WithField("arch", runtime.GOARCH).Warnf("%v", err)
+	hvLogger.WithField("arch", runtime.GOARCH).Warnf("%v", err)
 	return devices, firmware, err
 }
@@ -51,7 +51,7 @@ var supportedQemuMachine = govmmQemu.Machine{
 
 // Logger returns a logrus logger appropriate for logging qemu messages
 func (q *qemuPPC64le) Logger() *logrus.Entry {
-	return virtLog.WithField("subsystem", "qemuPPC64le")
+	return hvLogger.WithField("subsystem", "qemuPPC64le")
 }
 
 // MaxQemuVCPUs returns the maximum number of vCPUs supported
@@ -141,7 +141,7 @@ func (q *qemuPPC64le) enableProtection() error {
 		q.qemuMachine.Options += ","
 	}
 	q.qemuMachine.Options += fmt.Sprintf("confidential-guest-support=%s", pefID)
-	virtLog.WithFields(logrus.Fields{
+	hvLogger.WithFields(logrus.Fields{
 		"subsystem":     "qemuPPC64le",
 		"machine":       q.qemuMachine,
 		"kernel-params": q.kernelParams,
@@ -324,7 +324,7 @@ func (q *qemuS390x) enableProtection() error {
 		q.qemuMachine.Options += ","
 	}
 	q.qemuMachine.Options += fmt.Sprintf("confidential-guest-support=%s", secExecID)
-	virtLog.WithFields(logrus.Fields{
+	hvLogger.WithFields(logrus.Fields{
 		"subsystem": logSubsystem,
 		"machine":   q.qemuMachine}).
 		Info("Enabling guest protection with Secure Execution")
@@ -78,7 +78,10 @@ func TestQemuCreateVM(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 	q := &qemu{
-		store: store,
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 	sandbox := &Sandbox{
 		ctx: context.Background(),
@@ -94,7 +97,7 @@ func TestQemuCreateVM(t *testing.T) {
 	assert.NoError(err)
 
 	// Create parent dir path for hypervisor.json
-	parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
+	parentDir := filepath.Join(store.RunStoragePath(), sandbox.id)
 	assert.NoError(os.MkdirAll(parentDir, DirMode))
 
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -110,7 +113,10 @@ func TestQemuCreateVMMissingParentDirFail(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 	q := &qemu{
-		store: store,
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 	sandbox := &Sandbox{
 		ctx: context.Background(),
@@ -126,7 +132,7 @@ func TestQemuCreateVMMissingParentDirFail(t *testing.T) {
 	assert.NoError(err)
 
 	// Ensure parent dir path for hypervisor.json does not exist.
-	parentDir := filepath.Join(q.store.RunStoragePath(), sandbox.id)
+	parentDir := filepath.Join(store.RunStoragePath(), sandbox.id)
 	assert.NoError(os.RemoveAll(parentDir))
 
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -192,7 +198,10 @@ func TestQemuKnobs(t *testing.T) {
 	assert.NoError(err)
 
 	q := &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
 	assert.NoError(err)
@@ -325,11 +334,14 @@ func TestQemuGetSandboxConsole(t *testing.T) {
 	store, err := persist.GetDriver()
 	assert.NoError(err)
 	q := &qemu{
-		ctx:   context.Background(),
-		store: store,
+		ctx: context.Background(),
+		config: HypervisorConfig{
+			VMStorePath:  store.RunVMStoragePath(),
+			RunStorePath: store.RunStoragePath(),
+		},
 	}
 	sandboxID := "testSandboxID"
-	expected := filepath.Join(q.store.RunVMStoragePath(), sandboxID, consoleSocket)
+	expected := filepath.Join(store.RunVMStoragePath(), sandboxID, consoleSocket)
 
 	proto, result, err := q.GetVMConsole(q.ctx, sandboxID)
 	assert.NoError(err)
@@ -460,7 +472,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q := &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -475,7 +490,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.BootToBeTemplate = true
 	sandbox.config.HypervisorConfig.SharedFS = config.VirtioFS
@@ -491,7 +509,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.FileBackedMemRootDir = "/tmp/xyzabc"
 	err = q.CreateVM(context.Background(), sandbox.id, NetworkNamespace{}, &sandbox.config.HypervisorConfig)
@@ -505,7 +526,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.EnableVhostUserStore = true
 	sandbox.config.HypervisorConfig.HugePages = true
@@ -518,7 +542,10 @@ func TestQemuFileBackedMem(t *testing.T) {
 	assert.NoError(err)
 
 	q = &qemu{
-		store: sandbox.store,
+		config: HypervisorConfig{
+			VMStorePath:  sandbox.store.RunVMStoragePath(),
+			RunStorePath: sandbox.store.RunStoragePath(),
+		},
 	}
 	sandbox.config.HypervisorConfig.EnableVhostUserStore = true
 	sandbox.config.HypervisorConfig.HugePages = false
@@ -525,12 +525,9 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 		swapDevices: []*config.BlockDrive{},
 	}
 
-	hypervisor.setSandbox(s)
-
 	if s.store, err = persist.GetDriver(); err != nil || s.store == nil {
 		return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
 	}
 
 	defer func() {
 		if retErr != nil {
 			s.Logger().WithError(retErr).Error("Create new sandbox failed")
@@ -538,6 +535,9 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 		}
 	}()
 
+	sandboxConfig.HypervisorConfig.VMStorePath = s.store.RunVMStoragePath()
+	sandboxConfig.HypervisorConfig.RunStorePath = s.store.RunStoragePath()
+
 	spec := s.GetPatchedOCISpec()
 	if spec != nil && spec.Process.SelinuxLabel != "" {
 		sandboxConfig.HypervisorConfig.SELinuxProcessLabel = spec.Process.SelinuxLabel
@@ -60,7 +60,7 @@ var testHyperstartTtySocket = ""
 func cleanUp() {
 	os.RemoveAll(fs.MockRunStoragePath())
 	os.RemoveAll(fs.MockRunVMStoragePath())
-	syscall.Unmount(getSharePath(testSandboxID), syscall.MNT_DETACH|UmountNoFollow)
+	syscall.Unmount(GetSharePath(testSandboxID), syscall.MNT_DETACH|UmountNoFollow)
 	os.RemoveAll(testDir)
 	os.MkdirAll(testDir, DirMode)
@@ -216,7 +216,7 @@ func (v *virtiofsd) valid() error {
 }
 
 func (v *virtiofsd) Logger() *log.Entry {
-	return virtLog.WithField("subsystem", "virtiofsd")
+	return hvLogger.WithField("subsystem", "virtiofsd")
 }
 
 func (v *virtiofsd) kill(ctx context.Context) (err error) {