virtcontainers: Split the factory package into Linux and Darwin bits
- split template
- split factory
- add stubs for darwin

Signed-off-by: Eric Ernst <eric_ernst@apple.com>
parent 2b4b825228
commit 60ff230d80
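For context, the split relies on Go's implicit, file-name based build constraints: a file whose name ends in _linux.go is compiled only when GOOS=linux, and _darwin.go only when GOOS=darwin, so the new per-OS factory and template files need no explicit //go:build lines to be selected. A minimal sketch of the mechanism (hypothetical package and function names, not part of this change):

// feature_linux.go: compiled only when GOOS=linux
package feature

// Supported reports whether the OS-specific implementation is available.
func Supported() bool { return true }

// feature_darwin.go: compiled only when GOOS=darwin
package feature

// Supported reports whether the OS-specific implementation is available.
func Supported() bool { return false }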
@@ -7,17 +7,7 @@ package factory

import (
	"context"
	"fmt"

	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
	pb "github.com/kata-containers/kata-containers/src/runtime/protocols/cache"
	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/base"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/cache"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/direct"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/grpccache"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/template"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
	"github.com/sirupsen/logrus"
)

@@ -43,56 +33,6 @@ type Config struct {
	VMCache bool
}

type factory struct {
	base base.FactoryBase
}

// NewFactory returns a working factory.
func NewFactory(ctx context.Context, config Config, fetchOnly bool) (vc.Factory, error) {
	span, _ := katatrace.Trace(ctx, nil, "NewFactory", factoryTracingTags)
	defer span.End()

	err := config.VMConfig.Valid()
	if err != nil {
		return nil, err
	}

	if fetchOnly && config.Cache > 0 {
		return nil, fmt.Errorf("cache factory does not support fetch")
	}

	var b base.FactoryBase
	if config.VMCache && config.Cache == 0 {
		// For VMCache client
		b, err = grpccache.New(ctx, config.VMCacheEndpoint)
		if err != nil {
			return nil, err
		}
	} else {
		if config.Template {
			if fetchOnly {
				b, err = template.Fetch(config.VMConfig, config.TemplatePath)
				if err != nil {
					return nil, err
				}
			} else {
				b, err = template.New(ctx, config.VMConfig, config.TemplatePath)
				if err != nil {
					return nil, err
				}
			}
		} else {
			b = direct.New(ctx, config.VMConfig)
		}

		if config.Cache > 0 {
			b = cache.New(ctx, config.Cache, b)
		}
	}

	return &factory{b}, nil
}

// SetLogger sets the logger for the factory.
func SetLogger(ctx context.Context, logger logrus.FieldLogger) {
	fields := logrus.Fields{

@@ -105,135 +45,3 @@ func SetLogger(ctx context.Context, logger logrus.FieldLogger) {
func (f *factory) log() *logrus.Entry {
	return factoryLogger.WithField("subsystem", "factory")
}

func resetHypervisorConfig(config *vc.VMConfig) {
	config.HypervisorConfig.NumVCPUs = 0
	config.HypervisorConfig.MemorySize = 0
	config.HypervisorConfig.BootToBeTemplate = false
	config.HypervisorConfig.BootFromTemplate = false
	config.HypervisorConfig.MemoryPath = ""
	config.HypervisorConfig.DevicesStatePath = ""
	config.HypervisorConfig.SharedPath = ""
	config.HypervisorConfig.VMStorePath = ""
	config.HypervisorConfig.RunStorePath = ""
}

// It's important that baseConfig and newConfig are passed by value!
func checkVMConfig(baseConfig, newConfig vc.VMConfig) error {
	if baseConfig.HypervisorType != newConfig.HypervisorType {
		return fmt.Errorf("hypervisor type does not match: %s vs. %s", baseConfig.HypervisorType, newConfig.HypervisorType)
	}

	// check hypervisor config details
	resetHypervisorConfig(&baseConfig)
	resetHypervisorConfig(&newConfig)

	if !utils.DeepCompare(baseConfig, newConfig) {
		return fmt.Errorf("hypervisor config does not match, base: %+v. new: %+v", baseConfig, newConfig)
	}

	return nil
}

func (f *factory) checkConfig(config vc.VMConfig) error {
	baseConfig := f.base.Config()

	return checkVMConfig(baseConfig, config)
}

// GetVM returns a working blank VM created by the factory.
func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	span, ctx := katatrace.Trace(ctx, f.log(), "GetVM", factoryTracingTags)
	defer span.End()

	hypervisorConfig := config.HypervisorConfig
	if err := config.Valid(); err != nil {
		f.log().WithError(err).Error("invalid hypervisor config")
		return nil, err
	}

	err := f.checkConfig(config)
	if err != nil {
		f.log().WithError(err).Info("fallback to direct factory vm")
		return direct.New(ctx, config).GetBaseVM(ctx, config)
	}

	f.log().Info("get base VM")
	vm, err := f.base.GetBaseVM(ctx, config)
	if err != nil {
		f.log().WithError(err).Error("failed to get base VM")
		return nil, err
	}

	// cleanup upon error
	defer func() {
		if err != nil {
			f.log().WithError(err).Error("clean up vm")
			vm.Stop(ctx)
		}
	}()

	err = vm.Resume(ctx)
	if err != nil {
		return nil, err
	}

	// reseed RNG so that shared memory VMs do not generate same random numbers.
	err = vm.ReseedRNG(ctx)
	if err != nil {
		return nil, err
	}

	// sync guest time since we might have paused it for a long time.
	err = vm.SyncTime(ctx)
	if err != nil {
		return nil, err
	}

	online := false
	baseConfig := f.base.Config().HypervisorConfig
	if baseConfig.NumVCPUs < hypervisorConfig.NumVCPUs {
		err = vm.AddCPUs(ctx, hypervisorConfig.NumVCPUs-baseConfig.NumVCPUs)
		if err != nil {
			return nil, err
		}
		online = true
	}

	if baseConfig.MemorySize < hypervisorConfig.MemorySize {
		err = vm.AddMemory(ctx, hypervisorConfig.MemorySize-baseConfig.MemorySize)
		if err != nil {
			return nil, err
		}
		online = true
	}

	if online {
		err = vm.OnlineCPUMemory(ctx)
		if err != nil {
			return nil, err
		}
	}

	return vm, nil
}

// Config returns base factory config.
func (f *factory) Config() vc.VMConfig {
	return f.base.Config()
}

// GetVMStatus returns the status of the paused VM created by the base factory.
func (f *factory) GetVMStatus() []*pb.GrpcVMStatus {
	return f.base.GetVMStatus()
}

// GetBaseVM returns a paused VM created by the base factory.
func (f *factory) GetBaseVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	return f.base.GetBaseVM(ctx, config)
}

// CloseFactory closes the factory.
func (f *factory) CloseFactory(ctx context.Context) {
	f.base.CloseFactory(ctx)
}
src/runtime/virtcontainers/factory/factory_darwin.go (new file, 43 lines)
@@ -0,0 +1,43 @@
// Copyright (c) 2022 Apple Inc.
//
// SPDX-License-Identifier: Apache-2.0
//

package factory

import (
	"context"

	pb "github.com/kata-containers/kata-containers/src/runtime/protocols/cache"
	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
	"github.com/pkg/errors"
)

var unsupportedFactory error = errors.New("VM factory is unsupported on Darwin")

type factory struct {
}

func NewFactory(ctx context.Context, config Config, fetchOnly bool) (vc.Factory, error) {
	return &factory{}, unsupportedFactory
}

func (f *factory) Config() vc.VMConfig {
	return vc.VMConfig{}
}

func (f *factory) GetVMStatus() []*pb.GrpcVMStatus {
	return nil
}

func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	return nil, unsupportedFactory
}

func (f *factory) GetBaseVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	return nil, unsupportedFactory
}

func (f *factory) CloseFactory(ctx context.Context) {
	return
}
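Because the Darwin stub makes NewFactory fail unconditionally, no usable VM factory can ever be obtained on macOS. A minimal caller-side sketch, assuming only the exported NewFactory/Config/CloseFactory API shown in this diff (the variable names and the empty Config literal are illustrative, not how the runtime actually wires this up):

package main

import (
	"context"
	"fmt"

	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory"
)

func main() {
	ctx := context.Background()
	// On Darwin this always returns the "VM factory is unsupported on Darwin"
	// error, so a caller can only report the failure and continue without a factory.
	f, err := factory.NewFactory(ctx, factory.Config{}, false)
	if err != nil {
		fmt.Println("VM factory unavailable:", err)
		return
	}
	defer f.CloseFactory(ctx)
}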
src/runtime/virtcontainers/factory/factory_linux.go (new file, 203 lines)
@@ -0,0 +1,203 @@
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//

package factory

import (
	"context"
	"fmt"

	"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
	pb "github.com/kata-containers/kata-containers/src/runtime/protocols/cache"
	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/base"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/cache"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/direct"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/grpccache"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/template"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
)

type factory struct {
	base base.FactoryBase
}

// NewFactory returns a working factory.
func NewFactory(ctx context.Context, config Config, fetchOnly bool) (vc.Factory, error) {
	span, _ := katatrace.Trace(ctx, nil, "NewFactory", factoryTracingTags)
	defer span.End()

	err := config.VMConfig.Valid()
	if err != nil {
		return nil, err
	}

	if fetchOnly && config.Cache > 0 {
		return nil, fmt.Errorf("cache factory does not support fetch")
	}

	var b base.FactoryBase
	if config.VMCache && config.Cache == 0 {
		// For VMCache client
		b, err = grpccache.New(ctx, config.VMCacheEndpoint)
		if err != nil {
			return nil, err
		}
	} else {
		if config.Template {
			if fetchOnly {
				b, err = template.Fetch(config.VMConfig, config.TemplatePath)
				if err != nil {
					return nil, err
				}
			} else {
				b, err = template.New(ctx, config.VMConfig, config.TemplatePath)
				if err != nil {
					return nil, err
				}
			}
		} else {
			b = direct.New(ctx, config.VMConfig)
		}

		if config.Cache > 0 {
			b = cache.New(ctx, config.Cache, b)
		}
	}

	return &factory{b}, nil
}

func resetHypervisorConfig(config *vc.VMConfig) {
	config.HypervisorConfig.NumVCPUs = 0
	config.HypervisorConfig.MemorySize = 0
	config.HypervisorConfig.BootToBeTemplate = false
	config.HypervisorConfig.BootFromTemplate = false
	config.HypervisorConfig.MemoryPath = ""
	config.HypervisorConfig.DevicesStatePath = ""
	config.HypervisorConfig.SharedPath = ""
	config.HypervisorConfig.VMStorePath = ""
	config.HypervisorConfig.RunStorePath = ""
}

// It's important that baseConfig and newConfig are passed by value!
func checkVMConfig(baseConfig, newConfig vc.VMConfig) error {
	if baseConfig.HypervisorType != newConfig.HypervisorType {
		return fmt.Errorf("hypervisor type does not match: %s vs. %s", baseConfig.HypervisorType, newConfig.HypervisorType)
	}

	// check hypervisor config details
	resetHypervisorConfig(&baseConfig)
	resetHypervisorConfig(&newConfig)

	if !utils.DeepCompare(baseConfig, newConfig) {
		return fmt.Errorf("hypervisor config does not match, base: %+v. new: %+v", baseConfig, newConfig)
	}

	return nil
}

func (f *factory) checkConfig(config vc.VMConfig) error {
	baseConfig := f.base.Config()

	return checkVMConfig(baseConfig, config)
}

// GetVM returns a working blank VM created by the factory.
func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	span, ctx := katatrace.Trace(ctx, f.log(), "GetVM", factoryTracingTags)
	defer span.End()

	hypervisorConfig := config.HypervisorConfig
	if err := config.Valid(); err != nil {
		f.log().WithError(err).Error("invalid hypervisor config")
		return nil, err
	}

	err := f.checkConfig(config)
	if err != nil {
		f.log().WithError(err).Info("fallback to direct factory vm")
		return direct.New(ctx, config).GetBaseVM(ctx, config)
	}

	f.log().Info("get base VM")
	vm, err := f.base.GetBaseVM(ctx, config)
	if err != nil {
		f.log().WithError(err).Error("failed to get base VM")
		return nil, err
	}

	// cleanup upon error
	defer func() {
		if err != nil {
			f.log().WithError(err).Error("clean up vm")
			vm.Stop(ctx)
		}
	}()

	err = vm.Resume(ctx)
	if err != nil {
		return nil, err
	}

	// reseed RNG so that shared memory VMs do not generate same random numbers.
	err = vm.ReseedRNG(ctx)
	if err != nil {
		return nil, err
	}

	// sync guest time since we might have paused it for a long time.
	err = vm.SyncTime(ctx)
	if err != nil {
		return nil, err
	}

	online := false
	baseConfig := f.base.Config().HypervisorConfig
	if baseConfig.NumVCPUs < hypervisorConfig.NumVCPUs {
		err = vm.AddCPUs(ctx, hypervisorConfig.NumVCPUs-baseConfig.NumVCPUs)
		if err != nil {
			return nil, err
		}
		online = true
	}

	if baseConfig.MemorySize < hypervisorConfig.MemorySize {
		err = vm.AddMemory(ctx, hypervisorConfig.MemorySize-baseConfig.MemorySize)
		if err != nil {
			return nil, err
		}
		online = true
	}

	if online {
		err = vm.OnlineCPUMemory(ctx)
		if err != nil {
			return nil, err
		}
	}

	return vm, nil
}

// Config returns base factory config.
func (f *factory) Config() vc.VMConfig {
	return f.base.Config()
}

// GetVMStatus returns the status of the paused VM created by the base factory.
func (f *factory) GetVMStatus() []*pb.GrpcVMStatus {
	return f.base.GetVMStatus()
}

// GetBaseVM returns a paused VM created by the base factory.
func (f *factory) GetBaseVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	return f.base.GetBaseVM(ctx, config)
}

// CloseFactory closes the factory.
func (f *factory) CloseFactory(ctx context.Context) {
	f.base.CloseFactory(ctx)
}
@@ -1,3 +1,4 @@
//
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0

@@ -8,178 +9,12 @@ package template

import (
	"context"
	"fmt"
	"os"
	"syscall"
	"time"

	pb "github.com/kata-containers/kata-containers/src/runtime/protocols/cache"
	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/base"
	"github.com/sirupsen/logrus"
)

type template struct {
	statePath string
	config    vc.VMConfig
}

var templateWaitForAgent = 2 * time.Second
var templateLog = logrus.WithField("source", "virtcontainers/factory/template")

// Fetch finds and returns a pre-built template factory.
// TODO: save template metadata and fetch from storage.
func Fetch(config vc.VMConfig, templatePath string) (base.FactoryBase, error) {
	t := &template{templatePath, config}

	err := t.checkTemplateVM()
	if err != nil {
		return nil, err
	}

	return t, nil
}

// New creates a new VM template factory.
func New(ctx context.Context, config vc.VMConfig, templatePath string) (base.FactoryBase, error) {
	t := &template{templatePath, config}

	err := t.checkTemplateVM()
	if err == nil {
		return nil, fmt.Errorf("There is already a VM template in %s", templatePath)
	}

	err = t.prepareTemplateFiles()
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			t.close()
		}
	}()

	err = t.createTemplateVM(ctx)
	if err != nil {
		return nil, err
	}

	return t, nil
}

// Config returns template factory's configuration.
func (t *template) Config() vc.VMConfig {
	return t.config
}

// GetBaseVM creates a new paused VM from the template VM.
func (t *template) GetBaseVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	return t.createFromTemplateVM(ctx, config)
}

// CloseFactory cleans up the template VM.
func (t *template) CloseFactory(ctx context.Context) {
	t.close()
}

// GetVMStatus is not supported
func (t *template) GetVMStatus() []*pb.GrpcVMStatus {
	panic("ERROR: package template does not support GetVMStatus")
}

func (t *template) close() {
	if err := syscall.Unmount(t.statePath, syscall.MNT_DETACH); err != nil {
		t.Logger().WithError(err).Errorf("failed to unmount %s", t.statePath)
	}

	if err := os.RemoveAll(t.statePath); err != nil {
		t.Logger().WithError(err).Errorf("failed to remove %s", t.statePath)
	}
}

func (t *template) prepareTemplateFiles() error {
	// create and mount tmpfs for the shared memory file
	err := os.MkdirAll(t.statePath, 0700)
	if err != nil {
		return err
	}
	flags := uintptr(syscall.MS_NOSUID | syscall.MS_NODEV)
	opts := fmt.Sprintf("size=%dM", t.config.HypervisorConfig.MemorySize+templateDeviceStateSize)
	if err = syscall.Mount("tmpfs", t.statePath, "tmpfs", flags, opts); err != nil {
		t.close()
		return err
	}
	f, err := os.Create(t.statePath + "/memory")
	if err != nil {
		t.close()
		return err
	}
	f.Close()

	return nil
}

func (t *template) createTemplateVM(ctx context.Context) error {
	// create the template vm
	config := t.config
	config.HypervisorConfig.BootToBeTemplate = true
	config.HypervisorConfig.BootFromTemplate = false
	config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
	config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"

	vm, err := vc.NewVM(ctx, config)
	if err != nil {
		return err
	}
	defer vm.Stop(ctx)

	if err = vm.Disconnect(ctx); err != nil {
		return err
	}

	// Sleep a bit to let the agent grpc server clean up
	// When we close connection to the agent, it needs sometime to cleanup
	// and restart listening on the communication( serial or vsock) port.
	// That time can be saved if we sleep a bit to wait for the agent to
	// come around and start listening again. The sleep is only done when
	// creating new vm templates and saves time for every new vm that are
	// created from template, so it worth the invest.
	time.Sleep(templateWaitForAgent)

	if err = vm.Pause(ctx); err != nil {
		return err
	}

	if err = vm.Save(); err != nil {
		return err
	}

	return nil
}

func (t *template) createFromTemplateVM(ctx context.Context, c vc.VMConfig) (*vc.VM, error) {
	config := t.config
	config.HypervisorConfig.BootToBeTemplate = false
	config.HypervisorConfig.BootFromTemplate = true
	config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
	config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"
	config.HypervisorConfig.SharedPath = c.HypervisorConfig.SharedPath
	config.HypervisorConfig.VMStorePath = c.HypervisorConfig.VMStorePath
	config.HypervisorConfig.RunStorePath = c.HypervisorConfig.RunStorePath

	return vc.NewVM(ctx, config)
}

func (t *template) checkTemplateVM() error {
	_, err := os.Stat(t.statePath + "/memory")
	if err != nil {
		return err
	}

	_, err = os.Stat(t.statePath + "/state")
	return err
}

// Logger returns a logrus logger appropriate for logging template messages
func (t *template) Logger() *logrus.Entry {
	return templateLog.WithFields(logrus.Fields{
@@ -0,0 +1,10 @@
//
// Copyright (c) 2022 Apple, Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
// template implements base vm factory with vm templating.

package template

type template struct{}
src/runtime/virtcontainers/factory/template/template_linux.go (new file, 180 lines)
@@ -0,0 +1,180 @@
//
// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//
// template implements base vm factory with vm templating.

package template

import (
	"context"
	"fmt"
	"os"
	"syscall"
	"time"

	pb "github.com/kata-containers/kata-containers/src/runtime/protocols/cache"
	vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/factory/base"
)

type template struct {
	statePath string
	config    vc.VMConfig
}

var templateWaitForAgent = 2 * time.Second

// Fetch finds and returns a pre-built template factory.
// TODO: save template metadata and fetch from storage.
func Fetch(config vc.VMConfig, templatePath string) (base.FactoryBase, error) {
	t := &template{templatePath, config}

	err := t.checkTemplateVM()
	if err != nil {
		return nil, err
	}

	return t, nil
}

// New creates a new VM template factory.
func New(ctx context.Context, config vc.VMConfig, templatePath string) (base.FactoryBase, error) {
	t := &template{templatePath, config}

	err := t.checkTemplateVM()
	if err == nil {
		return nil, fmt.Errorf("There is already a VM template in %s", templatePath)
	}

	err = t.prepareTemplateFiles()
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			t.close()
		}
	}()

	err = t.createTemplateVM(ctx)
	if err != nil {
		return nil, err
	}

	return t, nil
}

// Config returns template factory's configuration.
func (t *template) Config() vc.VMConfig {
	return t.config
}

// GetBaseVM creates a new paused VM from the template VM.
func (t *template) GetBaseVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
	return t.createFromTemplateVM(ctx, config)
}

// CloseFactory cleans up the template VM.
func (t *template) CloseFactory(ctx context.Context) {
	t.close()
}

// GetVMStatus is not supported
func (t *template) GetVMStatus() []*pb.GrpcVMStatus {
	panic("ERROR: package template does not support GetVMStatus")
}

func (t *template) close() {
	if err := syscall.Unmount(t.statePath, syscall.MNT_DETACH); err != nil {
		t.Logger().WithError(err).Errorf("failed to unmount %s", t.statePath)
	}

	if err := os.RemoveAll(t.statePath); err != nil {
		t.Logger().WithError(err).Errorf("failed to remove %s", t.statePath)
	}
}

func (t *template) prepareTemplateFiles() error {
	// create and mount tmpfs for the shared memory file
	err := os.MkdirAll(t.statePath, 0700)
	if err != nil {
		return err
	}
	flags := uintptr(syscall.MS_NOSUID | syscall.MS_NODEV)
	opts := fmt.Sprintf("size=%dM", t.config.HypervisorConfig.MemorySize+templateDeviceStateSize)
	if err = syscall.Mount("tmpfs", t.statePath, "tmpfs", flags, opts); err != nil {
		t.close()
		return err
	}
	f, err := os.Create(t.statePath + "/memory")
	if err != nil {
		t.close()
		return err
	}
	f.Close()

	return nil
}

func (t *template) createTemplateVM(ctx context.Context) error {
	// create the template vm
	config := t.config
	config.HypervisorConfig.BootToBeTemplate = true
	config.HypervisorConfig.BootFromTemplate = false
	config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
	config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"

	vm, err := vc.NewVM(ctx, config)
	if err != nil {
		return err
	}
	defer vm.Stop(ctx)

	if err = vm.Disconnect(ctx); err != nil {
		return err
	}

	// Sleep a bit to let the agent grpc server clean up
	// When we close connection to the agent, it needs sometime to cleanup
	// and restart listening on the communication( serial or vsock) port.
	// That time can be saved if we sleep a bit to wait for the agent to
	// come around and start listening again. The sleep is only done when
	// creating new vm templates and saves time for every new vm that are
	// created from template, so it worth the invest.
	time.Sleep(templateWaitForAgent)

	if err = vm.Pause(ctx); err != nil {
		return err
	}

	if err = vm.Save(); err != nil {
		return err
	}

	return nil
}

func (t *template) createFromTemplateVM(ctx context.Context, c vc.VMConfig) (*vc.VM, error) {
	config := t.config
	config.HypervisorConfig.BootToBeTemplate = false
	config.HypervisorConfig.BootFromTemplate = true
	config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
	config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"
	config.HypervisorConfig.SharedPath = c.HypervisorConfig.SharedPath
	config.HypervisorConfig.VMStorePath = c.HypervisorConfig.VMStorePath
	config.HypervisorConfig.RunStorePath = c.HypervisorConfig.RunStorePath

	return vc.NewVM(ctx, config)
}

func (t *template) checkTemplateVM() error {
	_, err := os.Stat(t.statePath + "/memory")
	if err != nil {
		return err
	}

	_, err = os.Stat(t.statePath + "/state")
	return err
}