Merge pull request #3 from kata-containers/master

Merge #3 of kata-containers/runtime

Commit: 7b8e15f3a7
.gitignore (vendored) | 1
@@ -4,6 +4,7 @@
 .git-commit
 .git-commit.tmp
 /cli/config/configuration-acrn.toml
+/cli/config/configuration-clh.toml
 /cli/config/configuration-fc.toml
 /cli/config/configuration-nemu.toml
 /cli/config/configuration-qemu.toml
@@ -16,7 +16,9 @@ path = "@FCPATH@"
 # If the jailer path is not set kata will launch firecracker
 # without a jail. If the jailer is set firecracker will be
 # launched in a jailed enviornment created by the jailer
-jailer_path = "@FCJAILERPATH@"
+# This is disabled by default as additional setup is required
+# for this feature today.
+#jailer_path = "@FCJAILERPATH@"
 kernel = "@KERNELPATH_FC@"
 image = "@IMAGEPATH@"
 
cli/main.go | 24
@@ -21,6 +21,7 @@ import (
 "github.com/kata-containers/runtime/pkg/rootless"
 "github.com/kata-containers/runtime/pkg/signals"
 vc "github.com/kata-containers/runtime/virtcontainers"
+exp "github.com/kata-containers/runtime/virtcontainers/experimental"
 vf "github.com/kata-containers/runtime/virtcontainers/factory"
 "github.com/kata-containers/runtime/virtcontainers/pkg/oci"
 specs "github.com/opencontainers/runtime-spec/specs-go"

@@ -346,6 +347,11 @@ func beforeSubcommands(c *cli.Context) error {
 "arguments": `"` + args + `"`,
 }
 
+err = addExpFeatures(c, runtimeConfig)
+if err != nil {
+return err
+}
+
 kataLog.WithFields(fields).Info()
 
 // make the data accessible to the sub-commands.

@@ -401,6 +407,24 @@ func setupTracing(context *cli.Context, rootSpanName string) error {
 return nil
 }
 
+// add supported experimental features in context
+func addExpFeatures(clictx *cli.Context, runtimeConfig oci.RuntimeConfig) error {
+ctx, err := cliContextToContext(clictx)
+if err != nil {
+return err
+}
+
+var exps []string
+for _, e := range runtimeConfig.Experimental {
+exps = append(exps, e.Name)
+}
+
+ctx = exp.ContextWithExp(ctx, exps)
+// Add tracer to metadata and update the context
+clictx.App.Metadata["context"] = ctx
+return nil
+}
+
 func afterSubcommands(c *cli.Context) error {
 ctx, err := cliContextToContext(c)
 if err != nil {
@@ -65,6 +65,10 @@ func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shi
 // it will output into stdio, from which containerd would like
 // to get the shim's socket address.
 logrus.SetOutput(ioutil.Discard)
+opts := ctx.Value(cdshim.OptsKey{}).(cdshim.Opts)
+if !opts.Debug {
+logrus.SetLevel(logrus.WarnLevel)
+}
 vci.SetLogger(ctx, logger)
 katautils.SetLogger(ctx, logger, logger.Logger.Level)
 

@@ -141,7 +145,10 @@ func newCommand(ctx context.Context, containerdBinary, id, containerdAddress str
 "-address", containerdAddress,
 "-publish-binary", containerdBinary,
 "-id", id,
-"-debug",
+}
+opts := ctx.Value(cdshim.OptsKey{}).(cdshim.Opts)
+if opts.Debug {
+args = append(args, "-debug")
 }
 cmd := sysexec.Command(self, args...)
 cmd.Dir = cwd
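A minimal, self-contained sketch (not the repository's code) of the pattern the two hunks above introduce: the shim only lowers its log level and only forwards a `-debug` flag when the debug option carried in the context is enabled. The `Opts`/`optsKey` types here are stand-ins for the containerd shim options the real code reads via `cdshim.OptsKey{}`.

```go
package main

import (
	"context"
	"fmt"
)

// Opts mirrors the shape of the options the shim reads from its context;
// in the real code this comes from the containerd shim package (cdshim.Opts).
type Opts struct {
	Debug bool
}

type optsKey struct{}

// buildShimArgs appends "-debug" only when the context carries Debug=true,
// which is the behaviour the diff changes from "always on" to "opt-in".
func buildShimArgs(ctx context.Context, containerdAddress, containerdBinary, id string) []string {
	args := []string{
		"-address", containerdAddress,
		"-publish-binary", containerdBinary,
		"-id", id,
	}
	if opts, ok := ctx.Value(optsKey{}).(Opts); ok && opts.Debug {
		args = append(args, "-debug")
	}
	return args
}

func main() {
	ctx := context.WithValue(context.Background(), optsKey{}, Opts{Debug: true})
	// Placeholder paths and ID, for illustration only.
	fmt.Println(buildShimArgs(ctx, "/run/containerd/containerd.sock", "/usr/bin/containerd", "abc123"))
}
```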
@@ -70,6 +70,13 @@ assets:
 hypervisor:
 description: "Component used to create virtual machines"
 
+cloud_hypervisor:
+description: "Cloud Hypervisor is an open source Virtual Machine Monitor"
+url: "https://github.com/intel/cloud-hypervisor"
+uscan-url: >-
+https://github.com/intel/cloud-hypervisor/tags.*/v?(\d\S+)\.tar\.gz
+version: "v0.3.0"
+
 firecracker:
 description: "Firecracker micro-VMM"
 url: "https://github.com/firecracker-microvm/firecracker"
@@ -513,15 +513,11 @@ func (a *Acrn) stopSandbox() (err error) {
 
 pid := a.state.PID
 
-// Check if VM process is running, in case it is not, let's
-// return from here.
-if err = syscall.Kill(pid, syscall.Signal(0)); err != nil {
-a.Logger().Info("acrn VM already stopped")
-return nil
-}
-
 // Send signal to the VM process to try to stop it properly
 if err = syscall.Kill(pid, syscall.SIGINT); err != nil {
+if err == syscall.ESRCH {
+return nil
+}
 a.Logger().Info("Sending signal to stop acrn VM failed")
 return err
 }
@@ -13,6 +13,7 @@ import (
 
 deviceApi "github.com/kata-containers/runtime/virtcontainers/device/api"
 deviceConfig "github.com/kata-containers/runtime/virtcontainers/device/config"
+"github.com/kata-containers/runtime/virtcontainers/persist/fs"
 vcTypes "github.com/kata-containers/runtime/virtcontainers/pkg/types"
 "github.com/kata-containers/runtime/virtcontainers/store"
 "github.com/kata-containers/runtime/virtcontainers/types"

@@ -307,7 +308,14 @@ func ListSandbox(ctx context.Context) ([]SandboxStatus, error) {
 span, ctx := trace(ctx, "ListSandbox")
 defer span.Finish()
 
-dir, err := os.Open(store.ConfigStoragePath())
+var sbsdir string
+if supportNewStore(ctx) {
+sbsdir = fs.RunStoragePath()
+} else {
+sbsdir = store.RunStoragePath()
+}
+
+dir, err := os.Open(sbsdir)
 if err != nil {
 if os.IsNotExist(err) {
 // No sandbox directory is not an error
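A hedged sketch of the listing logic after the `ListSandbox` change above: the caller passes whichever run-storage directory the feature gate selected, and a missing directory means "no sandboxes" rather than an error. The path below is a placeholder for `store.RunStoragePath()` / `fs.RunStoragePath()`.

```go
package main

import (
	"fmt"
	"os"
)

// listSandboxIDs mirrors the shape of ListSandbox after this diff: it opens
// the selected storage directory and returns the sandbox IDs found there.
func listSandboxIDs(sbsdir string) ([]string, error) {
	dir, err := os.Open(sbsdir)
	if err != nil {
		if os.IsNotExist(err) {
			// No sandbox directory is not an error.
			return nil, nil
		}
		return nil, err
	}
	defer dir.Close()
	return dir.Readdirnames(0)
}

func main() {
	ids, err := listSandboxIDs("/run/vc/sbs") // placeholder path
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	fmt.Println("sandboxes:", ids)
}
```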
@@ -192,13 +192,6 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
 iommu: false,
 })
 
-// Add the hybrid vsock device to hypervisor
-clh.cliBuilder.SetVsock(&CLIVsock{
-cid: 3,
-socketPath: clh.socketPath,
-iommu: false,
-})
-
 // set the initial root/boot disk of hypervisor
 imagePath, err := clh.config.ImageAssetPath()
 if err != nil {

@@ -436,7 +429,18 @@ func (clh *cloudHypervisor) addDevice(devInfo interface{}, devType deviceType) e
 device: v.Name(),
 mac: v.HardwareAddr(),
 })
+case types.HybridVSock:
+clh.Logger().WithFields(log.Fields{
+"function": "addDevice",
+"path": v.UdsPath,
+"cid": v.ContextID,
+"port": v.Port,
+}).Info("Adding HybridVSock")
+clh.cliBuilder.SetVsock(&CLIVsock{
+cid: uint32(v.ContextID),
+socketPath: v.UdsPath,
+iommu: false,
+})
 default:
 clh.Logger().WithField("function", "addDevice").Warnf("Add device of type %v is not supported.", v)
 }

@@ -544,7 +548,7 @@ func (clh *cloudHypervisor) reset() {
 
 func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{}, error) {
 if !useVsock {
-return nil, fmt.Errorf("Can't generate socket path for cloud-hypervisor: vsocks is disabled")
+return nil, fmt.Errorf("Can't generate hybrid vsocket for cloud-hypervisor: vsocks is disabled")
 }
 
 udsPath, err := clh.vsockSocketPath(id)

@@ -552,10 +556,14 @@ func (clh *cloudHypervisor) generateSocket(id string, useVsock bool) (interface{
 clh.Logger().Info("Can't generate socket path for cloud-hypervisor")
 return types.HybridVSock{}, err
 }
-clh.Logger().WithField("function", "generateSocket").Infof("Using hybrid vsock %s:%d", udsPath, vSockPort)
+_, cid, err := utils.FindContextID()
+if err != nil {
+return nil, err
+}
 clh.socketPath = udsPath
 return types.HybridVSock{
 UdsPath: udsPath,
+ContextID: cid,
 Port: uint32(vSockPort),
 }, nil
 }
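A hedged sketch of what the new `generateSocket` path produces: a hybrid vsock descriptor carrying the host Unix-socket path, a guest context ID, and the vsock port. The struct below mirrors the `types.HybridVSock` shape shown in this diff; the context ID and paths are placeholders, whereas the real code obtains the ID from `utils.FindContextID()`.

```go
package main

import "fmt"

// HybridVSock mirrors the fields used in this diff: a host Unix socket path,
// a guest context ID, and the vsock port.
type HybridVSock struct {
	UdsPath   string
	ContextID uint64
	Port      uint32
}

// generateHybridVSock is a stand-in for cloudHypervisor.generateSocket: it
// simply bundles the pieces the hypervisor driver needs to wire up the device.
func generateHybridVSock(udsPath string, cid uint64, port uint32) HybridVSock {
	return HybridVSock{UdsPath: udsPath, ContextID: cid, Port: port}
}

func main() {
	hv := generateHybridVSock("/run/vc/vm/sandbox1/clh.sock", 3, 1024) // placeholder values
	fmt.Printf("hybrid vsock %s:%d (cid %d)\n", hv.UdsPath, hv.Port, hv.ContextID)
}
```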
@@ -417,6 +417,7 @@ func (c *Container) storeContainer() error {
 if err := c.sandbox.Save(); err != nil {
 return err
 }
+return nil
 }
 return c.store.Store(store.Configuration, *(c.config))
 }
@@ -6,6 +6,7 @@
 package experimental
 
 import (
+"context"
 "fmt"
 "regexp"
 )

@@ -22,8 +23,11 @@ type Feature struct {
 ExpRelease string
 }
 
+type contextKey struct{}
+
 var (
 supportedFeatures = make(map[string]Feature)
+expContextKey = contextKey{}
 )
 
 // Register register a new experimental feature

@@ -61,3 +65,16 @@ func validateFeature(feature Feature) error {
 
 return nil
 }
+
+func ContextWithExp(ctx context.Context, names []string) context.Context {
+return context.WithValue(ctx, expContextKey, names)
+}
+
+func ExpFromContext(ctx context.Context) []string {
+value := ctx.Value(expContextKey)
+if value == nil {
+return nil
+}
+names := value.([]string)
+return names
+}
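A small usage sketch of the round trip the runtime relies on after this change: the CLI stashes the enabled experimental feature names in a `context.Context`, and the library side reads them back. It assumes nothing beyond the two helpers added above, reproduced here so the example is self-contained.

```go
package main

import (
	"context"
	"fmt"
)

type contextKey struct{}

var expContextKey = contextKey{}

// ContextWithExp and ExpFromContext reproduce the helpers added in this diff.
func ContextWithExp(ctx context.Context, names []string) context.Context {
	return context.WithValue(ctx, expContextKey, names)
}

func ExpFromContext(ctx context.Context) []string {
	value := ctx.Value(expContextKey)
	if value == nil {
		return nil
	}
	return value.([]string)
}

func main() {
	// The CLI side stores the enabled experimental features...
	ctx := ContextWithExp(context.Background(), []string{"newstore"})

	// ...and the library side later checks whether a given feature is on.
	for _, name := range ExpFromContext(ctx) {
		if name == "newstore" {
			fmt.Println("newstore experimental feature enabled")
		}
	}
}
```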
@@ -21,6 +21,7 @@ import (
 "syscall"
 "time"
 
+"github.com/containerd/fifo"
 httptransport "github.com/go-openapi/runtime/client"
 "github.com/go-openapi/strfmt"
 kataclient "github.com/kata-containers/agent/protocols/client"

@@ -63,12 +64,15 @@ const (
 // We attach a pool of placeholder drives before the guest has started, and then
 // patch the replace placeholder drives with drives with actual contents.
 fcDiskPoolSize = 8
 
 defaultHybridVSocketName = "kata.hvsock"
 
 // This is the first usable vsock context ID. All the vsocks can use the same
 // ID, since it's only used in the guest.
 defaultGuestVSockCID = int64(0x3)
+
+// This is related to firecracker logging scheme
+fcLogFifo = "logs.fifo"
+fcMetricsFifo = "metrics.fifo"
 )
 
 // Specify the minimum version of firecracker supported

@@ -467,14 +471,11 @@ func (fc *firecracker) fcEnd() (err error) {
 
 pid := fc.info.PID
 
-// Check if VM process is running, in case it is not, let's
-// return from here.
-if err = syscall.Kill(pid, syscall.Signal(0)); err != nil {
-return nil
-}
-
 // Send a SIGTERM to the VM process to try to stop it properly
 if err = syscall.Kill(pid, syscall.SIGTERM); err != nil {
+if err == syscall.ESRCH {
+return nil
+}
 return err
 }
 
@@ -616,6 +617,69 @@ func (fc *firecracker) fcSetVMBaseConfig(mem int64, vcpus int64, htEnabled bool)
 return err
 }
 
+func (fc *firecracker) fcSetLogger() error {
+span, _ := fc.trace("fcSetLogger")
+defer span.Finish()
+
+fcLogLevel := "Error"
+
+// listen to log fifo file and transfer error info
+jailedLogFifo, err := fc.fcListenToFifo(fcLogFifo)
+if err != nil {
+return fmt.Errorf("Failed setting log: %s", err)
+}
+
+// listen to metrics file and transfer error info
+jailedMetricsFifo, err := fc.fcListenToFifo(fcMetricsFifo)
+if err != nil {
+return fmt.Errorf("Failed setting log: %s", err)
+}
+
+param := ops.NewPutLoggerParams()
+cfg := &models.Logger{
+Level: &fcLogLevel,
+LogFifo: &jailedLogFifo,
+MetricsFifo: &jailedMetricsFifo,
+Options: []string{},
+}
+param.SetBody(cfg)
+_, err = fc.client().Operations.PutLogger(param)
+
+return err
+}
+
+func (fc *firecracker) fcListenToFifo(fifoName string) (string, error) {
+fcFifoPath := filepath.Join(fc.vmPath, fifoName)
+fcFifo, err := fifo.OpenFifo(context.Background(), fcFifoPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
+if err != nil {
+return "", fmt.Errorf("Failed to open/create fifo file %s", err)
+}
+
+jailedFifoPath, err := fc.fcJailResource(fcFifoPath, fifoName)
+if err != nil {
+return "", err
+}
+
+go func() {
+scanner := bufio.NewScanner(fcFifo)
+for scanner.Scan() {
+fc.Logger().WithFields(logrus.Fields{
+"fifoName": fifoName,
+"contents": scanner.Text()}).Error("firecracker failed")
+}
+
+if err := scanner.Err(); err != nil {
+fc.Logger().WithError(err).Errorf("Failed reading firecracker fifo file")
+}
+
+if err := fcFifo.Close(); err != nil {
+fc.Logger().WithError(err).Errorf("Failed closing firecracker fifo file")
+}
+}()
+
+return jailedFifoPath, nil
+}
+
 func (fc *firecracker) fcStartVM() error {
 fc.Logger().Info("start firecracker virtual machine")
 span, _ := fc.trace("fcStartVM")
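A self-contained sketch of the fifo-draining idea behind `fcListenToFifo` above: open a named pipe, scan it line by line in a goroutine, and report whatever the VMM writes. It uses only the standard library (a plain `os.OpenFile` on a FIFO created with `syscall.Mkfifo`, rather than `github.com/containerd/fifo`), and the file name is a placeholder mirroring `fcLogFifo`.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"syscall"
)

// listenToFifo mirrors the structure of fcListenToFifo: it scans a named pipe
// line by line and reports each line, finishing once the writer closes it.
func listenToFifo(path string, wg *sync.WaitGroup) error {
	f, err := os.OpenFile(path, os.O_RDONLY, 0) // blocks until a writer opens the fifo
	if err != nil {
		return fmt.Errorf("failed to open fifo file: %w", err)
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer f.Close()
		scanner := bufio.NewScanner(f)
		for scanner.Scan() {
			fmt.Printf("fifo %s: %s\n", filepath.Base(path), scanner.Text())
		}
		if err := scanner.Err(); err != nil {
			fmt.Println("failed reading fifo:", err)
		}
	}()
	return nil
}

func main() {
	dir, err := os.MkdirTemp("", "fifo-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "logs.fifo") // placeholder name, mirroring fcLogFifo
	if err := syscall.Mkfifo(path, 0o600); err != nil {
		panic(err)
	}

	var wg sync.WaitGroup

	// Writer goroutine: stands in for the VMM producing log lines.
	wg.Add(1)
	go func() {
		defer wg.Done()
		w, err := os.OpenFile(path, os.O_WRONLY, 0)
		if err != nil {
			panic(err)
		}
		fmt.Fprintln(w, "example error line")
		w.Close()
	}()

	if err := listenToFifo(path, &wg); err != nil {
		panic(err)
	}
	wg.Wait()
}
```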
@@ -712,6 +776,10 @@ func (fc *firecracker) startSandbox(timeout int) error {
 }
 }
 
+if err := fc.fcSetLogger(); err != nil {
+return err
+}
+
 if err := fc.fcStartVM(); err != nil {
 return err
 }
@@ -772,6 +840,8 @@ func (fc *firecracker) cleanupJail() {
 
 fc.umountResource(fcKernel)
 fc.umountResource(fcRootfs)
+fc.umountResource(fcLogFifo)
+fc.umountResource(fcMetricsFifo)
 
 fc.Logger().WithField("cleaningJail", fc.vmPath).Info()
 if err := os.RemoveAll(fc.vmPath); err != nil {
@@ -13,6 +13,7 @@ import (
 
 "github.com/kata-containers/runtime/virtcontainers/persist"
 persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
 "github.com/kata-containers/runtime/virtcontainers/types"
+"github.com/mitchellh/mapstructure"
 )
 
 var (

@@ -171,6 +172,119 @@ func (s *Sandbox) dumpNetwork(ss *persistapi.SandboxState) {
 }
 }
 
+func (s *Sandbox) dumpConfig(ss *persistapi.SandboxState) {
+sconfig := s.config
+ss.Config = persistapi.SandboxConfig{
+HypervisorType: string(sconfig.HypervisorType),
+AgentType: string(sconfig.AgentType),
+ProxyType: string(sconfig.ProxyType),
+ProxyConfig: persistapi.ProxyConfig{
+Path: sconfig.ProxyConfig.Path,
+Debug: sconfig.ProxyConfig.Debug,
+},
+ShimType: string(sconfig.ShimType),
+NetworkConfig: persistapi.NetworkConfig{
+NetNSPath: sconfig.NetworkConfig.NetNSPath,
+NetNsCreated: sconfig.NetworkConfig.NetNsCreated,
+DisableNewNetNs: sconfig.NetworkConfig.DisableNewNetNs,
+InterworkingModel: int(sconfig.NetworkConfig.InterworkingModel),
+},
+
+ShmSize: sconfig.ShmSize,
+SharePidNs: sconfig.SharePidNs,
+Stateful: sconfig.Stateful,
+SystemdCgroup: sconfig.SystemdCgroup,
+SandboxCgroupOnly: sconfig.SandboxCgroupOnly,
+DisableGuestSeccomp: sconfig.DisableGuestSeccomp,
+}
+
+for _, e := range sconfig.Experimental {
+ss.Config.Experimental = append(ss.Config.Experimental, e.Name)
+}
+
+ss.Config.HypervisorConfig = persistapi.HypervisorConfig{
+NumVCPUs: sconfig.HypervisorConfig.NumVCPUs,
+DefaultMaxVCPUs: sconfig.HypervisorConfig.DefaultMaxVCPUs,
+MemorySize: sconfig.HypervisorConfig.MemorySize,
+DefaultBridges: sconfig.HypervisorConfig.DefaultBridges,
+Msize9p: sconfig.HypervisorConfig.Msize9p,
+MemSlots: sconfig.HypervisorConfig.MemSlots,
+MemOffset: sconfig.HypervisorConfig.MemOffset,
+VirtioFSCacheSize: sconfig.HypervisorConfig.VirtioFSCacheSize,
+KernelPath: sconfig.HypervisorConfig.KernelPath,
+ImagePath: sconfig.HypervisorConfig.ImagePath,
+InitrdPath: sconfig.HypervisorConfig.InitrdPath,
+FirmwarePath: sconfig.HypervisorConfig.FirmwarePath,
+MachineAccelerators: sconfig.HypervisorConfig.MachineAccelerators,
+HypervisorPath: sconfig.HypervisorConfig.HypervisorPath,
+HypervisorCtlPath: sconfig.HypervisorConfig.HypervisorCtlPath,
+JailerPath: sconfig.HypervisorConfig.JailerPath,
+BlockDeviceDriver: sconfig.HypervisorConfig.BlockDeviceDriver,
+HypervisorMachineType: sconfig.HypervisorConfig.HypervisorMachineType,
+MemoryPath: sconfig.HypervisorConfig.MemoryPath,
+DevicesStatePath: sconfig.HypervisorConfig.DevicesStatePath,
+EntropySource: sconfig.HypervisorConfig.EntropySource,
+SharedFS: sconfig.HypervisorConfig.SharedFS,
+VirtioFSDaemon: sconfig.HypervisorConfig.VirtioFSDaemon,
+VirtioFSCache: sconfig.HypervisorConfig.VirtioFSCache,
+VirtioFSExtraArgs: sconfig.HypervisorConfig.VirtioFSExtraArgs[:],
+BlockDeviceCacheSet: sconfig.HypervisorConfig.BlockDeviceCacheSet,
+BlockDeviceCacheDirect: sconfig.HypervisorConfig.BlockDeviceCacheDirect,
+BlockDeviceCacheNoflush: sconfig.HypervisorConfig.BlockDeviceCacheNoflush,
+DisableBlockDeviceUse: sconfig.HypervisorConfig.DisableBlockDeviceUse,
+EnableIOThreads: sconfig.HypervisorConfig.EnableIOThreads,
+Debug: sconfig.HypervisorConfig.Debug,
+MemPrealloc: sconfig.HypervisorConfig.MemPrealloc,
+HugePages: sconfig.HypervisorConfig.HugePages,
+FileBackedMemRootDir: sconfig.HypervisorConfig.FileBackedMemRootDir,
+Realtime: sconfig.HypervisorConfig.Realtime,
+Mlock: sconfig.HypervisorConfig.Mlock,
+DisableNestingChecks: sconfig.HypervisorConfig.DisableNestingChecks,
+UseVSock: sconfig.HypervisorConfig.UseVSock,
+HotplugVFIOOnRootBus: sconfig.HypervisorConfig.HotplugVFIOOnRootBus,
+BootToBeTemplate: sconfig.HypervisorConfig.BootToBeTemplate,
+BootFromTemplate: sconfig.HypervisorConfig.BootFromTemplate,
+DisableVhostNet: sconfig.HypervisorConfig.DisableVhostNet,
+GuestHookPath: sconfig.HypervisorConfig.GuestHookPath,
+VMid: sconfig.HypervisorConfig.VMid,
+}
+
+if sconfig.AgentType == "kata" {
+var sagent KataAgentConfig
+err := mapstructure.Decode(sconfig.AgentConfig, &sagent)
+if err != nil {
+s.Logger().WithError(err).Error("internal error: KataAgentConfig failed to decode")
+} else {
+ss.Config.KataAgentConfig = &persistapi.KataAgentConfig{
+LongLiveConn: sagent.LongLiveConn,
+UseVSock: sagent.UseVSock,
+}
+}
+}
+
+if sconfig.ShimType == "kataShim" {
+var shim ShimConfig
+err := mapstructure.Decode(sconfig.ShimConfig, &shim)
+if err != nil {
+s.Logger().WithError(err).Error("internal error: ShimConfig failed to decode")
+} else {
+ss.Config.KataShimConfig = &persistapi.ShimConfig{
+Path: shim.Path,
+Debug: shim.Debug,
+}
+}
+}
+
+for _, contConf := range sconfig.Containers {
+ss.Config.ContainerConfigs = append(ss.Config.ContainerConfigs, persistapi.ContainerConfig{
+ID: contConf.ID,
+Annotations: contConf.Annotations,
+RootFs: contConf.RootFs.Target,
+Resources: contConf.Resources,
+})
+}
+}
+
 func (s *Sandbox) Save() error {
 var (
 ss = persistapi.SandboxState{}
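The `dumpConfig` code above relies on `mapstructure.Decode` to turn the loosely typed agent/shim configuration (`interface{}`) back into a concrete struct before persisting selected fields. A minimal sketch of that step, with a hypothetical `agentConfig` struct standing in for the real `KataAgentConfig`:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// agentConfig is a hypothetical stand-in for the typed agent configuration
// (the real code decodes into KataAgentConfig).
type agentConfig struct {
	LongLiveConn bool
	UseVSock     bool
}

func main() {
	// In the sandbox config the agent settings are held as an interface{};
	// after a generic round trip they may arrive as a plain map.
	var untyped interface{} = map[string]interface{}{
		"LongLiveConn": true,
		"UseVSock":     false,
	}

	var cfg agentConfig
	if err := mapstructure.Decode(untyped, &cfg); err != nil {
		fmt.Println("internal error: agent config failed to decode:", err)
		return
	}
	fmt.Printf("persisting agent config: %+v\n", cfg)
}
```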
@@ -185,6 +299,7 @@ func (s *Sandbox) Save() error {
 s.dumpMounts(cs)
 s.dumpAgent(&ss)
 s.dumpNetwork(&ss)
+s.dumpConfig(&ss)
 
 if err := s.newStore.ToDisk(ss, cs); err != nil {
 return err
@@ -335,3 +450,119 @@ func (s *Sandbox) supportNewStore() bool {
 }
 return false
 }
+
+func loadSandboxConfig(id string) (*SandboxConfig, error) {
+store, err := persist.GetDriver("fs")
+if err != nil || store == nil {
+return nil, errors.New("failed to get fs persist driver")
+}
+
+ss, _, err := store.FromDisk(id)
+if err != nil {
+return nil, err
+}
+
+savedConf := ss.Config
+sconfig := &SandboxConfig{
+ID: id,
+HypervisorType: HypervisorType(savedConf.HypervisorType),
+AgentType: AgentType(savedConf.AgentType),
+ProxyType: ProxyType(savedConf.ProxyType),
+ProxyConfig: ProxyConfig{
+Path: savedConf.ProxyConfig.Path,
+Debug: savedConf.ProxyConfig.Debug,
+},
+ShimType: ShimType(savedConf.ShimType),
+NetworkConfig: NetworkConfig{
+NetNSPath: savedConf.NetworkConfig.NetNSPath,
+NetNsCreated: savedConf.NetworkConfig.NetNsCreated,
+DisableNewNetNs: savedConf.NetworkConfig.DisableNewNetNs,
+InterworkingModel: NetInterworkingModel(savedConf.NetworkConfig.InterworkingModel),
+},
+
+ShmSize: savedConf.ShmSize,
+SharePidNs: savedConf.SharePidNs,
+Stateful: savedConf.Stateful,
+SystemdCgroup: savedConf.SystemdCgroup,
+SandboxCgroupOnly: savedConf.SandboxCgroupOnly,
+DisableGuestSeccomp: savedConf.DisableGuestSeccomp,
+}
+
+for _, name := range savedConf.Experimental {
+sconfig.Experimental = append(sconfig.Experimental, *exp.Get(name))
+}
+
+hconf := savedConf.HypervisorConfig
+sconfig.HypervisorConfig = HypervisorConfig{
+NumVCPUs: hconf.NumVCPUs,
+DefaultMaxVCPUs: hconf.DefaultMaxVCPUs,
+MemorySize: hconf.MemorySize,
+DefaultBridges: hconf.DefaultBridges,
+Msize9p: hconf.Msize9p,
+MemSlots: hconf.MemSlots,
+MemOffset: hconf.MemOffset,
+VirtioFSCacheSize: hconf.VirtioFSCacheSize,
+KernelPath: hconf.KernelPath,
+ImagePath: hconf.ImagePath,
+InitrdPath: hconf.InitrdPath,
+FirmwarePath: hconf.FirmwarePath,
+MachineAccelerators: hconf.MachineAccelerators,
+HypervisorPath: hconf.HypervisorPath,
+HypervisorCtlPath: hconf.HypervisorCtlPath,
+JailerPath: hconf.JailerPath,
+BlockDeviceDriver: hconf.BlockDeviceDriver,
+HypervisorMachineType: hconf.HypervisorMachineType,
+MemoryPath: hconf.MemoryPath,
+DevicesStatePath: hconf.DevicesStatePath,
+EntropySource: hconf.EntropySource,
+SharedFS: hconf.SharedFS,
+VirtioFSDaemon: hconf.VirtioFSDaemon,
+VirtioFSCache: hconf.VirtioFSCache,
+VirtioFSExtraArgs: hconf.VirtioFSExtraArgs[:],
+BlockDeviceCacheSet: hconf.BlockDeviceCacheSet,
+BlockDeviceCacheDirect: hconf.BlockDeviceCacheDirect,
+BlockDeviceCacheNoflush: hconf.BlockDeviceCacheNoflush,
+DisableBlockDeviceUse: hconf.DisableBlockDeviceUse,
+EnableIOThreads: hconf.EnableIOThreads,
+Debug: hconf.Debug,
+MemPrealloc: hconf.MemPrealloc,
+HugePages: hconf.HugePages,
+FileBackedMemRootDir: hconf.FileBackedMemRootDir,
+Realtime: hconf.Realtime,
+Mlock: hconf.Mlock,
+DisableNestingChecks: hconf.DisableNestingChecks,
+UseVSock: hconf.UseVSock,
+HotplugVFIOOnRootBus: hconf.HotplugVFIOOnRootBus,
+BootToBeTemplate: hconf.BootToBeTemplate,
+BootFromTemplate: hconf.BootFromTemplate,
+DisableVhostNet: hconf.DisableVhostNet,
+GuestHookPath: hconf.GuestHookPath,
+VMid: hconf.VMid,
+}
+
+if savedConf.AgentType == "kata" {
+sconfig.AgentConfig = KataAgentConfig{
+LongLiveConn: savedConf.KataAgentConfig.LongLiveConn,
+UseVSock: savedConf.KataAgentConfig.UseVSock,
+}
+}
+
+if savedConf.ShimType == "kataShim" {
+sconfig.ShimConfig = ShimConfig{
+Path: savedConf.KataShimConfig.Path,
+Debug: savedConf.KataShimConfig.Debug,
+}
+}
+
+for _, contConf := range savedConf.ContainerConfigs {
+sconfig.Containers = append(sconfig.Containers, ContainerConfig{
+ID: contConf.ID,
+Annotations: contConf.Annotations,
+Resources: contConf.Resources,
+RootFs: RootFs{
+Target: contConf.RootFs,
+},
+})
+}
+return sconfig, nil
+}
@@ -6,17 +6,9 @@
 
 package persistapi
 
-// Param is a key/value representation for hypervisor and kernel parameters.
-type Param struct {
-Key string
-Value string
-}
-
-// Asset saves hypervisor asset
-type Asset struct {
-Path string `json:"path"`
-Custom bool `json:"bool"`
-}
+import (
+specs "github.com/opencontainers/runtime-spec/specs-go"
+)
 
 // HypervisorConfig saves configurations of sandbox hypervisor
 type HypervisorConfig struct {

@@ -42,11 +34,8 @@ type HypervisorConfig struct {
 // MemOffset specifies memory space for nvdimm device
 MemOffset uint32
 
-// KernelParams are additional guest kernel parameters.
-KernelParams []Param
-
-// HypervisorParams are additional hypervisor parameters.
-HypervisorParams []Param
+// VirtioFSCacheSize is the DAX cache size in MiB
+VirtioFSCacheSize uint32
 
 // KernelPath is the guest kernel host path.
 KernelPath string

@@ -67,6 +56,12 @@ type HypervisorConfig struct {
 // HypervisorPath is the hypervisor executable host path.
 HypervisorPath string
 
+// HypervisorCtlPath is the hypervisor ctl executable host path.
+HypervisorCtlPath string
+
+// JailerPath is the jailer executable host path.
+JailerPath string
+
 // BlockDeviceDriver specifies the driver to be used for block device
 // either VirtioSCSI or VirtioBlock with the default driver being defaultBlockDriver
 BlockDeviceDriver string

@@ -87,11 +82,19 @@ type HypervisorConfig struct {
 // entropy (/dev/random, /dev/urandom or real hardware RNG device)
 EntropySource string
 
-// customAssets is a map of assets.
-// Each value in that map takes precedence over the configured assets.
-// For example, if there is a value for the "kernel" key in this map,
-// it will be used for the sandbox's kernel path instead of KernelPath.
-CustomAssets map[string]*Asset
+// Shared file system type:
+// - virtio-9p (default)
+// - virtio-fs
+SharedFS string
+
+// VirtioFSDaemon is the virtio-fs vhost-user daemon path
+VirtioFSDaemon string
+
+// VirtioFSCache cache mode for fs version cache or "none"
+VirtioFSCache string
+
+// VirtioFSExtraArgs passes options to virtiofsd daemon
+VirtioFSExtraArgs []string
 
 // BlockDeviceCacheSet specifies cache-related options will be set to block devices or not.
 BlockDeviceCacheSet bool

@@ -154,6 +157,10 @@ type HypervisorConfig struct {
 
 // GuestHookPath is the path within the VM that will be used for 'drop-in' hooks
 GuestHookPath string
+
+// VMid is the id of the VM that create the hypervisor if the VM is created by the factory.
+// VMid is "" if the hypervisor is not created by the factory.
+VMid string
 }
 
 // KataAgentConfig is a structure storing information needed

@@ -186,6 +193,18 @@ type ShimConfig struct {
 
 // NetworkConfig is the network configuration related to a network.
 type NetworkConfig struct {
+NetNSPath string
+NetNsCreated bool
+DisableNewNetNs bool
+InterworkingModel int
+}
+
+type ContainerConfig struct {
+ID string
+Annotations map[string]string
+RootFs string
+// Resources for recoding update
+Resources specs.LinuxResources
 }
 
 // SandboxConfig is a sandbox configuration.

@@ -197,15 +216,13 @@ type SandboxConfig struct {
 // only one agent config can be non-nil according to agent type
 AgentType string
 KataAgentConfig *KataAgentConfig `json:",omitempty"`
-HyperstartConfig *HyperstartConfig `json:",omitempty"`
 
 ProxyType string
 ProxyConfig ProxyConfig
 
 ShimType string
-KataShimConfig ShimConfig
+KataShimConfig *ShimConfig
 
-NetworkModel string
 NetworkConfig NetworkConfig
 
 ShmSize uint64

@@ -220,11 +237,18 @@ type SandboxConfig struct {
 // SystemdCgroup enables systemd cgroup support
 SystemdCgroup bool
 
+// SandboxCgroupOnly enables cgroup only at podlevel in the host
+SandboxCgroupOnly bool
+
+DisableGuestSeccomp bool
+
 // Experimental enables experimental features
-Experimental bool
+Experimental []string
 
 // Information for fields not saved:
 // * Annotation: this is kind of casual data, we don't need casual data in persist file,
 // if you know this data needs to persist, please gives it
 // a specific field
+
+ContainerConfigs []ContainerConfig
 }
@@ -14,6 +14,7 @@ import (
 "path/filepath"
 "syscall"
 
+"github.com/kata-containers/runtime/pkg/rootless"
 persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
 "github.com/sirupsen/logrus"
 )

@@ -36,9 +37,15 @@ const storagePathSuffix = "vc"
 // sandboxPathSuffix is the suffix used for sandbox storage
 const sandboxPathSuffix = "sbs"
 
-// runStoragePath is the sandbox runtime directory.
+// RunStoragePath is the sandbox runtime directory.
 // It will contain one state.json and one lock file for each created sandbox.
-var runStoragePath = filepath.Join("/run", storagePathSuffix, sandboxPathSuffix)
+var RunStoragePath = func() string {
+path := filepath.Join("/run", storagePathSuffix, sandboxPathSuffix)
+if rootless.IsRootless() {
+return filepath.Join(rootless.GetRootlessDir(), path)
+}
+return path
+}
 
 // FS storage driver implementation
 type FS struct {

@@ -76,7 +83,7 @@ func (fs *FS) sandboxDir() (string, error) {
 return "", fmt.Errorf("sandbox container id required")
 }
 
-return filepath.Join(runStoragePath, id), nil
+return filepath.Join(RunStoragePath(), id), nil
 }
 
 // ToDisk sandboxState and containerState to disk

@@ -119,12 +126,22 @@ func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.Contai
 return err
 }
 
+var dirCreationErr error
+var createdDirs []string
+defer func() {
+if dirCreationErr != nil && len(createdDirs) > 0 {
+for _, dir := range createdDirs {
+os.RemoveAll(dir)
+}
+}
+}()
 // persist container configuration data
 for cid, cstate := range fs.containerState {
 cdir := filepath.Join(sandboxDir, cid)
-if err := os.MkdirAll(cdir, dirMode); err != nil {
-return err
+if dirCreationErr = os.MkdirAll(cdir, dirMode); dirCreationErr != nil {
+return dirCreationErr
 }
+createdDirs = append(createdDirs, cdir)
 
 cfile := filepath.Join(cdir, persistFile)
 cf, err := os.OpenFile(cfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode)

@@ -132,10 +149,10 @@ func (fs *FS) ToDisk(ss persistapi.SandboxState, cs map[string]persistapi.Contai
 return err
 }
 
+defer cf.Close()
 if err := json.NewEncoder(cf).Encode(cstate); err != nil {
 return err
 }
-cf.Close()
 }
 
 return nil

@@ -194,11 +211,11 @@ func (fs *FS) FromDisk(sid string) (persistapi.SandboxState, map[string]persista
 return ss, nil, err
 }
 
+defer cf.Close()
 var cstate persistapi.ContainerState
 if err := json.NewDecoder(cf).Decode(&cstate); err != nil {
 return ss, nil, err
 }
-cf.Close()
 
 fs.containerState[cid] = cstate
 }

@@ -254,8 +271,10 @@ func (fs *FS) unlock() error {
 return nil
 }
 
-// TestSetRunStoragePath set runStoragePath to path
+// TestSetRunStoragePath set RunStoragePath to path
 // this function is only used for testing purpose
 func TestSetRunStoragePath(path string) {
-runStoragePath = path
+RunStoragePath = func() string {
+return path
+}
 }
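The storage path above becomes a package-level `func() string` rather than a plain string, so the rootless prefix is computed at call time and tests can swap the whole function out. A stripped-down sketch of that design, with the rootless check stubbed and paths used as placeholders:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// isRootless stands in for rootless.IsRootless(); assume it is wired to the
// runtime's real detection logic.
var isRootless = func() bool { return false }

// RunStoragePath mirrors the pattern introduced in this diff: a function
// variable whose result can depend on runtime state and which tests can
// replace wholesale.
var RunStoragePath = func() string {
	path := filepath.Join("/run", "vc", "sbs")
	if isRootless() {
		return filepath.Join("/run/user/1000", path) // placeholder rootless prefix
	}
	return path
}

// testSetRunStoragePath shows the override hook used by unit tests.
func testSetRunStoragePath(path string) {
	RunStoragePath = func() string { return path }
}

func main() {
	fmt.Println("default:", RunStoragePath())
	testSetRunStoragePath("/tmp/vc-test/sbs")
	fmt.Println("test override:", RunStoragePath())
}
```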
@@ -225,6 +225,9 @@ func (s *Sandbox) Logger() *logrus.Entry {
 
 // Annotations returns any annotation that a user could have stored through the sandbox.
 func (s *Sandbox) Annotations(key string) (string, error) {
+s.annotationsLock.RLock()
+defer s.annotationsLock.RUnlock()
+
 value, exist := s.config.Annotations[key]
 if !exist {
 return "", fmt.Errorf("Annotations key %s does not exist", key)

@@ -242,8 +245,11 @@ func (s *Sandbox) SetAnnotations(annotations map[string]string) error {
 s.config.Annotations[k] = v
 }
 
+if !s.supportNewStore() {
 return s.store.Store(store.Configuration, *(s.config))
 }
+return nil
+}
 
 // GetAnnotations returns sandbox's annotations
 func (s *Sandbox) GetAnnotations() map[string]string {
@@ -617,6 +623,12 @@ func (s *Sandbox) storeSandbox() error {
 span, _ := s.trace("storeSandbox")
 defer span.Finish()
 
+if s.supportNewStore() {
+// flush data to storage
+if err := s.Save(); err != nil {
+return err
+}
+} else {
 err := s.store.Store(store.Configuration, *(s.config))
 if err != nil {
 return err

@@ -628,14 +640,7 @@ func (s *Sandbox) storeSandbox() error {
 return err
 }
 }
-
-if s.supportNewStore() {
-// flush data to storage
-if err := s.Save(); err != nil {
-return err
 }
-}
 
 return nil
 }
@@ -673,6 +678,22 @@ func unlockSandbox(ctx context.Context, sandboxID, token string) error {
 return store.Unlock(token)
 }
 
+func supportNewStore(ctx context.Context) bool {
+if exp.Get("newstore") == nil {
+return false
+}
+
+// check if client context enabled "newstore" feature
+exps := exp.ExpFromContext(ctx)
+for _, v := range exps {
+if v == "newstore" {
+return true
+}
+}
+
+return false
+}
+
 // fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox.
 func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err error) {
 virtLog.Info("fetch sandbox")
@@ -685,16 +706,25 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
 return sandbox, err
 }
 
+var config SandboxConfig
+
+if supportNewStore(ctx) {
+c, err := loadSandboxConfig(sandboxID)
+if err != nil {
+return nil, err
+}
+config = *c
+} else {
 // We're bootstrapping
 vcStore, err := store.NewVCSandboxStore(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
 
-var config SandboxConfig
 if err := vcStore.Load(store.Configuration, &config); err != nil {
 return nil, err
 }
+}
 
 // fetchSandbox is not suppose to create new sandbox VM.
 sandbox, err = createSandbox(ctx, config, nil)
@@ -1493,13 +1523,21 @@ func (s *Sandbox) Start() error {
 return err
 }
 
+prevState := s.state.State
+
 if err := s.setSandboxState(types.StateRunning); err != nil {
 return err
 }
 
+var startErr error
+defer func() {
+if startErr != nil {
+s.setSandboxState(prevState)
+}
+}()
 for _, c := range s.containers {
-if err := c.start(); err != nil {
-return err
+if startErr = c.start(); startErr != nil {
+return startErr
 }
 }
 
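The change to `Start` above records the previous state and rolls it back from a deferred function if any container fails to start. A generic sketch of that rollback pattern, with a toy state type standing in for the sandbox state machine:

```go
package main

import (
	"errors"
	"fmt"
)

type state string

const (
	stateReady   state = "ready"
	stateRunning state = "running"
)

type sandbox struct {
	state      state
	containers []func() error
}

// start mirrors the rollback pattern: remember the previous state, switch to
// running, and restore the old state from a defer if any container fails.
func (s *sandbox) start() error {
	prevState := s.state
	s.state = stateRunning

	var startErr error
	defer func() {
		if startErr != nil {
			s.state = prevState
		}
	}()

	for _, c := range s.containers {
		if startErr = c(); startErr != nil {
			return startErr
		}
	}
	return nil
}

func main() {
	s := &sandbox{
		state: stateReady,
		containers: []func() error{
			func() error { return nil },
			func() error { return errors.New("boom") },
		},
	}
	fmt.Println("start:", s.start(), "state:", s.state) // state rolled back to "ready"
}
```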
@@ -188,6 +188,7 @@ func (s *VSock) String() string {
 // Firecracker supports it.
 type HybridVSock struct {
 UdsPath string
+ContextID uint64
 Port uint32
 }
 