tracing: Add tracing support to virtcontainers

Add additional `context.Context` parameters and `struct` fields to allow
trace spans to be created by the `virtcontainers` internal functions,
objects and sub-packages.

Note that not every function is traced; we can add more traces as
desired.
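
For reference, the general shape of the change is sketched below. This is a minimal, self-contained illustration, not code from the patch: the `trace` helper mirrors the helpers this commit adds, while `doWork` is a hypothetical traced function showing the calling convention (accept a `context.Context`, open a span, finish it on return).

```go
package main

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

// trace mirrors the helpers added in this commit: start a child span from the
// caller's context, tag it with the subsystem that created it, and return the
// derived context so callees can create their own child spans.
func trace(parent context.Context, subsys, name string) (opentracing.Span, context.Context) {
	span, ctx := opentracing.StartSpanFromContext(parent, name)
	span.SetTag("subsystem", subsys)
	return span, ctx
}

// doWork is a hypothetical traced function demonstrating the convention.
func doWork(ctx context.Context) error {
	span, _ := trace(ctx, "example", "doWork")
	defer span.Finish()
	// ... real work would happen here ...
	return nil
}

func main() {
	_ = doWork(context.Background())
}
```

Objects that outlive a single call (sandboxes, containers, agents, storage) instead store the context in a `ctx` struct field, so their methods can start spans without threading a parameter through every internal signature.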

Fixes #566.

Signed-off-by: James O. D. Hunt <james.o.hunt@intel.com>
James O. D. Hunt 2018-08-21 10:53:31 +01:00
parent 6ddc9b4e8e
commit d0679a6fd1
40 changed files with 658 additions and 200 deletions


@@ -128,10 +128,10 @@ func create(ctx context.Context, containerID, bundlePath, console, pidFilePath s
 },
 }
 kataLog.WithField("factory", factoryConfig).Info("load vm factory")
-f, err := vf.NewFactory(factoryConfig, true)
+f, err := vf.NewFactory(ctx, factoryConfig, true)
 if err != nil {
 kataLog.WithError(err).Warn("load vm factory failed, about to create new one")
-f, err = vf.NewFactory(factoryConfig, false)
+f, err = vf.NewFactory(ctx, factoryConfig, false)
 if err != nil {
 kataLog.WithError(err).Warn("create vm factory failed")
 }


@@ -33,8 +33,13 @@ var factoryCLICommand = cli.Command{
 var initFactoryCommand = cli.Command{
 Name: "init",
 Usage: "initialize a VM factory based on kata-runtime configuration",
-Action: func(context *cli.Context) error {
-runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
+Action: func(c *cli.Context) error {
+ctx, err := cliContextToContext(c)
+if err != nil {
+return err
+}
+runtimeConfig, ok := c.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
 if !ok {
 return errors.New("invalid runtime config")
 }
@@ -50,7 +55,7 @@ var initFactoryCommand = cli.Command{
 },
 }
 kataLog.WithField("factory", factoryConfig).Info("create vm factory")
-_, err := vf.NewFactory(factoryConfig, false)
+_, err := vf.NewFactory(ctx, factoryConfig, false)
 if err != nil {
 kataLog.WithError(err).Error("create vm factory failed")
 return err
@@ -68,8 +73,13 @@ var initFactoryCommand = cli.Command{
 var destroyFactoryCommand = cli.Command{
 Name: "destroy",
 Usage: "destroy the VM factory",
-Action: func(context *cli.Context) error {
-runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
+Action: func(c *cli.Context) error {
+ctx, err := cliContextToContext(c)
+if err != nil {
+return err
+}
+runtimeConfig, ok := c.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
 if !ok {
 return errors.New("invalid runtime config")
 }
@@ -85,12 +95,12 @@ var destroyFactoryCommand = cli.Command{
 },
 }
 kataLog.WithField("factory", factoryConfig).Info("load vm factory")
-f, err := vf.NewFactory(factoryConfig, true)
+f, err := vf.NewFactory(ctx, factoryConfig, true)
 if err != nil {
 kataLog.WithError(err).Error("load vm factory failed")
 // ignore error
 } else {
-f.CloseFactory()
+f.CloseFactory(ctx)
 }
 }
 fmt.Fprintln(defaultOutputFile, "vm factory destroyed")
@@ -101,8 +111,13 @@ var destroyFactoryCommand = cli.Command{
 var statusFactoryCommand = cli.Command{
 Name: "status",
 Usage: "query the status of VM factory",
-Action: func(context *cli.Context) error {
-runtimeConfig, ok := context.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
+Action: func(c *cli.Context) error {
+ctx, err := cliContextToContext(c)
+if err != nil {
+return err
+}
+runtimeConfig, ok := c.App.Metadata["runtimeConfig"].(oci.RuntimeConfig)
 if !ok {
 return errors.New("invalid runtime config")
 }
@@ -118,11 +133,11 @@ var statusFactoryCommand = cli.Command{
 },
 }
 kataLog.WithField("factory", factoryConfig).Info("load vm factory")
-f, err := vf.NewFactory(factoryConfig, true)
+f, err := vf.NewFactory(ctx, factoryConfig, true)
 if err != nil {
 fmt.Fprintln(defaultOutputFile, "vm factory is off")
 } else {
-f.CloseFactory()
+f.CloseFactory(ctx)
 fmt.Fprintln(defaultOutputFile, "vm factory is on")
 }
 } else {


@@ -6,6 +6,7 @@
 package virtcontainers
 import (
+"context"
 "fmt"
 "syscall"
@@ -129,7 +130,7 @@ type agent interface {
 // init().
 // After init() is called, agent implementations should be initialized and ready
 // to handle all other Agent interface methods.
-init(sandbox *Sandbox, config interface{}) error
+init(ctx context.Context, sandbox *Sandbox, config interface{}) error
 // capabilities should return a structure that specifies the capabilities
 // supported by the agent.


@@ -40,6 +40,14 @@ func trace(parent context.Context, name string) (opentracing.Span, context.Conte
 return span, ctx
 }
+func traceWithSubsys(ctx context.Context, subsys, name string) (opentracing.Span, context.Context) {
+span, ctx := opentracing.StartSpanFromContext(ctx, name)
+span.SetTag("subsystem", subsys)
+return span, ctx
+}
 // SetLogger sets the logger for virtcontainers package.
 func SetLogger(ctx context.Context, logger *logrus.Entry) {
 fields := virtLog.Data
@@ -54,7 +62,7 @@ func CreateSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
 span, ctx := trace(ctx, "CreateSandbox")
 defer span.Finish()
-s, err := createSandboxFromConfig(sandboxConfig, factory)
+s, err := createSandboxFromConfig(ctx, sandboxConfig, factory)
 if err == nil {
 s.releaseStatelessSandbox()
 }
@@ -62,11 +70,14 @@ func CreateSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
 return s, err
 }
-func createSandboxFromConfig(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
+func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
+span, ctx := trace(ctx, "createSandboxFromConfig")
+defer span.Finish()
 var err error
 // Create the sandbox.
-s, err := createSandbox(sandboxConfig, factory)
+s, err := createSandbox(ctx, sandboxConfig, factory)
 if err != nil {
 return nil, err
 }
@@ -139,7 +150,7 @@ func DeleteSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 defer unlockSandbox(lockFile)
 // Fetch the sandbox from storage and create it.
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -172,7 +183,7 @@ func FetchSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 defer unlockSandbox(lockFile)
 // Fetch the sandbox from storage and create it.
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -209,7 +220,7 @@ func StartSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 defer unlockSandbox(lockFile)
 // Fetch the sandbox from storage and create it.
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -250,7 +261,7 @@ func StopSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 defer unlockSandbox(lockFile)
 // Fetch the sandbox from storage and create it.
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -281,7 +292,7 @@ func RunSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
 span, ctx := trace(ctx, "RunSandbox")
 defer span.Finish()
-s, err := createSandboxFromConfig(sandboxConfig, factory)
+s, err := createSandboxFromConfig(ctx, sandboxConfig, factory)
 if err != nil {
 return nil, err
 }
@@ -345,7 +356,7 @@ func StatusSandbox(ctx context.Context, sandboxID string) (SandboxStatus, error)
 return SandboxStatus{}, err
 }
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 unlockSandbox(lockFile)
 return SandboxStatus{}, err
@@ -401,7 +412,7 @@ func CreateContainer(ctx context.Context, sandboxID string, containerConfig Cont
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, nil, err
 }
@@ -433,7 +444,7 @@ func DeleteContainer(ctx context.Context, sandboxID, containerID string) (VCCont
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -459,7 +470,7 @@ func StartContainer(ctx context.Context, sandboxID, containerID string) (VCConta
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -493,7 +504,7 @@ func StopContainer(ctx context.Context, sandboxID, containerID string) (VCContai
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -534,7 +545,7 @@ func EnterContainer(ctx context.Context, sandboxID, containerID string, cmd Cmd)
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, nil, nil, err
 }
@@ -567,7 +578,7 @@ func StatusContainer(ctx context.Context, sandboxID, containerID string) (Contai
 return ContainerStatus{}, err
 }
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 unlockSandbox(lockFile)
 return ContainerStatus{}, err
@@ -658,7 +669,7 @@ func KillContainer(ctx context.Context, sandboxID, containerID string, signal sy
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return err
 }
@@ -685,7 +696,7 @@ func PauseSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 span, ctx := trace(ctx, "PauseSandbox")
 defer span.Finish()
-return togglePauseSandbox(sandboxID, true)
+return togglePauseSandbox(ctx, sandboxID, true)
 }
 // ResumeSandbox is the virtcontainers resuming entry point which resumes
@@ -694,7 +705,7 @@ func ResumeSandbox(ctx context.Context, sandboxID string) (VCSandbox, error) {
 span, ctx := trace(ctx, "ResumeSandbox")
 defer span.Finish()
-return togglePauseSandbox(sandboxID, false)
+return togglePauseSandbox(ctx, sandboxID, false)
 }
 // ProcessListContainer is the virtcontainers entry point to list
@@ -717,7 +728,7 @@ func ProcessListContainer(ctx context.Context, sandboxID, containerID string, op
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -752,7 +763,7 @@ func UpdateContainer(ctx context.Context, sandboxID, containerID string, resourc
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return err
 }
@@ -781,7 +792,7 @@ func StatsContainer(ctx context.Context, sandboxID, containerID string) (Contain
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return ContainerStats{}, err
 }
@@ -790,7 +801,7 @@ func StatsContainer(ctx context.Context, sandboxID, containerID string) (Contain
 return s.StatsContainer(containerID)
 }
-func togglePauseContainer(sandboxID, containerID string, pause bool) error {
+func togglePauseContainer(ctx context.Context, sandboxID, containerID string, pause bool) error {
 if sandboxID == "" {
 return errNeedSandboxID
 }
@@ -805,7 +816,7 @@ func togglePauseContainer(sandboxID, containerID string, pause bool) error {
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return err
 }
@@ -829,7 +840,7 @@ func PauseContainer(ctx context.Context, sandboxID, containerID string) error {
 span, ctx := trace(ctx, "PauseContainer")
 defer span.Finish()
-return togglePauseContainer(sandboxID, containerID, true)
+return togglePauseContainer(ctx, sandboxID, containerID, true)
 }
 // ResumeContainer is the virtcontainers container resume entry point.
@@ -837,7 +848,7 @@ func ResumeContainer(ctx context.Context, sandboxID, containerID string) error {
 span, ctx := trace(ctx, "ResumeContainer")
 defer span.Finish()
-return togglePauseContainer(sandboxID, containerID, false)
+return togglePauseContainer(ctx, sandboxID, containerID, false)
 }
 // AddDevice will add a device to sandbox
@@ -852,7 +863,7 @@ func AddDevice(ctx context.Context, sandboxID string, info deviceConfig.DeviceIn
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -872,7 +883,7 @@ func toggleInterface(ctx context.Context, sandboxID string, inf *grpc.Interface,
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -904,7 +915,7 @@ func ListInterfaces(ctx context.Context, sandboxID string) ([]*grpc.Interface, e
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -924,7 +935,7 @@ func UpdateRoutes(ctx context.Context, sandboxID string, routes []*grpc.Route) (
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }
@@ -943,7 +954,7 @@ func ListRoutes(ctx context.Context, sandboxID string) ([]*grpc.Route, error) {
 }
 defer unlockSandbox(lockFile)
-s, err := fetchSandbox(sandboxID)
+s, err := fetchSandbox(ctx, sandboxID)
 if err != nil {
 return nil, err
 }


@@ -491,7 +491,7 @@ func TestStartSandboxHyperstartAgentSuccessful(t *testing.T) {
 pImpl, ok := p.(*Sandbox)
 assert.True(t, ok)
-bindUnmountAllRootfs(defaultSharedDir, pImpl)
+bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
 }
 func TestStartSandboxKataAgentSuccessful(t *testing.T) {
@@ -532,7 +532,7 @@ func TestStartSandboxKataAgentSuccessful(t *testing.T) {
 pImpl, ok := p.(*Sandbox)
 assert.True(t, ok)
-bindUnmountAllRootfs(defaultSharedDir, pImpl)
+bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
 }
 func TestStartSandboxFailing(t *testing.T) {
@@ -770,7 +770,7 @@ func TestRunSandboxHyperstartAgentSuccessful(t *testing.T) {
 pImpl, ok := p.(*Sandbox)
 assert.True(t, ok)
-bindUnmountAllRootfs(defaultSharedDir, pImpl)
+bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
 }
 func TestRunSandboxKataAgentSuccessful(t *testing.T) {
@@ -817,7 +817,7 @@ func TestRunSandboxKataAgentSuccessful(t *testing.T) {
 pImpl, ok := p.(*Sandbox)
 assert.True(t, ok)
-bindUnmountAllRootfs(defaultSharedDir, pImpl)
+bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
 }
 func TestRunSandboxFailing(t *testing.T) {
@@ -1380,7 +1380,7 @@ func TestStartStopContainerHyperstartAgentSuccessful(t *testing.T) {
 pImpl, ok := p.(*Sandbox)
 assert.True(t, ok)
-bindUnmountAllRootfs(defaultSharedDir, pImpl)
+bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
 }
 func TestStartStopSandboxHyperstartAgentSuccessfulWithCNMNetwork(t *testing.T) {
@@ -1608,7 +1608,7 @@ func TestEnterContainerHyperstartAgentSuccessful(t *testing.T) {
 pImpl, ok := p.(*Sandbox)
 assert.True(t, ok)
-bindUnmountAllRootfs(defaultSharedDir, pImpl)
+bindUnmountAllRootfs(ctx, defaultSharedDir, pImpl)
 }
 func TestEnterContainerFailingNoSandbox(t *testing.T) {
@@ -1770,7 +1770,7 @@ func TestStatusContainerStateReady(t *testing.T) {
 }
 // fresh lookup
-p2, err := fetchSandbox(p.ID())
+p2, err := fetchSandbox(ctx, p.ID())
 if err != nil {
 t.Fatal(err)
 }
@@ -1845,7 +1845,7 @@ func TestStatusContainerStateRunning(t *testing.T) {
 }
 // fresh lookup
-p2, err := fetchSandbox(p.ID())
+p2, err := fetchSandbox(ctx, p.ID())
 if err != nil {
 t.Fatal(err)
 }


@@ -6,29 +6,59 @@
 package virtcontainers
 import (
+"context"
+opentracing "github.com/opentracing/opentracing-go"
 "github.com/sirupsen/logrus"
 )
 // cnm is a network implementation for the CNM plugin.
 type cnm struct {
+ctx context.Context
 }
-func cnmLogger() *logrus.Entry {
+func (n *cnm) Logger() *logrus.Entry {
 return virtLog.WithField("subsystem", "cnm")
 }
+func (n *cnm) trace(name string) (opentracing.Span, context.Context) {
+if n.ctx == nil {
+n.Logger().WithField("type", "bug").Error("trace called before context set")
+n.ctx = context.Background()
+}
+span, ctx := opentracing.StartSpanFromContext(n.ctx, name)
+span.SetTag("subsystem", "network")
+span.SetTag("type", "cnm")
+return span, ctx
+}
 // init initializes the network, setting a new network namespace for the CNM network.
-func (n *cnm) init(config NetworkConfig) (string, bool, error) {
+func (n *cnm) init(ctx context.Context, config NetworkConfig) (string, bool, error) {
+// Set context
+n.ctx = ctx
+span, _ := n.trace("init")
+defer span.Finish()
 return initNetworkCommon(config)
 }
 // run runs a callback in the specified network namespace.
 func (n *cnm) run(networkNSPath string, cb func() error) error {
+span, _ := n.trace("run")
+defer span.Finish()
 return runNetworkCommon(networkNSPath, cb)
 }
 // add adds all needed interfaces inside the network namespace for the CNM network.
 func (n *cnm) add(sandbox *Sandbox, config NetworkConfig, netNsPath string, netNsCreated bool) (NetworkNamespace, error) {
+span, _ := n.trace("add")
+defer span.Finish()
 endpoints, err := createEndpointsFromScan(netNsPath, config)
 if err != nil {
 return NetworkNamespace{}, err
@@ -50,6 +80,18 @@ func (n *cnm) add(sandbox *Sandbox, config NetworkConfig, netNsPath string, netN
 // remove network endpoints in the network namespace. It also deletes the network
 // namespace in case the namespace has been created by us.
 func (n *cnm) remove(sandbox *Sandbox, networkNS NetworkNamespace, netNsCreated bool) error {
+// Set the context again.
+//
+// This is required since when deleting networks, the init() method is
+// not called since the network config state is simply read from disk.
+// However, the context part of that state is not stored fully since
+// context.Context is an interface type meaning all the trace metadata
+// stored in the on-disk network config file is missing.
+n.ctx = sandbox.ctx
+span, _ := n.trace("remove")
+defer span.Finish()
 if err := removeNetworkCommon(networkNS, netNsCreated); err != nil {
 return err
 }


@@ -7,6 +7,7 @@
 package virtcontainers
 import (
+"context"
 "encoding/hex"
 "fmt"
 "io"
@@ -17,6 +18,7 @@ import (
 "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 specs "github.com/opencontainers/runtime-spec/specs-go"
+opentracing "github.com/opentracing/opentracing-go"
 "github.com/sirupsen/logrus"
 "golang.org/x/sys/unix"
@@ -261,6 +263,8 @@ type Container struct {
 devices []ContainerDevice
 systemMountsInfo SystemMountsInfo
+ctx context.Context
 }
 // ID returns the container identifier string.
@@ -276,6 +280,19 @@ func (c *Container) Logger() *logrus.Entry {
 })
 }
+func (c *Container) trace(name string) (opentracing.Span, context.Context) {
+if c.ctx == nil {
+c.Logger().WithField("type", "bug").Error("trace called before context set")
+c.ctx = context.Background()
+}
+span, ctx := opentracing.StartSpanFromContext(c.ctx, name)
+span.SetTag("subsystem", "container")
+return span, ctx
+}
 // Sandbox returns the sandbox handler related to this container.
 func (c *Container) Sandbox() VCSandbox {
 return c.sandbox
@@ -468,7 +485,7 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (
 filename := fmt.Sprintf("%s-%s-%s", c.id, hex.EncodeToString(randBytes), filepath.Base(m.Destination))
 mountDest := filepath.Join(hostSharedDir, c.sandbox.id, filename)
-if err := bindMount(m.Source, mountDest, false); err != nil {
+if err := bindMount(c.ctx, m.Source, mountDest, false); err != nil {
 return nil, err
 }
@@ -503,8 +520,15 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, guestSharedDir string) (
 }
 func (c *Container) unmountHostMounts() error {
+var span opentracing.Span
+span, c.ctx = c.trace("unmountHostMounts")
+defer span.Finish()
 for _, m := range c.mounts {
 if m.HostPath != "" {
+span, _ := c.trace("unmount")
+span.SetTag("host-path", m.HostPath)
 logger := c.Logger().WithField("host-path", m.HostPath)
 if err := syscall.Unmount(m.HostPath, 0); err != nil {
 // Unable to unmount paths could be a really big problem here
@@ -520,6 +544,8 @@ func (c *Container) unmountHostMounts() error {
 logger.WithError(err).Warn("Could not be removed")
 return err
 }
+span.Finish()
 }
 }
@@ -528,6 +554,9 @@ func (c *Container) unmountHostMounts() error {
 // newContainer creates a Container structure from a sandbox and a container configuration.
 func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, error) {
+span, _ := sandbox.trace("newContainer")
+defer span.Finish()
 if contConfig.valid() == false {
 return &Container{}, fmt.Errorf("Invalid container configuration")
 }
@@ -544,6 +573,7 @@ func newContainer(sandbox *Sandbox, contConfig ContainerConfig) (*Container, err
 state: State{},
 process: Process{},
 mounts: contConfig.Mounts,
+ctx: sandbox.ctx,
 }
 state, err := c.sandbox.storage.fetchContainerState(c.sandboxID, c.id)
@@ -790,6 +820,9 @@ func (c *Container) start() error {
 }
 func (c *Container) stop() error {
+span, _ := c.trace("stop")
+defer span.Finish()
 // In case the container status has been updated implicitly because
 // the container process has terminated, it might be possible that
 // someone try to stop the container, and we don't want to issue an
@@ -811,6 +844,9 @@ func (c *Container) stop() error {
 }
 defer func() {
+span, _ := c.trace("stopShim")
+defer span.Finish()
 // If shim is still running something went wrong
 // Make sure we stop the shim process
 if running, _ := isShimRunning(c.process.Pid); running {


@@ -5,11 +5,13 @@
 package virtcontainers
+import "context"
 // Factory controls how a new VM is created.
 type Factory interface {
 // GetVM gets a new VM from the factory.
-GetVM(config VMConfig) (*VM, error)
+GetVM(ctx context.Context, config VMConfig) (*VM, error)
 // CloseFactory closes and cleans up the factory.
-CloseFactory()
+CloseFactory(ctx context.Context)
 }


@@ -5,7 +5,11 @@
 package base
-import vc "github.com/kata-containers/runtime/virtcontainers"
+import (
+"context"
+vc "github.com/kata-containers/runtime/virtcontainers"
+)
 // FactoryBase is vm factory's internal base factory interfaces.
 // The difference between FactoryBase and Factory is that the Factory
@@ -17,8 +21,8 @@ type FactoryBase interface {
 Config() vc.VMConfig
 // GetBaseVM returns a paused VM created by the base factory.
-GetBaseVM() (*vc.VM, error)
+GetBaseVM(ctx context.Context) (*vc.VM, error)
 // CloseFactory closes the base factory.
-CloseFactory()
+CloseFactory(ctx context.Context)
 }


@@ -7,6 +7,7 @@
 package cache
 import (
+"context"
 "fmt"
 "sync"
@@ -24,7 +25,7 @@ type cache struct {
 }
 // New creates a new cached vm factory.
-func New(count uint, b base.FactoryBase) base.FactoryBase {
+func New(ctx context.Context, count uint, b base.FactoryBase) base.FactoryBase {
 if count < 1 {
 return b
 }
@@ -36,10 +37,10 @@ func New(count uint, b base.FactoryBase) base.FactoryBase {
 c.wg.Add(1)
 go func() {
 for {
-vm, err := b.GetBaseVM()
+vm, err := b.GetBaseVM(ctx)
 if err != nil {
 c.wg.Done()
-c.CloseFactory()
+c.CloseFactory(ctx)
 return
 }
@@ -62,7 +63,7 @@ func (c *cache) Config() vc.VMConfig {
 }
 // GetBaseVM returns a base VM from cache factory's base factory.
-func (c *cache) GetBaseVM() (*vc.VM, error) {
+func (c *cache) GetBaseVM(ctx context.Context) (*vc.VM, error) {
 vm, ok := <-c.cacheCh
 if ok {
 return vm, nil
@@ -71,13 +72,13 @@ func (c *cache) GetBaseVM() (*vc.VM, error) {
 }
 // CloseFactory closes the cache factory.
-func (c *cache) CloseFactory() {
+func (c *cache) CloseFactory(ctx context.Context) {
 c.closeOnce.Do(func() {
 for len(c.closed) < cap(c.closed) { // send sufficient closed signal
 c.closed <- 0
 }
 c.wg.Wait()
 close(c.cacheCh)
-c.base.CloseFactory()
+c.base.CloseFactory(ctx)
 })
 }


@@ -6,6 +6,7 @@
 package cache
 import (
+"context"
 "io/ioutil"
 "testing"
@@ -29,16 +30,18 @@ func TestTemplateFactory(t *testing.T) {
 HypervisorConfig: hyperConfig,
 }
+ctx := context.Background()
 // New
-f := New(2, direct.New(vmConfig))
+f := New(ctx, 2, direct.New(ctx, vmConfig))
 // Config
 assert.Equal(f.Config(), vmConfig)
 // GetBaseVM
-_, err := f.GetBaseVM()
+_, err := f.GetBaseVM(ctx)
 assert.Nil(err)
 // CloseFactory
-f.CloseFactory()
+f.CloseFactory(ctx)
 }


@@ -7,6 +7,8 @@
 package direct
 import (
+"context"
 vc "github.com/kata-containers/runtime/virtcontainers"
 "github.com/kata-containers/runtime/virtcontainers/factory/base"
 )
@@ -16,7 +18,7 @@ type direct struct {
 }
 // New returns a new direct vm factory.
-func New(config vc.VMConfig) base.FactoryBase {
+func New(ctx context.Context, config vc.VMConfig) base.FactoryBase {
 return &direct{config}
 }
@@ -26,8 +28,8 @@ func (d *direct) Config() vc.VMConfig {
 }
 // GetBaseVM create a new VM directly.
-func (d *direct) GetBaseVM() (*vc.VM, error) {
-vm, err := vc.NewVM(d.config)
+func (d *direct) GetBaseVM(ctx context.Context) (*vc.VM, error) {
+vm, err := vc.NewVM(ctx, d.config)
 if err != nil {
 return nil, err
 }
@@ -42,5 +44,5 @@ func (d *direct) GetBaseVM() (*vc.VM, error) {
 }
 // CloseFactory closes the direct vm factory.
-func (d *direct) CloseFactory() {
+func (d *direct) CloseFactory(ctx context.Context) {
 }


@@ -6,6 +6,7 @@
 package direct
 import (
+"context"
 "io/ioutil"
 "testing"
@@ -28,16 +29,18 @@ func TestTemplateFactory(t *testing.T) {
 HypervisorConfig: hyperConfig,
 }
+ctx := context.Background()
 // New
-f := New(vmConfig)
+f := New(ctx, vmConfig)
 // Config
 assert.Equal(f.Config(), vmConfig)
 // GetBaseVM
-_, err := f.GetBaseVM()
+_, err := f.GetBaseVM(ctx)
 assert.Nil(err)
 // CloseFactory
-f.CloseFactory()
+f.CloseFactory(ctx)
 }


@@ -15,6 +15,7 @@ import (
 "github.com/kata-containers/runtime/virtcontainers/factory/cache"
 "github.com/kata-containers/runtime/virtcontainers/factory/direct"
 "github.com/kata-containers/runtime/virtcontainers/factory/template"
+opentracing "github.com/opentracing/opentracing-go"
 "github.com/sirupsen/logrus"
 )
@@ -36,8 +37,19 @@ type factory struct {
 base base.FactoryBase
 }
+func trace(parent context.Context, name string) (opentracing.Span, context.Context) {
+span, ctx := opentracing.StartSpanFromContext(parent, name)
+span.SetTag("subsystem", "factory")
+return span, ctx
+}
 // NewFactory returns a working factory.
-func NewFactory(config Config, fetchOnly bool) (vc.Factory, error) {
+func NewFactory(ctx context.Context, config Config, fetchOnly bool) (vc.Factory, error) {
+span, _ := trace(ctx, "NewFactory")
+defer span.Finish()
 err := config.validate()
 if err != nil {
 return nil, err
@@ -55,14 +67,14 @@ func NewFactory(config Config, fetchOnly bool) (vc.Factory, error) {
 return nil, err
 }
 } else {
-b = template.New(config.VMConfig)
+b = template.New(ctx, config.VMConfig)
 }
 } else {
-b = direct.New(config.VMConfig)
+b = direct.New(ctx, config.VMConfig)
 }
 if config.Cache > 0 {
-b = cache.New(config.Cache, b)
+b = cache.New(ctx, config.Cache, b)
 }
 return &factory{b}, nil
@@ -118,7 +130,10 @@ func (f *factory) checkConfig(config vc.VMConfig) error {
 }
 // GetVM returns a working blank VM created by the factory.
-func (f *factory) GetVM(config vc.VMConfig) (*vc.VM, error) {
+func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
+span, _ := trace(ctx, "GetVM")
+defer span.Finish()
 hypervisorConfig := config.HypervisorConfig
 err := config.Valid()
 if err != nil {
@@ -129,11 +144,11 @@ func (f *factory) GetVM(config vc.VMConfig) (*vc.VM, error) {
 err = f.checkConfig(config)
 if err != nil {
 f.log().WithError(err).Info("fallback to direct factory vm")
-return direct.New(config).GetBaseVM()
+return direct.New(ctx, config).GetBaseVM(ctx)
 }
 f.log().Info("get base VM")
-vm, err := f.base.GetBaseVM()
+vm, err := f.base.GetBaseVM(ctx)
 if err != nil {
 f.log().WithError(err).Error("failed to get base VM")
 return nil, err
@@ -187,6 +202,6 @@ func (f *factory) GetVM(config vc.VMConfig) (*vc.VM, error) {
 }
 // CloseFactory closes the factory.
-func (f *factory) CloseFactory() {
-f.base.CloseFactory()
+func (f *factory) CloseFactory(ctx context.Context) {
+f.base.CloseFactory(ctx)
 }


@@ -21,9 +21,10 @@ func TestNewFactory(t *testing.T) {
 assert := assert.New(t)
-_, err := NewFactory(config, true)
+ctx := context.Background()
+_, err := NewFactory(ctx, config, true)
 assert.Error(err)
-_, err = NewFactory(config, false)
+_, err = NewFactory(ctx, config, false)
 assert.Error(err)
 config.VMConfig = vc.VMConfig{
@@ -31,7 +32,7 @@ func TestNewFactory(t *testing.T) {
 AgentType: vc.NoopAgentType,
 }
-_, err = NewFactory(config, false)
+_, err = NewFactory(ctx, config, false)
 assert.Error(err)
 testDir, _ := ioutil.TempDir("", "vmfactory-tmp-")
@@ -42,29 +43,29 @@ func TestNewFactory(t *testing.T) {
 }
 // direct
-_, err = NewFactory(config, false)
+_, err = NewFactory(ctx, config, false)
 assert.Nil(err)
-_, err = NewFactory(config, true)
+_, err = NewFactory(ctx, config, true)
 assert.Nil(err)
 // template
 config.Template = true
-_, err = NewFactory(config, false)
+_, err = NewFactory(ctx, config, false)
 assert.Nil(err)
-_, err = NewFactory(config, true)
+_, err = NewFactory(ctx, config, true)
 assert.Error(err)
 // Cache
 config.Cache = 10
-_, err = NewFactory(config, false)
+_, err = NewFactory(ctx, config, false)
 assert.Nil(err)
-_, err = NewFactory(config, true)
+_, err = NewFactory(ctx, config, true)
 assert.Error(err)
 config.Template = false
-_, err = NewFactory(config, false)
+_, err = NewFactory(ctx, config, false)
 assert.Nil(err)
-_, err = NewFactory(config, true)
+_, err = NewFactory(ctx, config, true)
 assert.Error(err)
 }
@@ -80,7 +81,8 @@ func TestFactorySetLogger(t *testing.T) {
 KernelPath: "foo",
 ImagePath: "bar",
 }
-vf, err := NewFactory(config, false)
+ctx := context.Background()
+vf, err := NewFactory(ctx, config, false)
 assert.Nil(err)
 f, ok := vf.(*factory)
@@ -167,66 +169,68 @@ func TestFactoryGetVM(t *testing.T) {
 HypervisorConfig: hyperConfig,
 }
+ctx := context.Background()
 // direct factory
-f, err := NewFactory(Config{VMConfig: vmConfig}, false)
+f, err := NewFactory(ctx, Config{VMConfig: vmConfig}, false)
 assert.Nil(err)
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
-f.CloseFactory()
+f.CloseFactory(ctx)
 // template factory
-f, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, false)
+f, err = NewFactory(ctx, Config{Template: true, VMConfig: vmConfig}, false)
 assert.Nil(err)
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
-f.CloseFactory()
+f.CloseFactory(ctx)
 // fetch template factory
-f, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, false)
+f, err = NewFactory(ctx, Config{Template: true, VMConfig: vmConfig}, false)
 assert.Nil(err)
-_, err = NewFactory(Config{Template: true, VMConfig: vmConfig}, true)
+_, err = NewFactory(ctx, Config{Template: true, VMConfig: vmConfig}, true)
 assert.Error(err)
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
-f.CloseFactory()
+f.CloseFactory(ctx)
 // cache factory over direct factory
-f, err = NewFactory(Config{Cache: 2, VMConfig: vmConfig}, false)
+f, err = NewFactory(ctx, Config{Cache: 2, VMConfig: vmConfig}, false)
 assert.Nil(err)
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
-f.CloseFactory()
+f.CloseFactory(ctx)
 // cache factory over template factory
-f, err = NewFactory(Config{Template: true, Cache: 2, VMConfig: vmConfig}, false)
+f, err = NewFactory(ctx, Config{Template: true, Cache: 2, VMConfig: vmConfig}, false)
 assert.Nil(err)
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
 // CPU hotplug
 vmConfig.HypervisorConfig.DefaultVCPUs++
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
 // Memory hotplug
 vmConfig.HypervisorConfig.DefaultMemSz += 128
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
 // checkConfig fall back
 vmConfig.HypervisorConfig.Mlock = true
-_, err = f.GetVM(vmConfig)
+_, err = f.GetVM(ctx, vmConfig)
 assert.Nil(err)
-f.CloseFactory()
+f.CloseFactory(ctx)
 }


@@ -7,6 +7,7 @@
 package template
 import (
+"context"
 "fmt"
 "os"
 "syscall"
@@ -37,20 +38,20 @@ func Fetch(config vc.VMConfig) (base.FactoryBase, error) {
 }
 // New creates a new VM template factory.
-func New(config vc.VMConfig) base.FactoryBase {
+func New(ctx context.Context, config vc.VMConfig) base.FactoryBase {
 statePath := vc.RunVMStoragePath + "/template"
 t := &template{statePath, config}
 err := t.prepareTemplateFiles()
 if err != nil {
 // fallback to direct factory if template is not supported.
-return direct.New(config)
+return direct.New(ctx, config)
 }
-err = t.createTemplateVM()
+err = t.createTemplateVM(ctx)
 if err != nil {
 // fallback to direct factory if template is not supported.
-return direct.New(config)
+return direct.New(ctx, config)
 }
 return t
@@ -62,12 +63,12 @@ func (t *template) Config() vc.VMConfig {
 }
 // GetBaseVM creates a new paused VM from the template VM.
-func (t *template) GetBaseVM() (*vc.VM, error) {
-return t.createFromTemplateVM()
+func (t *template) GetBaseVM(ctx context.Context) (*vc.VM, error) {
+return t.createFromTemplateVM(ctx)
 }
 // CloseFactory cleans up the template VM.
-func (t *template) CloseFactory() {
+func (t *template) CloseFactory(ctx context.Context) {
 syscall.Unmount(t.statePath, 0)
 os.RemoveAll(t.statePath)
 }
@@ -92,7 +93,7 @@ func (t *template) prepareTemplateFiles() error {
 return nil
 }
-func (t *template) createTemplateVM() error {
+func (t *template) createTemplateVM(ctx context.Context) error {
 // create the template vm
 config := t.config
 config.HypervisorConfig.BootToBeTemplate = true
@@ -100,7 +101,7 @@ func (t *template) createTemplateVM() error {
 config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
 config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"
-vm, err := vc.NewVM(config)
+vm, err := vc.NewVM(ctx, config)
 if err != nil {
 return err
 }
@@ -122,14 +123,14 @@ func (t *template) createTemplateVM() error {
 return nil
 }
-func (t *template) createFromTemplateVM() (*vc.VM, error) {
+func (t *template) createFromTemplateVM(ctx context.Context) (*vc.VM, error) {
 config := t.config
 config.HypervisorConfig.BootToBeTemplate = false
 config.HypervisorConfig.BootFromTemplate = true
 config.HypervisorConfig.MemoryPath = t.statePath + "/memory"
 config.HypervisorConfig.DevicesStatePath = t.statePath + "/state"
-return vc.NewVM(config)
+return vc.NewVM(ctx, config)
 }
 func (t *template) checkTemplateVM() error {


@@ -6,6 +6,7 @@
 package template
 import (
+"context"
 "io/ioutil"
 "os"
 "testing"
@@ -29,14 +30,16 @@ func TestTemplateFactory(t *testing.T) {
 HypervisorConfig: hyperConfig,
 }
+ctx := context.Background()
 // New
-f := New(vmConfig)
+f := New(ctx, vmConfig)
 // Config
 assert.Equal(f.Config(), vmConfig)
 // GetBaseVM
-_, err := f.GetBaseVM()
+_, err := f.GetBaseVM(ctx)
 assert.Nil(err)
 // Fetch
@@ -58,13 +61,13 @@ func TestTemplateFactory(t *testing.T) {
 err = tt.checkTemplateVM()
 assert.Nil(err)
-err = tt.createTemplateVM()
+err = tt.createTemplateVM(ctx)
 assert.Nil(err)
-_, err = tt.GetBaseVM()
+_, err = tt.GetBaseVM(ctx)
 assert.Nil(err)
 // CloseFactory
-f.CloseFactory()
-tt.CloseFactory()
+f.CloseFactory(ctx)
+tt.CloseFactory(ctx)
 }


@@ -6,12 +6,14 @@
 package virtcontainers
 import (
+"context"
 "encoding/json"
 "fmt"
 "io/ioutil"
 "os"
 "path/filepath"
+opentracing "github.com/opentracing/opentracing-go"
 "github.com/sirupsen/logrus"
 "github.com/kata-containers/runtime/virtcontainers/device/api"
@@ -115,7 +117,7 @@ var RunVMStoragePath = filepath.Join("/run", storagePathSuffix, vmPathSuffix)
 // The default resource storage implementation is filesystem.
 type resourceStorage interface {
 // Create all resources for a sandbox
-createAllResources(sandbox *Sandbox) error
+createAllResources(ctx context.Context, sandbox *Sandbox) error
 // Resources URIs functions return both the URI
 // for the actual resource and the URI base.
@@ -155,14 +157,37 @@ type resourceStorage interface {
 // filesystem is a resourceStorage interface implementation for a local filesystem.
 type filesystem struct {
+ctx context.Context
 }
 // Logger returns a logrus logger appropriate for logging filesystem messages
 func (fs *filesystem) Logger() *logrus.Entry {
-return virtLog.WithField("subsystem", "filesystem")
+return virtLog.WithFields(logrus.Fields{
+"subsystem": "storage",
+"type": "filesystem",
+})
 }
-func (fs *filesystem) createAllResources(sandbox *Sandbox) (err error) {
+func (fs *filesystem) trace(name string) (opentracing.Span, context.Context) {
+if fs.ctx == nil {
+fs.Logger().WithField("type", "bug").Error("trace called before context set")
+fs.ctx = context.Background()
+}
+span, ctx := opentracing.StartSpanFromContext(fs.ctx, name)
+span.SetTag("subsystem", "storage")
+span.SetTag("type", "filesystem")
+return span, ctx
+}
+func (fs *filesystem) createAllResources(ctx context.Context, sandbox *Sandbox) (err error) {
+fs.ctx = ctx
+span, _ := fs.trace("createAllResources")
+defer span.Finish()
 for _, resource := range []sandboxResource{stateFileType, configFileType} {
 _, path, _ := fs.sandboxURI(sandbox.id, resource)
 err = os.MkdirAll(path, dirMode)
@@ -389,6 +414,33 @@ func resourceNeedsContainerID(sandboxSpecific bool, resource sandboxResource) bo
 }
 }
+func resourceName(resource sandboxResource) string {
+switch resource {
+case agentFileType:
+return "agentFileType"
+case configFileType:
+return "configFileType"
+case devicesFileType:
+return "devicesFileType"
+case devicesIDFileType:
+return "devicesIDFileType"
+case hypervisorFileType:
+return "hypervisorFileType"
+case lockFileType:
+return "lockFileType"
+case mountsFileType:
+return "mountsFileType"
+case networkFileType:
+return "networkFileType"
+case processFileType:
+return "processFileType"
+case stateFileType:
+return "stateFileType"
+default:
+return ""
+}
+}
 func resourceDir(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource) (string, error) {
 if sandboxID == "" {
 return "", errNeedSandboxID
@@ -635,6 +687,17 @@ func (fs *filesystem) storeResource(sandboxSpecific bool, sandboxID, containerID
 }
 func (fs *filesystem) fetchResource(sandboxSpecific bool, sandboxID, containerID string, resource sandboxResource, data interface{}) error {
+var span opentracing.Span
+if fs.ctx != nil {
+span, _ = fs.trace("fetchResource")
+defer span.Finish()
+span.SetTag("sandbox", sandboxID)
+span.SetTag("container", containerID)
+span.SetTag("resource", resourceName(resource))
+}
 if err := fs.commonResourceChecks(sandboxSpecific, sandboxID, containerID, resource); err != nil {
 return err
 }
@@ -715,6 +778,9 @@ func (fs *filesystem) storeHypervisorState(sandboxID string, state interface{})
 }
 func (fs *filesystem) storeAgentState(sandboxID string, state interface{}) error {
+span, _ := fs.trace("storeAgentState")
+defer span.Finish()
 agentFile, _, err := fs.resourceURI(true, sandboxID, "", agentFileType)
 if err != nil {
 return err


@@ -6,6 +6,7 @@
 package virtcontainers
 import (
+"context"
 "fmt"
 "io/ioutil"
 "os"
@@ -55,7 +56,7 @@ func TestFilesystemCreateAllResourcesSuccessful(t *testing.T) {
 os.RemoveAll(runPath)
 }
-err := fs.createAllResources(sandbox)
+err := fs.createAllResources(context.Background(), sandbox)
 if err != nil {
 t.Fatal(err)
 }
@@ -102,7 +103,7 @@ func TestFilesystemCreateAllResourcesFailingSandboxIDEmpty(t *testing.T) {
 sandbox := &Sandbox{}
-err := fs.createAllResources(sandbox)
+err := fs.createAllResources(context.Background(), sandbox)
 if err == nil {
 t.Fatal()
 }
@@ -120,7 +121,7 @@ func TestFilesystemCreateAllResourcesFailingContainerIDEmpty(t *testing.T) {
 containers: containers,
 }
-err := fs.createAllResources(sandbox)
+err := fs.createAllResources(context.Background(), sandbox)
 if err == nil {
 t.Fatal()
 }


@@ -7,15 +7,19 @@ package virtcontainers
 import (
 "bytes"
+"context"
 "encoding/json"
 "fmt"
 "os"
 "os/exec"
+"strings"
 "syscall"
 "time"
 vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 specs "github.com/opencontainers/runtime-spec/specs-go"
+opentracing "github.com/opentracing/opentracing-go"
+"github.com/opentracing/opentracing-go/log"
 "github.com/sirupsen/logrus"
 )
@@ -48,7 +52,22 @@ func buildHookState(processID int, s *Sandbox) specs.State {
 }
 }
+func (h *Hook) trace(ctx context.Context, name string) (opentracing.Span, context.Context) {
+return traceWithSubsys(ctx, "hook", name)
+}
+func (h *Hooks) trace(ctx context.Context, name string) (opentracing.Span, context.Context) {
+return traceWithSubsys(ctx, "hooks", name)
+}
 func (h *Hook) runHook(s *Sandbox) error {
+span, _ := h.trace(s.ctx, "runHook")
+defer span.Finish()
+span.LogFields(
+log.String("hook-name", h.Path),
+log.String("hook-args", strings.Join(h.Args, " ")))
 state := buildHookState(os.Getpid(), s)
 stateJSON, err := json.Marshal(state)
 if err != nil {
@@ -100,6 +119,9 @@ func (h *Hook) runHook(s *Sandbox) error {
 }
 func (h *Hooks) preStartHooks(s *Sandbox) error {
+span, _ := h.trace(s.ctx, "preStartHooks")
+defer span.Finish()
 if len(h.PreStartHooks) == 0 {
 return nil
 }
@@ -120,6 +142,9 @@ func (h *Hooks) preStartHooks(s *Sandbox) error {
 }
 func (h *Hooks) postStartHooks(s *Sandbox) error {
+span, _ := h.trace(s.ctx, "postStartHooks")
+defer span.Finish()
 if len(h.PostStartHooks) == 0 {
 return nil
 }
@@ -140,6 +165,9 @@ func (h *Hooks) postStartHooks(s *Sandbox) error {
 }
 func (h *Hooks) postStopHooks(s *Sandbox) error {
+span, _ := h.trace(s.ctx, "postStopHooks")
+defer span.Finish()
 if len(h.PostStopHooks) == 0 {
 return nil
 }


@@ -6,6 +6,7 @@
 package virtcontainers
 import (
+"context"
 "io/ioutil"
 "os"
 "path/filepath"
@@ -90,6 +91,7 @@ func createTestSandbox() *Sandbox {
 annotationsLock: &sync.RWMutex{},
 config: c,
 id: testSandboxID,
+ctx: context.Background(),
 }
 }


@@ -6,6 +6,7 @@
 package virtcontainers
 import (
+"context"
 "fmt"
 "net"
 "net/url"
@@ -82,6 +83,8 @@ type hyper struct {
 state HyperAgentState
 sockets []Socket
+ctx context.Context
 }
 type hyperstartProxyCmd struct {
@@ -258,7 +261,10 @@ func fsMapFromDevices(c *Container) ([]*hyperstart.FsmapDescriptor, error) {
 }
 // init is the agent initialization implementation for hyperstart.
-func (h *hyper) init(sandbox *Sandbox, config interface{}) (err error) {
+func (h *hyper) init(ctx context.Context, sandbox *Sandbox, config interface{}) (err error) {
+// save
+h.ctx = ctx
 switch c := config.(type) {
 case HyperConfig:
 // Create agent sockets from paths provided through
@@ -503,8 +509,8 @@ func (h *hyper) startOneContainer(sandbox *Sandbox, c *Container) error {
 container.Fstype = c.state.Fstype
 } else {
-if err := bindMountContainerRootfs(defaultSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil {
-bindUnmountAllRootfs(defaultSharedDir, sandbox)
+if err := bindMountContainerRootfs(c.ctx, defaultSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil {
+bindUnmountAllRootfs(c.ctx, defaultSharedDir, sandbox)
 return err
 }
 }
@@ -514,7 +520,7 @@ func (h *hyper) startOneContainer(sandbox *Sandbox, c *Container) error {
 // Handle container mounts
 newMounts, err := c.mountSharedDirMounts(defaultSharedDir, "")
 if err != nil {
-bindUnmountAllRootfs(defaultSharedDir, sandbox)
+bindUnmountAllRootfs(c.ctx, defaultSharedDir, sandbox)
 return err
 }
@@ -599,7 +605,7 @@ func (h *hyper) stopOneContainer(sandboxID string, c Container) error {
 }
 if c.state.Fstype == "" {
-if err := bindUnmountContainerRootfs(defaultSharedDir, sandboxID, c.id); err != nil {
+if err := bindUnmountContainerRootfs(c.ctx, defaultSharedDir, sandboxID, c.id); err != nil {
 return err
 }
 }
View File
@ -7,6 +7,7 @@ package virtcontainers
import ( import (
"bufio" "bufio"
"context"
"fmt" "fmt"
"os" "os"
"runtime" "runtime"
@ -545,7 +546,8 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
// hypervisor is the virtcontainers hypervisor interface. // hypervisor is the virtcontainers hypervisor interface.
// The default hypervisor implementation is Qemu. // The default hypervisor implementation is Qemu.
type hypervisor interface { type hypervisor interface {
init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error
createSandbox() error createSandbox() error
startSandbox() error startSandbox() error
waitSandbox(timeout int) error waitSandbox(timeout int) error
View File
@ -24,6 +24,7 @@ import (
ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter" ns "github.com/kata-containers/runtime/virtcontainers/pkg/nsenter"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
"github.com/kata-containers/runtime/virtcontainers/utils" "github.com/kata-containers/runtime/virtcontainers/utils"
opentracing "github.com/opentracing/opentracing-go"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-spec/specs-go"
@ -96,6 +97,21 @@ type kataAgent struct {
proxyBuiltIn bool proxyBuiltIn bool
vmSocket interface{} vmSocket interface{}
ctx context.Context
}
func (k *kataAgent) trace(name string) (opentracing.Span, context.Context) {
if k.ctx == nil {
k.Logger().WithField("type", "bug").Error("trace called before context set")
k.ctx = context.Background()
}
span, ctx := opentracing.StartSpanFromContext(k.ctx, name)
span.SetTag("subsystem", "agent")
span.SetTag("type", "kata")
return span, ctx
} }
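trace returns the derived context as well as the span, so a caller that wants nested spans can feed that context into further StartSpanFromContext calls; most call sites below discard it (span, _ := k.trace(...)) because a single span is enough. A small usage sketch with a hypothetical method name, assuming this file's existing imports:

// tracedOperation is illustrative only: any span started from the context
// returned by trace becomes a child of the "parentOperation" span.
func (k *kataAgent) tracedOperation() {
	span, ctx := k.trace("parentOperation")
	defer span.Finish()

	child, _ := opentracing.StartSpanFromContext(ctx, "childStep")
	defer child.Finish()
}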
func (k *kataAgent) Logger() *logrus.Entry { func (k *kataAgent) Logger() *logrus.Entry {
@ -135,7 +151,13 @@ func (k *kataAgent) generateVMSocket(id string, c KataAgentConfig) error {
return nil return nil
} }
func (k *kataAgent) init(sandbox *Sandbox, config interface{}) (err error) { func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config interface{}) (err error) {
// save the sandbox context so the agent can create trace spans from it
k.ctx = sandbox.ctx
span, _ := k.trace("init")
defer span.Finish()
switch c := config.(type) { switch c := config.(type) {
case KataAgentConfig: case KataAgentConfig:
if err := k.generateVMSocket(sandbox.id, c); err != nil { if err := k.generateVMSocket(sandbox.id, c); err != nil {
@ -240,6 +262,9 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, builtin bool,
} }
func (k *kataAgent) createSandbox(sandbox *Sandbox) error { func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
span, _ := k.trace("createSandbox")
defer span.Finish()
return k.configure(sandbox.hypervisor, sandbox.id, k.getSharePath(sandbox.id), k.proxyBuiltIn, nil) return k.configure(sandbox.hypervisor, sandbox.id, k.getSharePath(sandbox.id), k.proxyBuiltIn, nil)
} }
@ -321,6 +346,9 @@ func cmdEnvsToStringSlice(ev []EnvVar) []string {
} }
func (k *kataAgent) exec(sandbox *Sandbox, c Container, cmd Cmd) (*Process, error) { func (k *kataAgent) exec(sandbox *Sandbox, c Container, cmd Cmd) (*Process, error) {
span, _ := k.trace("exec")
defer span.Finish()
var kataProcess *grpc.Process var kataProcess *grpc.Process
kataProcess, err := cmdToKataProcess(cmd) kataProcess, err := cmdToKataProcess(cmd)
@ -354,6 +382,8 @@ func (k *kataAgent) exec(sandbox *Sandbox, c Container, cmd Cmd) (*Process, erro
} }
func (k *kataAgent) generateInterfacesAndRoutes(networkNS NetworkNamespace) ([]*grpc.Interface, []*grpc.Route, error) { func (k *kataAgent) generateInterfacesAndRoutes(networkNS NetworkNamespace) ([]*grpc.Interface, []*grpc.Route, error) {
span, _ := k.trace("generateInterfacesAndRoutes")
defer span.Finish()
if networkNS.NetNsPath == "" { if networkNS.NetNsPath == "" {
return nil, nil, nil return nil, nil, nil
@ -517,6 +547,9 @@ func (k *kataAgent) listRoutes() ([]*grpc.Route, error) {
} }
func (k *kataAgent) startProxy(sandbox *Sandbox) error { func (k *kataAgent) startProxy(sandbox *Sandbox) error {
span, _ := k.trace("startProxy")
defer span.Finish()
var err error var err error
if k.proxy == nil { if k.proxy == nil {
@ -569,6 +602,9 @@ func (k *kataAgent) startProxy(sandbox *Sandbox) error {
} }
func (k *kataAgent) startSandbox(sandbox *Sandbox) error { func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
span, _ := k.trace("startSandbox")
defer span.Finish()
err := k.startProxy(sandbox) err := k.startProxy(sandbox)
if err != nil { if err != nil {
return err return err
@ -642,6 +678,9 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
} }
func (k *kataAgent) stopSandbox(sandbox *Sandbox) error { func (k *kataAgent) stopSandbox(sandbox *Sandbox) error {
span, _ := k.trace("stopSandbox")
defer span.Finish()
if k.proxy == nil { if k.proxy == nil {
return errorMissingProxy return errorMissingProxy
} }
@ -811,7 +850,7 @@ func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
k.Logger().WithError(err2).Error("rollback failed unmountHostMounts()") k.Logger().WithError(err2).Error("rollback failed unmountHostMounts()")
} }
if err2 := bindUnmountContainerRootfs(kataHostSharedDir, c.sandbox.id, c.id); err2 != nil { if err2 := bindUnmountContainerRootfs(k.ctx, kataHostSharedDir, c.sandbox.id, c.id); err2 != nil {
k.Logger().WithError(err2).Error("rollback failed bindUnmountContainerRootfs()") k.Logger().WithError(err2).Error("rollback failed bindUnmountContainerRootfs()")
} }
} }
@ -861,7 +900,7 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
// (kataGuestSharedDir) is already mounted in the // (kataGuestSharedDir) is already mounted in the
// guest. We only need to mount the rootfs from // guest. We only need to mount the rootfs from
// the host and it will show up in the guest. // the host and it will show up in the guest.
if err := bindMountContainerRootfs(kataHostSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil { if err := bindMountContainerRootfs(k.ctx, kataHostSharedDir, sandbox.id, c.id, c.rootFs, false); err != nil {
return nil, err return nil, err
} }
@ -869,6 +908,9 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
} }
func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) { func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) {
span, _ := k.trace("createContainer")
defer span.Finish()
ociSpecJSON, ok := c.config.Annotations[vcAnnotations.ConfigJSONKey] ociSpecJSON, ok := c.config.Annotations[vcAnnotations.ConfigJSONKey]
if !ok { if !ok {
return nil, errorMissingOCISpec return nil, errorMissingOCISpec
@ -1101,6 +1143,9 @@ func (k *kataAgent) handlePidNamespace(grpcSpec *grpc.Spec, sandbox *Sandbox) (b
} }
func (k *kataAgent) startContainer(sandbox *Sandbox, c *Container) error { func (k *kataAgent) startContainer(sandbox *Sandbox, c *Container) error {
span, _ := k.trace("startContainer")
defer span.Finish()
req := &grpc.StartContainerRequest{ req := &grpc.StartContainerRequest{
ContainerId: c.id, ContainerId: c.id,
} }
@ -1110,6 +1155,9 @@ func (k *kataAgent) startContainer(sandbox *Sandbox, c *Container) error {
} }
func (k *kataAgent) stopContainer(sandbox *Sandbox, c Container) error { func (k *kataAgent) stopContainer(sandbox *Sandbox, c Container) error {
span, _ := k.trace("stopContainer")
defer span.Finish()
req := &grpc.RemoveContainerRequest{ req := &grpc.RemoveContainerRequest{
ContainerId: c.id, ContainerId: c.id,
} }
@ -1122,7 +1170,7 @@ func (k *kataAgent) stopContainer(sandbox *Sandbox, c Container) error {
return err return err
} }
if err := bindUnmountContainerRootfs(kataHostSharedDir, sandbox.id, c.id); err != nil { if err := bindUnmountContainerRootfs(k.ctx, kataHostSharedDir, sandbox.id, c.id); err != nil {
return err return err
} }
@ -1261,6 +1309,9 @@ func (k *kataAgent) connect() error {
return nil return nil
} }
span, _ := k.trace("connect")
defer span.Finish()
// This is for the first connection only, to prevent race // This is for the first connection only, to prevent race
k.Lock() k.Lock()
defer k.Unlock() defer k.Unlock()
@ -1281,6 +1332,9 @@ func (k *kataAgent) connect() error {
} }
func (k *kataAgent) disconnect() error { func (k *kataAgent) disconnect() error {
span, _ := k.trace("disconnect")
defer span.Finish()
k.Lock() k.Lock()
defer k.Unlock() defer k.Unlock()
@ -1300,6 +1354,9 @@ func (k *kataAgent) disconnect() error {
// check grpc server is serving // check grpc server is serving
func (k *kataAgent) check() error { func (k *kataAgent) check() error {
span, _ := k.trace("check")
defer span.Finish()
_, err := k.sendReq(&grpc.CheckRequest{}) _, err := k.sendReq(&grpc.CheckRequest{})
if err != nil { if err != nil {
err = fmt.Errorf("Failed to check if grpc server is working: %s", err) err = fmt.Errorf("Failed to check if grpc server is working: %s", err)
@ -1308,6 +1365,9 @@ func (k *kataAgent) check() error {
} }
func (k *kataAgent) waitProcess(c *Container, processID string) (int32, error) { func (k *kataAgent) waitProcess(c *Container, processID string) (int32, error) {
span, _ := k.trace("waitProcess")
defer span.Finish()
resp, err := k.sendReq(&grpc.WaitProcessRequest{ resp, err := k.sendReq(&grpc.WaitProcessRequest{
ContainerId: c.id, ContainerId: c.id,
ExecId: processID, ExecId: processID,
@ -1428,6 +1488,10 @@ func (k *kataAgent) installReqFunc(c *kataclient.AgentClient) {
} }
func (k *kataAgent) sendReq(request interface{}) (interface{}, error) { func (k *kataAgent) sendReq(request interface{}) (interface{}, error) {
span, _ := k.trace("sendReq")
span.SetTag("request", request)
defer span.Finish()
if err := k.connect(); err != nil { if err := k.connect(); err != nil {
return nil, err return nil, err
} }
@ -1443,7 +1507,7 @@ func (k *kataAgent) sendReq(request interface{}) (interface{}, error) {
message := request.(proto.Message) message := request.(proto.Message)
k.Logger().WithField("name", msgName).WithField("req", message.String()).Debug("sending request") k.Logger().WithField("name", msgName).WithField("req", message.String()).Debug("sending request")
return handler(context.Background(), request) return handler(k.ctx, request)
} }
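Passing k.ctx instead of context.Background() means the request handlers inherit whatever the agent context carries: trace metadata plus any deadline or cancellation attached upstream. A hedged sketch of that effect, with a hypothetical handler signature mirroring the handler variable above and assuming the standard context and time packages:

// sendWithDeadline is illustrative only: a deadline placed on the agent
// context bounds how long a context-aware handler may run.
func sendWithDeadline(ctx context.Context, handler func(context.Context, interface{}) (interface{}, error), req interface{}) (interface{}, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	return handler(ctx, req)
}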
// readStdout and readStderr are special in that we cannot differentiate them by their request types... // readStdout and readStderr are special in that we cannot differentiate them by their request types...
@ -1473,7 +1537,7 @@ func (k *kataAgent) readProcessStderr(c *Container, processID string, data []byt
type readFn func(context.Context, *grpc.ReadStreamRequest, ...golangGrpc.CallOption) (*grpc.ReadStreamResponse, error) type readFn func(context.Context, *grpc.ReadStreamRequest, ...golangGrpc.CallOption) (*grpc.ReadStreamResponse, error)
func (k *kataAgent) readProcessStream(containerID, processID string, data []byte, read readFn) (int, error) { func (k *kataAgent) readProcessStream(containerID, processID string, data []byte, read readFn) (int, error) {
resp, err := read(context.Background(), &grpc.ReadStreamRequest{ resp, err := read(k.ctx, &grpc.ReadStreamRequest{
ContainerId: containerID, ContainerId: containerID,
ExecId: processID, ExecId: processID,
Len: uint32(len(data))}) Len: uint32(len(data))})
View File
@ -5,11 +5,13 @@
package virtcontainers package virtcontainers
import "context"
type mockHypervisor struct { type mockHypervisor struct {
vCPUs uint32 vCPUs uint32
} }
func (m *mockHypervisor) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error { func (m *mockHypervisor) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error {
err := hypervisorConfig.valid() err := hypervisorConfig.valid()
if err != nil { if err != nil {
return err return err
View File
@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
) )
@ -25,8 +26,10 @@ func TestMockHypervisorInit(t *testing.T) {
storage: &filesystem{}, storage: &filesystem{},
} }
ctx := context.Background()
// wrong config // wrong config
if err := m.init(sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err == nil { if err := m.init(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err == nil {
t.Fatal() t.Fatal()
} }
@ -37,7 +40,7 @@ func TestMockHypervisorInit(t *testing.T) {
} }
// right config // right config
if err := m.init(sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil { if err := m.init(ctx, sandbox.config.ID, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
View File
@ -7,6 +7,7 @@ package virtcontainers
import ( import (
"bufio" "bufio"
"context"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -229,7 +230,10 @@ const mountPerm = os.FileMode(0755)
// * evaluate all symlinks // * evaluate all symlinks
// * ensure the source exists // * ensure the source exists
// * recursively create the destination // * recursively create the destination
func bindMount(source, destination string, readonly bool) error { func bindMount(ctx context.Context, source, destination string, readonly bool) error {
span, _ := trace(ctx, "bindMount")
defer span.Finish()
if source == "" { if source == "" {
return fmt.Errorf("source must be specified") return fmt.Errorf("source must be specified")
} }
@ -259,10 +263,13 @@ func bindMount(source, destination string, readonly bool) error {
// bindMountContainerRootfs bind mounts a container rootfs into a 9pfs shared // bindMountContainerRootfs bind mounts a container rootfs into a 9pfs shared
// directory between the guest and the host. // directory between the guest and the host.
func bindMountContainerRootfs(sharedDir, sandboxID, cID, cRootFs string, readonly bool) error { func bindMountContainerRootfs(ctx context.Context, sharedDir, sandboxID, cID, cRootFs string, readonly bool) error {
span, _ := trace(ctx, "bindMountContainerRootfs")
defer span.Finish()
rootfsDest := filepath.Join(sharedDir, sandboxID, cID, rootfsDir) rootfsDest := filepath.Join(sharedDir, sandboxID, cID, rootfsDir)
return bindMount(cRootFs, rootfsDest, readonly) return bindMount(ctx, cRootFs, rootfsDest, readonly)
} }
// Mount describes a container mount. // Mount describes a container mount.
@ -288,20 +295,26 @@ type Mount struct {
BlockDeviceID string BlockDeviceID string
} }
func bindUnmountContainerRootfs(sharedDir, sandboxID, cID string) error { func bindUnmountContainerRootfs(ctx context.Context, sharedDir, sandboxID, cID string) error {
span, _ := trace(ctx, "bindUnmountContainerRootfs")
defer span.Finish()
rootfsDest := filepath.Join(sharedDir, sandboxID, cID, rootfsDir) rootfsDest := filepath.Join(sharedDir, sandboxID, cID, rootfsDir)
syscall.Unmount(rootfsDest, 0) syscall.Unmount(rootfsDest, 0)
return nil return nil
} }
func bindUnmountAllRootfs(sharedDir string, sandbox *Sandbox) { func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbox) {
span, _ := trace(ctx, "bindUnmountAllRootfs")
defer span.Finish()
for _, c := range sandbox.containers { for _, c := range sandbox.containers {
c.unmountHostMounts() c.unmountHostMounts()
if c.state.Fstype == "" { if c.state.Fstype == "" {
// Need to check for error returned by this call. // Need to check for error returned by this call.
// See: https://github.com/containers/virtcontainers/issues/295 // See: https://github.com/containers/virtcontainers/issues/295
bindUnmountContainerRootfs(sharedDir, sandbox.id, c.id) bindUnmountContainerRootfs(c.ctx, sharedDir, sandbox.id, c.id)
} }
} }
} }
View File
@ -7,6 +7,7 @@ package virtcontainers
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"os" "os"
"os/exec" "os/exec"
@ -212,7 +213,7 @@ func TestGetDeviceForPathBindMount(t *testing.T) {
defer os.Remove(dest) defer os.Remove(dest)
err = bindMount(source, dest, false) err = bindMount(context.Background(), source, dest, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
View File
@ -7,6 +7,7 @@ package virtcontainers
import ( import (
"bufio" "bufio"
"context"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
@ -1581,7 +1582,7 @@ func vhostUserSocketPath(info interface{}) (string, error) {
// between VM netns and the host network physical interface. // between VM netns and the host network physical interface.
type network interface { type network interface {
// init initializes the network, setting a new network namespace. // init initializes the network, setting a new network namespace.
init(config NetworkConfig) (string, bool, error) init(ctx context.Context, config NetworkConfig) (string, bool, error)
// run runs a callback function in a specified network namespace. // run runs a callback function in a specified network namespace.
run(networkNSPath string, cb func() error) error run(networkNSPath string, cb func() error) error
View File
@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"syscall" "syscall"
"github.com/kata-containers/agent/protocols/grpc" "github.com/kata-containers/agent/protocols/grpc"
@ -23,7 +24,7 @@ func (n *noopAgent) startProxy(sandbox *Sandbox) error {
} }
// init initializes the Noop agent, i.e. it does nothing. // init initializes the Noop agent, i.e. it does nothing.
func (n *noopAgent) init(sandbox *Sandbox, config interface{}) error { func (n *noopAgent) init(ctx context.Context, sandbox *Sandbox, config interface{}) error {
return nil return nil
} }
View File
@ -35,7 +35,7 @@ func TestNoopAgentInit(t *testing.T) {
n := &noopAgent{} n := &noopAgent{}
sandbox := &Sandbox{} sandbox := &Sandbox{}
err := n.init(sandbox, nil) err := n.init(context.Background(), sandbox, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
View File
@ -5,6 +5,8 @@
package virtcontainers package virtcontainers
import "context"
// noopNetwork a.k.a. NO-OP Network is an empty network implementation, for // noopNetwork a.k.a. NO-OP Network is an empty network implementation, for
// testing and mocking purposes. // testing and mocking purposes.
type noopNetwork struct { type noopNetwork struct {
@ -12,7 +14,7 @@ type noopNetwork struct {
// init initializes the network, setting a new network namespace for the Noop network. // init initializes the network, setting a new network namespace for the Noop network.
// It does nothing. // It does nothing.
func (n *noopNetwork) init(config NetworkConfig) (string, bool, error) { func (n *noopNetwork) init(ctx context.Context, config NetworkConfig) (string, bool, error) {
return "", true, nil return "", true, nil
} }
View File
@ -721,7 +721,7 @@ func TestVCMockSetVMFactory(t *testing.T) {
} }
ctx := context.Background() ctx := context.Background()
f, err := factory.NewFactory(factory.Config{VMConfig: vmConfig}, false) f, err := factory.NewFactory(ctx, factory.Config{VMConfig: vmConfig}, false)
assert.Nil(err) assert.Nil(err)
assert.Equal(factoryTriggered, 0) assert.Equal(factoryTriggered, 0)
View File
@ -17,6 +17,7 @@ import (
govmmQemu "github.com/intel/govmm/qemu" govmmQemu "github.com/intel/govmm/qemu"
"github.com/kata-containers/runtime/virtcontainers/pkg/uuid" "github.com/kata-containers/runtime/virtcontainers/pkg/uuid"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/kata-containers/runtime/virtcontainers/device/config" "github.com/kata-containers/runtime/virtcontainers/device/config"
@ -67,6 +68,8 @@ type qemu struct {
// fds is a list of file descriptors inherited by QEMU process // fds is a list of file descriptors inherited by QEMU process
// they'll be closed once QEMU process is running // they'll be closed once QEMU process is running
fds []*os.File fds []*os.File
ctx context.Context
} }
const ( const (
@ -152,6 +155,9 @@ func (q *qemu) kernelParameters() string {
// Adds all capabilities supported by qemu implementation of hypervisor interface // Adds all capabilities supported by qemu implementation of hypervisor interface
func (q *qemu) capabilities() capabilities { func (q *qemu) capabilities() capabilities {
span, _ := q.trace("capabilities")
defer span.Finish()
return q.arch.capabilities() return q.arch.capabilities()
} }
@ -176,8 +182,28 @@ func (q *qemu) qemuPath() (string, error) {
return p, nil return p, nil
} }
func (q *qemu) trace(name string) (opentracing.Span, context.Context) {
if q.ctx == nil {
q.Logger().WithField("type", "bug").Error("trace called before context set")
q.ctx = context.Background()
}
span, ctx := opentracing.StartSpanFromContext(q.ctx, name)
span.SetTag("subsystem", "hypervisor")
span.SetTag("type", "qemu")
return span, ctx
}
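opentracing.StartSpanFromContext uses the process-wide global tracer, which defaults to a NoopTracer, so all of these spans stay inert and cheap until a real tracer is registered; that registration happens outside this diff, presumably in the CLI. A self-contained sketch of the behaviour:

package main

import (
	"context"
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	// With no tracer registered the global tracer is a NoopTracer and the
	// span below does nothing; registering a real tracer here would make
	// every StartSpanFromContext call in virtcontainers report spans.
	opentracing.SetGlobalTracer(opentracing.NoopTracer{})

	span, _ := opentracing.StartSpanFromContext(context.Background(), "demo")
	span.SetTag("subsystem", "example")
	span.Finish()

	fmt.Println("span finished (no-op unless a real tracer is registered)")
}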
// init initializes the Qemu structure. // init initializes the Qemu structure.
func (q *qemu) init(id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error { func (q *qemu) init(ctx context.Context, id string, hypervisorConfig *HypervisorConfig, vmConfig Resources, storage resourceStorage) error {
// save the context so later hypervisor operations can create trace spans
q.ctx = ctx
span, _ := q.trace("init")
defer span.Finish()
err := hypervisorConfig.valid() err := hypervisorConfig.valid()
if err != nil { if err != nil {
return err return err
@ -298,7 +324,7 @@ func (q *qemu) createQmpSocket() ([]govmmQemu.QMPSocket, error) {
} }
q.qmpMonitorCh = qmpChannel{ q.qmpMonitorCh = qmpChannel{
ctx: context.Background(), ctx: q.ctx,
path: monitorSockPath, path: monitorSockPath,
} }
@ -364,6 +390,9 @@ func (q *qemu) setupTemplate(knobs *govmmQemu.Knobs, memory *govmmQemu.Memory) g
// createSandbox is the Hypervisor sandbox creation implementation for govmmQemu. // createSandbox is the Hypervisor sandbox creation implementation for govmmQemu.
func (q *qemu) createSandbox() error { func (q *qemu) createSandbox() error {
span, _ := q.trace("createSandbox")
defer span.Finish()
machine, err := q.getQemuMachine() machine, err := q.getQemuMachine()
if err != nil { if err != nil {
return err return err
@ -467,6 +496,9 @@ func (q *qemu) createSandbox() error {
// startSandbox will start the Sandbox's VM. // startSandbox will start the Sandbox's VM.
func (q *qemu) startSandbox() error { func (q *qemu) startSandbox() error {
span, _ := q.trace("startSandbox")
defer span.Finish()
if q.config.Debug { if q.config.Debug {
params := q.arch.kernelParameters(q.config.Debug) params := q.arch.kernelParameters(q.config.Debug)
strParams := SerializeParams(params, "=") strParams := SerializeParams(params, "=")
@ -511,6 +543,9 @@ func (q *qemu) startSandbox() error {
// waitSandbox will wait for the Sandbox's VM to be up and running. // waitSandbox will wait for the Sandbox's VM to be up and running.
func (q *qemu) waitSandbox(timeout int) error { func (q *qemu) waitSandbox(timeout int) error {
span, _ := q.trace("waitSandbox")
defer span.Finish()
if timeout < 0 { if timeout < 0 {
return fmt.Errorf("Invalid timeout %ds", timeout) return fmt.Errorf("Invalid timeout %ds", timeout)
} }
@ -562,6 +597,9 @@ func (q *qemu) waitSandbox(timeout int) error {
// stopSandbox will stop the Sandbox's VM. // stopSandbox will stop the Sandbox's VM.
func (q *qemu) stopSandbox() error { func (q *qemu) stopSandbox() error {
span, _ := q.trace("stopSandbox")
defer span.Finish()
q.Logger().Info("Stopping Sandbox") q.Logger().Info("Stopping Sandbox")
err := q.qmpSetup() err := q.qmpSetup()
@ -584,6 +622,9 @@ func (q *qemu) stopSandbox() error {
} }
func (q *qemu) togglePauseSandbox(pause bool) error { func (q *qemu) togglePauseSandbox(pause bool) error {
span, _ := q.trace("togglePauseSandbox")
defer span.Finish()
err := q.qmpSetup() err := q.qmpSetup()
if err != nil { if err != nil {
return err return err
@ -864,6 +905,9 @@ func (q *qemu) hotplugDevice(devInfo interface{}, devType deviceType, op operati
} }
func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) { func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := q.trace("hotplugAddDevice")
defer span.Finish()
data, err := q.hotplugDevice(devInfo, devType, addDevice) data, err := q.hotplugDevice(devInfo, devType, addDevice)
if err != nil { if err != nil {
return data, err return data, err
@ -873,6 +917,9 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
} }
func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) { func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := q.trace("hotplugRemoveDevice")
defer span.Finish()
data, err := q.hotplugDevice(devInfo, devType, removeDevice) data, err := q.hotplugDevice(devInfo, devType, removeDevice)
if err != nil { if err != nil {
return data, err return data, err
@ -1030,15 +1077,24 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) error {
} }
func (q *qemu) pauseSandbox() error { func (q *qemu) pauseSandbox() error {
span, _ := q.trace("pauseSandbox")
defer span.Finish()
return q.togglePauseSandbox(true) return q.togglePauseSandbox(true)
} }
func (q *qemu) resumeSandbox() error { func (q *qemu) resumeSandbox() error {
span, _ := q.trace("resumeSandbox")
defer span.Finish()
return q.togglePauseSandbox(false) return q.togglePauseSandbox(false)
} }
// addDevice will add extra devices to Qemu command line. // addDevice will add extra devices to Qemu command line.
func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error { func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error {
span, _ := q.trace("addDevice")
defer span.Finish()
switch v := devInfo.(type) { switch v := devInfo.(type) {
case Volume: case Volume:
q.qemuConfig.Devices = q.arch.append9PVolume(q.qemuConfig.Devices, v) q.qemuConfig.Devices = q.arch.append9PVolume(q.qemuConfig.Devices, v)
@ -1065,6 +1121,9 @@ func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error {
// getSandboxConsole builds the path of the console where we can read // getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox. // logs coming from the sandbox.
func (q *qemu) getSandboxConsole(id string) (string, error) { func (q *qemu) getSandboxConsole(id string) (string, error) {
span, _ := q.trace("getSandboxConsole")
defer span.Finish()
return utils.BuildSocketPath(RunVMStoragePath, id, consoleSocket) return utils.BuildSocketPath(RunVMStoragePath, id, consoleSocket)
} }
@ -1101,6 +1160,9 @@ func (q *qemu) saveSandbox() error {
} }
func (q *qemu) disconnect() { func (q *qemu) disconnect() {
span, _ := q.trace("disconnect")
defer span.Finish()
q.qmpShutdown() q.qmpShutdown()
} }
View File
@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@ -86,7 +87,7 @@ func TestQemuInit(t *testing.T) {
t.Fatalf("Could not create parent directory %s: %v", parentDir, err) t.Fatalf("Could not create parent directory %s: %v", parentDir, err)
} }
if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil { if err := q.init(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -117,7 +118,7 @@ func TestQemuInitMissingParentDirFail(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err := q.init(sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil { if err := q.init(context.Background(), sandbox.id, &sandbox.config.HypervisorConfig, sandbox.config.VMConfig, sandbox.storage); err != nil {
t.Fatalf("Qemu init() is not expected to fail because of missing parent directory for storage: %v", err) t.Fatalf("Qemu init() is not expected to fail because of missing parent directory for storage: %v", err)
} }
} }
View File
@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -17,6 +18,7 @@ import (
"github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/ns"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
opentracing "github.com/opentracing/opentracing-go"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/kata-containers/agent/protocols/grpc" "github.com/kata-containers/agent/protocols/grpc"
@ -360,6 +362,19 @@ type SandboxConfig struct {
Stateful bool Stateful bool
} }
func (s *Sandbox) trace(name string) (opentracing.Span, context.Context) {
if s.ctx == nil {
s.Logger().WithField("type", "bug").Error("trace called before context set")
s.ctx = context.Background()
}
span, ctx := opentracing.StartSpanFromContext(s.ctx, name)
span.SetTag("subsystem", "sandbox")
return span, ctx
}
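Nothing in these hunks shows where the root context originates; presumably the CLI starts a root span and hands the derived context to the API layer, so the sandbox, agent, hypervisor and mount spans all end up in one trace. A hedged sketch of that call pattern, reusing the createSandbox signature from this diff but with a hypothetical wrapper name:

// runWithTrace is illustrative only: every s.trace(...) span created later
// becomes a descendant of the "rootOperation" span started here.
func runWithTrace(config SandboxConfig, factory Factory) error {
	span, ctx := opentracing.StartSpanFromContext(context.Background(), "rootOperation")
	defer span.Finish()

	// createSandbox stores a context derived from ctx in Sandbox.ctx.
	_, err := createSandbox(ctx, config, factory)
	return err
}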
func (s *Sandbox) startProxy() error { func (s *Sandbox) startProxy() error {
// If the proxy is KataBuiltInProxyType type, it needs to restart the proxy // If the proxy is KataBuiltInProxyType type, it needs to restart the proxy
@ -477,6 +492,8 @@ type Sandbox struct {
shmSize uint64 shmSize uint64
sharePidNs bool sharePidNs bool
stateful bool stateful bool
ctx context.Context
} }
// ID returns the sandbox identifier string. // ID returns the sandbox identifier string.
@ -667,7 +684,10 @@ func (s *Sandbox) IOStream(containerID, processID string) (io.WriteCloser, io.Re
return c.ioStream(processID) return c.ioStream(processID)
} }
func createAssets(sandboxConfig *SandboxConfig) error { func createAssets(ctx context.Context, sandboxConfig *SandboxConfig) error {
span, _ := trace(ctx, "createAssets")
defer span.Finish()
kernel, err := newAsset(sandboxConfig, kernelAsset) kernel, err := newAsset(sandboxConfig, kernelAsset)
if err != nil { if err != nil {
return err return err
@ -701,12 +721,15 @@ func createAssets(sandboxConfig *SandboxConfig) error {
// It will create and store the sandbox structure, and then ask the hypervisor // It will create and store the sandbox structure, and then ask the hypervisor
// to physically create that sandbox, i.e. start a VM for that sandbox to eventually // to physically create that sandbox, i.e. start a VM for that sandbox to eventually
// be started. // be started.
func createSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) { func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
if err := createAssets(&sandboxConfig); err != nil { span, ctx := trace(ctx, "createSandbox")
defer span.Finish()
if err := createAssets(ctx, &sandboxConfig); err != nil {
return nil, err return nil, err
} }
s, err := newSandbox(sandboxConfig, factory) s, err := newSandbox(ctx, sandboxConfig, factory)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -746,7 +769,10 @@ func createSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, erro
return s, nil return s, nil
} }
func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) { func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
span, ctx := trace(ctx, "newSandbox")
defer span.Finish()
if sandboxConfig.valid() == false { if sandboxConfig.valid() == false {
return nil, fmt.Errorf("Invalid sandbox configuration") return nil, fmt.Errorf("Invalid sandbox configuration")
} }
@ -778,6 +804,7 @@ func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error)
shmSize: sandboxConfig.ShmSize, shmSize: sandboxConfig.ShmSize,
sharePidNs: sandboxConfig.SharePidNs, sharePidNs: sandboxConfig.SharePidNs,
stateful: sandboxConfig.Stateful, stateful: sandboxConfig.Stateful,
ctx: ctx,
} }
if err = globalSandboxList.addSandbox(s); err != nil { if err = globalSandboxList.addSandbox(s); err != nil {
@ -791,7 +818,7 @@ func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error)
} }
}() }()
if err = s.storage.createAllResources(s); err != nil { if err = s.storage.createAllResources(ctx, s); err != nil {
return nil, err return nil, err
} }
@ -801,7 +828,7 @@ func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error)
} }
}() }()
if err = s.hypervisor.init(s.id, &sandboxConfig.HypervisorConfig, sandboxConfig.VMConfig, s.storage); err != nil { if err = s.hypervisor.init(ctx, s.id, &sandboxConfig.HypervisorConfig, sandboxConfig.VMConfig, s.storage); err != nil {
return nil, err return nil, err
} }
@ -810,7 +837,7 @@ func newSandbox(sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error)
} }
agentConfig := newAgentConfig(sandboxConfig.AgentType, sandboxConfig.AgentConfig) agentConfig := newAgentConfig(sandboxConfig.AgentType, sandboxConfig.AgentConfig)
if err = s.agent.init(s, agentConfig); err != nil { if err = s.agent.init(ctx, s, agentConfig); err != nil {
return nil, err return nil, err
} }
@ -823,6 +850,9 @@ func (s *Sandbox) storeSandboxDevices() error {
// storeSandbox stores a sandbox config. // storeSandbox stores a sandbox config.
func (s *Sandbox) storeSandbox() error { func (s *Sandbox) storeSandbox() error {
span, _ := s.trace("storeSandbox")
defer span.Finish()
err := s.storage.storeSandboxResource(s.id, configFileType, *(s.config)) err := s.storage.storeSandboxResource(s.id, configFileType, *(s.config))
if err != nil { if err != nil {
return err return err
@ -839,7 +869,7 @@ func (s *Sandbox) storeSandbox() error {
} }
// fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox. // fetchSandbox fetches a sandbox config from a sandbox ID and returns a sandbox.
func fetchSandbox(sandboxID string) (sandbox *Sandbox, err error) { func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err error) {
virtLog.Info("fetch sandbox") virtLog.Info("fetch sandbox")
if sandboxID == "" { if sandboxID == "" {
return nil, errNeedSandboxID return nil, errNeedSandboxID
@ -857,7 +887,7 @@ func fetchSandbox(sandboxID string) (sandbox *Sandbox, err error) {
} }
// fetchSandbox is not supposed to create a new sandbox VM. // fetchSandbox is not supposed to create a new sandbox VM.
sandbox, err = createSandbox(config, nil) sandbox, err = createSandbox(ctx, config, nil)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create sandbox with config %+v: %v", config, err) return nil, fmt.Errorf("failed to create sandbox with config %+v: %v", config, err)
} }
@ -938,6 +968,9 @@ func (s *Sandbox) Delete() error {
} }
func (s *Sandbox) createNetwork() error { func (s *Sandbox) createNetwork() error {
span, _ := s.trace("createNetwork")
defer span.Finish()
var netNsPath string var netNsPath string
var netNsCreated bool var netNsCreated bool
var networkNS NetworkNamespace var networkNS NetworkNamespace
@ -951,7 +984,7 @@ func (s *Sandbox) createNetwork() error {
}() }()
// Initialize the network. // Initialize the network.
netNsPath, netNsCreated, err = s.network.init(s.config.NetworkConfig) netNsPath, netNsCreated, err = s.network.init(s.ctx, s.config.NetworkConfig)
if err != nil { if err != nil {
return err return err
} }
@ -977,6 +1010,9 @@ func (s *Sandbox) createNetwork() error {
} }
func (s *Sandbox) removeNetwork() error { func (s *Sandbox) removeNetwork() error {
span, _ := s.trace("removeNetwork")
defer span.Finish()
return s.network.remove(s, s.networkNS, s.networkNS.NetNsCreated) return s.network.remove(s, s.networkNS, s.networkNS.NetNsCreated)
} }
@ -1069,13 +1105,16 @@ func (s *Sandbox) ListRoutes() ([]*grpc.Route, error) {
// startVM starts the VM. // startVM starts the VM.
func (s *Sandbox) startVM() error { func (s *Sandbox) startVM() error {
span, ctx := s.trace("startVM")
defer span.Finish()
s.Logger().Info("Starting VM") s.Logger().Info("Starting VM")
// FIXME: This would break cached VMs. We need network hotplug and move // FIXME: This would break cached VMs. We need network hotplug and move
// oci hooks and netns handling to cli. See #273. // oci hooks and netns handling to cli. See #273.
if err := s.network.run(s.networkNS.NetNsPath, func() error { if err := s.network.run(s.networkNS.NetNsPath, func() error {
if s.factory != nil { if s.factory != nil {
vm, err := s.factory.GetVM(VMConfig{ vm, err := s.factory.GetVM(ctx, VMConfig{
HypervisorType: s.config.HypervisorType, HypervisorType: s.config.HypervisorType,
HypervisorConfig: s.config.HypervisorConfig, HypervisorConfig: s.config.HypervisorConfig,
AgentType: s.config.AgentType, AgentType: s.config.AgentType,
@ -1109,6 +1148,9 @@ func (s *Sandbox) startVM() error {
// stopVM: stop the sandbox's VM // stopVM: stop the sandbox's VM
func (s *Sandbox) stopVM() error { func (s *Sandbox) stopVM() error {
span, _ := s.trace("stopVM")
defer span.Finish()
return s.hypervisor.stopSandbox() return s.hypervisor.stopSandbox()
} }
@ -1291,6 +1333,9 @@ func (s *Sandbox) StatsContainer(containerID string) (ContainerStats, error) {
// createContainers registers all containers to the proxy, creates the // createContainers registers all containers to the proxy, creates the
// containers in the guest and starts one shim per container. // containers in the guest and starts one shim per container.
func (s *Sandbox) createContainers() error { func (s *Sandbox) createContainers() error {
span, _ := s.trace("createContainers")
defer span.Finish()
for _, contConfig := range s.config.Containers { for _, contConfig := range s.config.Containers {
newContainer, err := createContainer(s, contConfig) newContainer, err := createContainer(s, contConfig)
if err != nil { if err != nil {
@ -1330,6 +1375,9 @@ func (s *Sandbox) start() error {
// stop stops a sandbox. The containers that make up the sandbox // stop stops a sandbox. The containers that make up the sandbox
// will be destroyed. // will be destroyed.
func (s *Sandbox) stop() error { func (s *Sandbox) stop() error {
span, _ := s.trace("stop")
defer span.Finish()
if err := s.state.validTransition(s.state.State, StateStopped); err != nil { if err := s.state.validTransition(s.state.State, StateStopped); err != nil {
return err return err
} }
@ -1510,7 +1558,10 @@ func (s *Sandbox) deleteContainersState() error {
// togglePauseSandbox pauses a sandbox if pause is set to true, else it resumes // togglePauseSandbox pauses a sandbox if pause is set to true, else it resumes
// it. // it.
func togglePauseSandbox(sandboxID string, pause bool) (*Sandbox, error) { func togglePauseSandbox(ctx context.Context, sandboxID string, pause bool) (*Sandbox, error) {
span, ctx := trace(ctx, "togglePauseSandbox")
defer span.Finish()
if sandboxID == "" { if sandboxID == "" {
return nil, errNeedSandbox return nil, errNeedSandbox
} }
@ -1522,7 +1573,7 @@ func togglePauseSandbox(sandboxID string, pause bool) (*Sandbox, error) {
defer unlockSandbox(lockFile) defer unlockSandbox(lockFile)
// Fetch the sandbox from storage and create it. // Fetch the sandbox from storage and create it.
s, err := fetchSandbox(sandboxID) s, err := fetchSandbox(ctx, sandboxID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1544,6 +1595,9 @@ func togglePauseSandbox(sandboxID string, pause bool) (*Sandbox, error) {
// HotplugAddDevice is used to add a device to the sandbox // HotplugAddDevice is used to add a device to the sandbox
// Sandbox implement DeviceReceiver interface from device/api/interface.go // Sandbox implement DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType) error { func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType) error {
span, _ := s.trace("HotplugAddDevice")
defer span.Finish()
switch devType { switch devType {
case config.DeviceVFIO: case config.DeviceVFIO:
vfioDevices, ok := device.GetDeviceInfo().([]*config.VFIODev) vfioDevices, ok := device.GetDeviceInfo().([]*config.VFIODev)
View File
@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -51,7 +52,7 @@ func testCreateSandbox(t *testing.T, id string,
Containers: containers, Containers: containers,
} }
sandbox, err := createSandbox(sconfig, nil) sandbox, err := createSandbox(context.Background(), sconfig, nil)
if err != nil { if err != nil {
return nil, fmt.Errorf("Could not create sandbox: %s", err) return nil, fmt.Errorf("Could not create sandbox: %s", err)
} }
@ -585,7 +586,7 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
} }
// force state to be read from disk // force state to be read from disk
p2, err := fetchSandbox(p.ID()) p2, err := fetchSandbox(context.Background(), p.ID())
if err != nil { if err != nil {
t.Fatalf("Failed to fetch sandbox %v: %v", p.ID(), err) t.Fatalf("Failed to fetch sandbox %v: %v", p.ID(), err)
} }
@ -1184,7 +1185,7 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) {
} }
containers[c.id].sandbox = &sandbox containers[c.id].sandbox = &sandbox
err = sandbox.storage.createAllResources(&sandbox) err = sandbox.storage.createAllResources(context.Background(), &sandbox)
assert.Nil(t, err, "Error while create all resources for sandbox") assert.Nil(t, err, "Error while create all resources for sandbox")
err = sandbox.storeSandboxDevices() err = sandbox.storeSandboxDevices()
@ -1226,7 +1227,7 @@ func TestSandboxCreateAssets(t *testing.T) {
HypervisorConfig: hc, HypervisorConfig: hc,
} }
err = createAssets(p) err = createAssets(context.Background(), p)
assert.Nil(err) assert.Nil(err)
a, ok := p.HypervisorConfig.customAssets[kernelAsset] a, ok := p.HypervisorConfig.customAssets[kernelAsset]
@ -1242,7 +1243,7 @@ func TestSandboxCreateAssets(t *testing.T) {
HypervisorConfig: hc, HypervisorConfig: hc,
} }
err = createAssets(p) err = createAssets(context.Background(), p)
assert.NotNil(err) assert.NotNil(err)
} }
View File
@ -7,6 +7,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
"syscall" "syscall"
@ -17,7 +18,7 @@ func TestBindMountInvalidSourceSymlink(t *testing.T) {
source := filepath.Join(testDir, "fooFile") source := filepath.Join(testDir, "fooFile")
os.Remove(source) os.Remove(source)
err := bindMount(source, "", false) err := bindMount(context.Background(), source, "", false)
if err == nil { if err == nil {
t.Fatal() t.Fatal()
} }
@ -39,7 +40,7 @@ func TestBindMountFailingMount(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
err = bindMount(source, "", false) err = bindMount(context.Background(), source, "", false)
if err == nil { if err == nil {
t.Fatal() t.Fatal()
} }
@ -66,7 +67,7 @@ func TestBindMountSuccessful(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
err = bindMount(source, dest, false) err = bindMount(context.Background(), source, dest, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -95,7 +96,7 @@ func TestBindMountReadonlySuccessful(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
err = bindMount(source, dest, true) err = bindMount(context.Background(), source, dest, true)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
View File
@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
@ -41,7 +42,7 @@ func (c *VMConfig) Valid() error {
} }
// NewVM creates a new VM based on provided VMConfig. // NewVM creates a new VM based on provided VMConfig.
func NewVM(config VMConfig) (*VM, error) { func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
hypervisor, err := newHypervisor(config.HypervisorType) hypervisor, err := newHypervisor(config.HypervisorType)
if err != nil { if err != nil {
return nil, err return nil, err
@ -61,7 +62,7 @@ func NewVM(config VMConfig) (*VM, error) {
} }
}() }()
if err = hypervisor.init(id, &config.HypervisorConfig, Resources{}, &filesystem{}); err != nil { if err = hypervisor.init(ctx, id, &config.HypervisorConfig, Resources{}, &filesystem{}); err != nil {
return nil, err return nil, err
} }
View File
@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"io/ioutil" "io/ioutil"
"testing" "testing"
@ -25,12 +26,14 @@ func TestNewVM(t *testing.T) {
ImagePath: testDir, ImagePath: testDir,
} }
ctx := context.Background()
var vm *VM var vm *VM
_, err := NewVM(config) _, err := NewVM(ctx, config)
assert.Error(err) assert.Error(err)
config.HypervisorConfig = hyperConfig config.HypervisorConfig = hyperConfig
vm, err = NewVM(config) vm, err = NewVM(ctx, config)
assert.Nil(err) assert.Nil(err)
// VM operations // VM operations
@ -55,15 +58,15 @@ func TestNewVM(t *testing.T) {
// template VM // template VM
config.HypervisorConfig.BootFromTemplate = true config.HypervisorConfig.BootFromTemplate = true
_, err = NewVM(config) _, err = NewVM(ctx, config)
assert.Error(err) assert.Error(err)
config.HypervisorConfig.MemoryPath = testDir config.HypervisorConfig.MemoryPath = testDir
_, err = NewVM(config) _, err = NewVM(ctx, config)
assert.Error(err) assert.Error(err)
config.HypervisorConfig.DevicesStatePath = testDir config.HypervisorConfig.DevicesStatePath = testDir
_, err = NewVM(config) _, err = NewVM(ctx, config)
assert.Nil(err) assert.Nil(err)
} }