runtime: Fix ordering of trace spans

A significant number of trace calls did not use a parent context, so
spans were not ordered correctly in trace output. Add a local context
to functions for use in trace calls so that spans nest under their
parent span. Additionally, change whether the trace function returns
a context in some virtcontainers functions, and use the existing
context rather than the background context in bindMount() so that its
span is a child of a parent span.

Fixes #1355

Signed-off-by: Chelsea Mafrica <chelsea.e.mafrica@intel.com>
commit 6b0dc60dda
parent 50f317dcff
Author: Chelsea Mafrica <chelsea.e.mafrica@intel.com>
Date:   2021-02-09 16:30:50 -08:00
67 changed files with 1103 additions and 1056 deletions
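
The whole diff applies one pattern, sketched below: a trace helper takes the
caller's context as the parent instead of a context stored at service-creation
time, and callers thread the returned context into downstream calls so spans
nest in call order. This is a minimal, self-contained sketch assuming the
OpenTelemetry Go API; the trace() helper mirrors the ones changed below but is
illustrative, not the exact runtime code.

// Sketch of the parent-context tracing pattern this commit applies.
// Assumes the OpenTelemetry Go API; trace() is illustrative.
package main

import (
    "context"

    "go.opentelemetry.io/otel"
    otelTrace "go.opentelemetry.io/otel/trace"
)

// trace starts a span as a child of parent rather than of a stored
// background context, so spans are ordered by call hierarchy.
func trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
    if parent == nil {
        parent = context.Background()
    }
    ctx, span := otel.Tracer("kata").Start(parent, name)
    return span, ctx
}

func inner(ctx context.Context) {
    span, _ := trace(ctx, "inner") // recorded as a child of "outer"
    defer span.End()
}

func outer(ctx context.Context) {
    span, ctx := trace(ctx, "outer") // thread the returned ctx downstream
    defer span.End()
    inner(ctx)
}

func main() {
    outer(context.Background())
}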

View File

@@ -73,7 +73,7 @@ func (s *cacheServer) GetBaseVM(ctx context.Context, empty *types.Empty) (*pb.Gr
         return nil, errors.Wrapf(err, "failed to GetBaseVM")
     }
-    return vm.ToGrpc(config)
+    return vm.ToGrpc(ctx, config)
 }

 func (s *cacheServer) quit() {

View File

@@ -75,7 +75,7 @@ func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest) (*con
     // create span
     var span otelTrace.Span
-    span, s.ctx = trace(s.ctx, "create")
+    span, s.ctx = trace(ctx, "create")
     defer span.End()

     if rootFs.Mounted, err = checkAndMount(s, r); err != nil {
@@ -111,7 +111,7 @@ func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest) (*con
     case vc.PodContainer:
         var span otelTrace.Span
-        span, s.ctx = trace(s.ctx, "create")
+        span, ctx = trace(s.ctx, "create")
         defer span.End()

         if s.sandbox == nil {

View File

@@ -17,12 +17,12 @@ import (
 func deleteContainer(ctx context.Context, s *service, c *container) error {
     if !c.cType.IsSandbox() {
         if c.status != task.StatusStopped {
-            if _, err := s.sandbox.StopContainer(c.id, false); err != nil && !isNotFound(err) {
+            if _, err := s.sandbox.StopContainer(ctx, c.id, false); err != nil && !isNotFound(err) {
                 return err
             }
         }
-        if _, err := s.sandbox.DeleteContainer(c.id); err != nil && !isNotFound(err) {
+        if _, err := s.sandbox.DeleteContainer(ctx, c.id); err != nil && !isNotFound(err) {
             return err
         }
     }

View File

@@ -6,6 +6,8 @@
 package containerdshim

 import (
+    "context"
+
     "github.com/containerd/cgroups"
     "github.com/containerd/typeurl"
@@ -13,8 +15,8 @@ import (
     vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
 )

-func marshalMetrics(s *service, containerID string) (*google_protobuf.Any, error) {
-    stats, err := s.sandbox.StatsContainer(containerID)
+func marshalMetrics(ctx context.Context, s *service, containerID string) (*google_protobuf.Any, error) {
+    stats, err := s.sandbox.StatsContainer(ctx, containerID)
     if err != nil {
         return nil, err
     }

View File

@@ -98,7 +98,7 @@ func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shi
     go s.processExits()

-    go s.forward(publisher)
+    go s.forward(ctx, publisher)

     return s, nil
 }
@@ -233,9 +233,9 @@ func (s *service) StartShim(ctx context.Context, id, containerdBinary, container
     return address, nil
 }

-func (s *service) forward(publisher events.Publisher) {
+func (s *service) forward(ctx context.Context, publisher events.Publisher) {
     for e := range s.events {
-        ctx, cancel := context.WithTimeout(s.ctx, timeOut)
+        ctx, cancel := context.WithTimeout(ctx, timeOut)
         err := publisher.Publish(ctx, getTopic(e), e)
         cancel()
         if err != nil {
@@ -300,7 +300,7 @@ func trace(ctx context.Context, name string) (otelTrace.Span, context.Context) {
 }

 func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err error) {
-    span, _ := trace(s.ctx, "Cleanup")
+    span, ctx := trace(ctx, "Cleanup")
     defer span.End()

     //Since the binary cleanup will return the DeleteResponse from stdout to
@@ -411,7 +411,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *
 // Start a process
 func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAPI.StartResponse, err error) {
-    span, _ := trace(s.ctx, "Start")
+    span, ctx := trace(ctx, "Start")
     defer span.End()

     start := time.Now()
@@ -462,7 +462,7 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP
 // Delete the initial process and container
 func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *taskAPI.DeleteResponse, err error) {
-    span, _ := trace(s.ctx, "Delete")
+    span, ctx := trace(ctx, "Delete")
     defer span.End()

     start := time.Now()
@@ -514,7 +514,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
 // Exec an additional process inside the container
 func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "Exec")
+    span, ctx := trace(ctx, "Exec")
     defer span.End()

     start := time.Now()
@@ -552,7 +552,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *p
 // ResizePty of a process
 func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "ResizePty")
+    span, ctx := trace(ctx, "ResizePty")
     defer span.End()

     start := time.Now()
@@ -581,7 +581,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_
         processID = execs.id
     }

-    err = s.sandbox.WinsizeProcess(c.id, processID, r.Height, r.Width)
+    err = s.sandbox.WinsizeProcess(ctx, c.id, processID, r.Height, r.Width)
     if err != nil {
         return nil, err
     }
@@ -591,7 +591,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_
 // State returns runtime state information for a process
 func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAPI.StateResponse, err error) {
-    span, _ := trace(s.ctx, "State")
+    span, ctx := trace(ctx, "State")
     defer span.End()

     start := time.Now()
@@ -643,7 +643,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP
 // Pause the container
 func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "Pause")
+    span, ctx := trace(ctx, "Pause")
     defer span.End()

     start := time.Now()
@@ -662,7 +662,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes
     c.status = task.StatusPausing

-    err = s.sandbox.PauseContainer(r.ID)
+    err = s.sandbox.PauseContainer(ctx, r.ID)
     if err == nil {
         c.status = task.StatusPaused
         s.send(&eventstypes.TaskPaused{
@@ -682,7 +682,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes
 // Resume the container
 func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "Resume")
+    span, ctx := trace(ctx, "Resume")
     defer span.End()

     start := time.Now()
@@ -699,7 +699,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp
         return nil, err
     }

-    err = s.sandbox.ResumeContainer(c.id)
+    err = s.sandbox.ResumeContainer(ctx, c.id)
     if err == nil {
         c.status = task.StatusRunning
         s.send(&eventstypes.TaskResumed{
@@ -719,7 +719,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp
 // Kill a process with the provided signal
 func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "Kill")
+    span, ctx := trace(ctx, "Kill")
     defer span.End()

     start := time.Now()
@@ -773,14 +773,14 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E
         return empty, nil
     }

-    return empty, s.sandbox.SignalProcess(c.id, processID, signum, r.All)
+    return empty, s.sandbox.SignalProcess(ctx, c.id, processID, signum, r.All)
 }

 // Pids returns all pids inside the container
 // Since for kata, it cannot get the process's pid from VM,
 // thus only return the Shim's pid directly.
 func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.PidsResponse, err error) {
-    span, _ := trace(s.ctx, "Pids")
+    span, ctx := trace(ctx, "Pids")
     defer span.End()

     var processes []*task.ProcessInfo
@@ -803,7 +803,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.
 // CloseIO of a process
 func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "CloseIO")
+    span, ctx := trace(ctx, "CloseIO")
     defer span.End()

     start := time.Now()
@@ -844,7 +844,7 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *pt
 // Checkpoint the container
 func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "Checkpoint")
+    span, ctx := trace(ctx, "Checkpoint")
     defer span.End()

     start := time.Now()
@@ -858,7 +858,7 @@ func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskReque
 // Connect returns shim information such as the shim's pid
 func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *taskAPI.ConnectResponse, err error) {
-    span, _ := trace(s.ctx, "Connect")
+    span, ctx := trace(ctx, "Connect")
     defer span.End()

     start := time.Now()
@@ -878,7 +878,7 @@ func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *ta
 }

 func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "Shutdown")
+    span, ctx := trace(ctx, "Shutdown")

     start := time.Now()
     defer func() {
@@ -906,7 +906,7 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *
 }

 func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAPI.StatsResponse, err error) {
-    span, _ := trace(s.ctx, "Stats")
+    span, ctx := trace(ctx, "Stats")
     defer span.End()

     start := time.Now()
@@ -923,7 +923,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP
         return nil, err
     }

-    data, err := marshalMetrics(s, c.id)
+    data, err := marshalMetrics(ctx, s, c.id)
     if err != nil {
         return nil, err
     }
@@ -935,7 +935,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP
 // Update a running container
 func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *ptypes.Empty, err error) {
-    span, _ := trace(s.ctx, "Update")
+    span, ctx := trace(ctx, "Update")
     defer span.End()

     start := time.Now()
@@ -957,7 +957,7 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *
         return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "Invalid resources type for %s", s.id)
     }

-    err = s.sandbox.UpdateContainer(r.ID, *resources)
+    err = s.sandbox.UpdateContainer(ctx, r.ID, *resources)
     if err != nil {
         return nil, errdefs.ToGRPC(err)
     }
@@ -967,7 +967,7 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *
 // Wait for a process to exit
 func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (_ *taskAPI.WaitResponse, err error) {
-    span, _ := trace(s.ctx, "Wait")
+    span, ctx := trace(ctx, "Wait")
     defer span.End()

     var ret uint32
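
A note on forward() above: each event publish derives a short-lived timeout
context from the caller's context rather than from the service's stored
background context. A minimal sketch of that per-call pattern follows; the
publisher and the timeOut value are illustrative stand-ins, not the shim's
actual types.

// Sketch of the per-call timeout pattern used in forward(): derive a
// bounded child context from the caller's ctx for each publish, and
// cancel it promptly to release the timer.
package main

import (
    "context"
    "fmt"
    "time"
)

const timeOut = 5 * time.Second // assumed value, stands in for the shim's timeOut

// publish is an illustrative stand-in for publisher.Publish(ctx, topic, event).
func publish(ctx context.Context, topic string) error {
    select {
    case <-time.After(10 * time.Millisecond):
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func forward(ctx context.Context, topics <-chan string) {
    for t := range topics {
        callCtx, cancel := context.WithTimeout(ctx, timeOut)
        err := publish(callCtx, t)
        cancel() // release the timer even on success
        if err != nil {
            fmt.Println("publish failed:", err)
        }
    }
}

func main() {
    topics := make(chan string, 1)
    topics <- "task-exit"
    close(topics)
    forward(context.Background(), topics)
}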

View File

@@ -75,7 +75,8 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) {
     }

     // get metrics from agent
-    agentMetrics, err := s.sandbox.GetAgentMetrics()
+    // can not pass context to serveMetrics, so use background context
+    agentMetrics, err := s.sandbox.GetAgentMetrics(context.Background())
     if err != nil {
         shimMgtLog.WithError(err).Error("failed GetAgentMetrics")
         if isGRPCErrorCode(codes.NotFound, err) {
@@ -96,7 +97,7 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) {
     // collect pod overhead metrics need sleep to get the changes of cpu/memory resources usage
     // so here only trigger the collect operation, and the data will be gathered
     // next time collection request from Prometheus server
-    go s.setPodOverheadMetrics()
+    go s.setPodOverheadMetrics(context.Background())
 }

 func decodeAgentMetrics(body string) []*dto.MetricFamily {

View File

@@ -6,6 +6,7 @@
 package containerdshim

 import (
+    "context"
     "time"

     mutils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
@@ -135,15 +136,15 @@ func updateShimMetrics() error {
 }

 // statsSandbox returns a detailed sandbox stats.
-func (s *service) statsSandbox() (vc.SandboxStats, []vc.ContainerStats, error) {
-    sandboxStats, err := s.sandbox.Stats()
+func (s *service) statsSandbox(ctx context.Context) (vc.SandboxStats, []vc.ContainerStats, error) {
+    sandboxStats, err := s.sandbox.Stats(ctx)
     if err != nil {
         return vc.SandboxStats{}, []vc.ContainerStats{}, err
     }

     containerStats := []vc.ContainerStats{}
     for _, c := range s.sandbox.GetAllContainers() {
-        cstats, err := s.sandbox.StatsContainer(c.ID())
+        cstats, err := s.sandbox.StatsContainer(ctx, c.ID())
         if err != nil {
             return vc.SandboxStats{}, []vc.ContainerStats{}, err
         }
@@ -179,9 +180,9 @@ func calcOverhead(initialSandboxStats, finishSandboxStats vc.SandboxStats, initi
     return float64(hostMemoryUsage - guestMemoryUsage), float64(cpuUsageHost - cpuUsageGuest)
 }

-func (s *service) getPodOverhead() (float64, float64, error) {
+func (s *service) getPodOverhead(ctx context.Context) (float64, float64, error) {
     initTime := time.Now().UnixNano()
-    initialSandboxStats, initialContainerStats, err := s.statsSandbox()
+    initialSandboxStats, initialContainerStats, err := s.statsSandbox(ctx)
     if err != nil {
         return 0, 0, err
     }
@@ -191,7 +192,7 @@ func (s *service) getPodOverhead() (float64, float64, error) {
     finishtTime := time.Now().UnixNano()
     deltaTime := float64(finishtTime - initTime)

-    finishSandboxStats, finishContainersStats, err := s.statsSandbox()
+    finishSandboxStats, finishContainersStats, err := s.statsSandbox(ctx)
     if err != nil {
         return 0, 0, err
     }
@@ -199,8 +200,8 @@ func (s *service) getPodOverhead() (float64, float64, error) {
     return mem, cpu, nil
 }

-func (s *service) setPodOverheadMetrics() error {
-    mem, cpu, err := s.getPodOverhead()
+func (s *service) setPodOverheadMetrics(ctx context.Context) error {
+    mem, cpu, err := s.getPodOverhead(ctx)
     if err != nil {
         return err
     }

View File

@@ -26,22 +26,22 @@ func startContainer(ctx context.Context, s *service, c *container) error {
     }

     if c.cType.IsSandbox() {
-        err := s.sandbox.Start()
+        err := s.sandbox.Start(ctx)
         if err != nil {
             return err
         }
         // Start monitor after starting sandbox
-        s.monitor, err = s.sandbox.Monitor()
+        s.monitor, err = s.sandbox.Monitor(ctx)
         if err != nil {
             return err
         }
-        go watchSandbox(s)
+        go watchSandbox(ctx, s)

         // We don't rely on the context passed to startContainer as it can be cancelled after
         // this rpc call.
-        go watchOOMEvents(s.ctx, s)
+        go watchOOMEvents(ctx, s)
     } else {
-        _, err := s.sandbox.StartContainer(c.id)
+        _, err := s.sandbox.StartContainer(ctx, c.id)
         if err != nil {
             return err
         }
@@ -82,7 +82,7 @@ func startContainer(ctx context.Context, s *service, c *container) error {
         close(c.stdinCloser)
     }

-    go wait(s, c, "")
+    go wait(ctx, s, c, "")

     return nil
 }
@@ -99,7 +99,7 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (*ex
         return nil, err
     }

-    _, proc, err := s.sandbox.EnterContainer(containerID, *execs.cmds)
+    _, proc, err := s.sandbox.EnterContainer(ctx, containerID, *execs.cmds)
     if err != nil {
         err := fmt.Errorf("cannot enter container %s, with err %s", containerID, err)
         return nil, err
@@ -108,7 +108,7 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (*ex
     execs.status = task.StatusRunning
     if execs.tty.height != 0 && execs.tty.width != 0 {
-        err = s.sandbox.WinsizeProcess(c.id, execs.id, execs.tty.height, execs.tty.width)
+        err = s.sandbox.WinsizeProcess(ctx, c.id, execs.id, execs.tty.height, execs.tty.width)
         if err != nil {
             return nil, err
         }
@@ -129,7 +129,7 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (*ex
     go ioCopy(execs.exitIOch, execs.stdinCloser, tty, stdin, stdout, stderr)

-    go wait(s, c, execID)
+    go wait(ctx, s, c, execID)

     return execs, nil
 }

View File

@@ -22,7 +22,7 @@ import (
 const defaultCheckInterval = 1 * time.Second

-func wait(s *service, c *container, execID string) (int32, error) {
+func wait(ctx context.Context, s *service, c *container, execID string) (int32, error) {
     var execs *exec
     var err error
@@ -43,7 +43,7 @@ func wait(s *service, c *container, execID string) (int32, error) {
         processID = execs.id
     }

-    ret, err := s.sandbox.WaitProcess(c.id, processID)
+    ret, err := s.sandbox.WaitProcess(ctx, c.id, processID)
     if err != nil {
         shimLog.WithError(err).WithFields(logrus.Fields{
             "container": c.id,
@@ -65,15 +65,15 @@ func wait(s *service, c *container, execID string) (int32, error) {
         if s.monitor != nil {
             s.monitor <- nil
         }
-        if err = s.sandbox.Stop(true); err != nil {
+        if err = s.sandbox.Stop(ctx, true); err != nil {
             shimLog.WithField("sandbox", s.sandbox.ID()).Error("failed to stop sandbox")
         }

-        if err = s.sandbox.Delete(); err != nil {
+        if err = s.sandbox.Delete(ctx); err != nil {
             shimLog.WithField("sandbox", s.sandbox.ID()).Error("failed to delete sandbox")
         }
     } else {
-        if _, err = s.sandbox.StopContainer(c.id, false); err != nil {
+        if _, err = s.sandbox.StopContainer(ctx, c.id, false); err != nil {
             shimLog.WithError(err).WithField("container", c.id).Warn("stop container failed")
         }
     }
@@ -97,7 +97,7 @@ func wait(s *service, c *container, execID string) (int32, error) {
     return ret, nil
 }

-func watchSandbox(s *service) {
+func watchSandbox(ctx context.Context, s *service) {
     if s.monitor == nil {
         return
     }
@@ -111,11 +111,11 @@ func watchSandbox(s *service) {
     defer s.mu.Unlock()
     // sandbox malfunctioning, cleanup as much as we can
     shimLog.WithError(err).Warn("sandbox stopped unexpectedly")
-    err = s.sandbox.Stop(true)
+    err = s.sandbox.Stop(ctx, true)
     if err != nil {
         shimLog.WithError(err).Warn("stop sandbox failed")
     }
-    err = s.sandbox.Delete()
+    err = s.sandbox.Delete(ctx)
     if err != nil {
         shimLog.WithError(err).Warn("delete sandbox failed")
     }
@@ -145,7 +145,7 @@ func watchOOMEvents(ctx context.Context, s *service) {
         case <-ctx.Done():
             return
         default:
-            containerID, err := s.sandbox.GetOOMEvent()
+            containerID, err := s.sandbox.GetOOMEvent(ctx)
             if err != nil {
                 shimLog.WithError(err).Warn("failed to get OOM event from sandbox")
                 // If the GetOOMEvent call is not implemented, then the agent is most likely an older version,
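
watchSandbox and watchOOMEvents above now receive the caller's context, which
gives each watcher goroutine the standard cancellable-loop shape: select on
ctx.Done() and exit when the caller cancels. A minimal sketch of that shape
follows; nextEvent is an illustrative stand-in for s.sandbox.GetOOMEvent(ctx).

// Sketch of the cancellable watcher loop shape used by watchOOMEvents:
// poll for events until the parent context is cancelled.
package main

import (
    "context"
    "fmt"
    "time"
)

// nextEvent is an illustrative stand-in for the blocking event fetch.
func nextEvent(ctx context.Context) (string, error) {
    select {
    case <-time.After(20 * time.Millisecond):
        return "container-1", nil
    case <-ctx.Done():
        return "", ctx.Err()
    }
}

func watch(ctx context.Context) {
    for {
        select {
        case <-ctx.Done():
            return // caller cancelled; stop watching
        default:
            id, err := nextEvent(ctx)
            if err != nil {
                return
            }
            fmt.Println("OOM in", id)
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    watch(ctx)
}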

View File

@@ -230,7 +230,7 @@ func CreateContainer(ctx context.Context, sandbox vc.VCSandbox, ociSpec specs.Sp
     span.SetAttributes(label.Key("sandbox").String(sandboxID))

-    c, err = sandbox.CreateContainer(contConfig)
+    c, err = sandbox.CreateContainer(ctx, contConfig)
     if err != nil {
         return vc.Process{}, err
     }

View File

@@ -149,8 +149,8 @@ func (a *Acrn) kernelParameters() string {
 }

 // Adds all capabilities supported by Acrn implementation of hypervisor interface
-func (a *Acrn) capabilities() types.Capabilities {
-    span, _ := a.trace("capabilities")
+func (a *Acrn) capabilities(ctx context.Context) types.Capabilities {
+    span, _ := a.trace(ctx, "capabilities")
     defer span.End()

     return a.arch.capabilities()
@@ -207,14 +207,14 @@ func (a *Acrn) Logger() *logrus.Entry {
     return virtLog.WithField("subsystem", "acrn")
 }

-func (a *Acrn) trace(name string) (otelTrace.Span, context.Context) {
-    if a.ctx == nil {
+func (a *Acrn) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
+    if parent == nil {
         a.Logger().WithField("type", "bug").Error("trace called before context set")
-        a.ctx = context.Background()
+        parent = context.Background()
     }

     tracer := otel.Tracer("kata")
-    ctx, span := tracer.Start(a.ctx, name)
+    ctx, span := tracer.Start(parent, name)
     span.SetAttributes([]label.KeyValue{label.Key("subsystem").String("hypervisor"), label.Key("type").String("acrn")}...)

     return span, ctx
@@ -248,14 +248,14 @@ func (a *Acrn) appendImage(devices []Device, imagePath string) ([]Device, error)
     return devices, nil
 }

-func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
+func (a *Acrn) buildDevices(ctx context.Context, imagePath string) ([]Device, error) {
     var devices []Device

     if imagePath == "" {
         return nil, fmt.Errorf("Image Path should not be empty: %s", imagePath)
     }

-    _, console, err := a.getSandboxConsole(a.id)
+    _, console, err := a.getSandboxConsole(ctx, a.id)
     if err != nil {
         return nil, err
     }
@@ -278,7 +278,7 @@ func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
     // holder for container rootfs (as acrn doesn't support hot-plug).
     // Once the container rootfs is known, replace the dummy backend
     // with actual path (using block rescan feature in acrn)
-    devices, err = a.createDummyVirtioBlkDev(devices)
+    devices, err = a.createDummyVirtioBlkDev(ctx, devices)
     if err != nil {
         return nil, err
     }
@@ -287,8 +287,8 @@ func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
 }

 // setup sets the Acrn structure up.
-func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
-    span, _ := a.trace("setup")
+func (a *Acrn) setup(ctx context.Context, id string, hypervisorConfig *HypervisorConfig) error {
+    span, _ := a.trace(ctx, "setup")
     defer span.End()

     err := hypervisorConfig.valid()
@@ -330,8 +330,8 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
     return nil
 }

-func (a *Acrn) createDummyVirtioBlkDev(devices []Device) ([]Device, error) {
-    span, _ := a.trace("createDummyVirtioBlkDev")
+func (a *Acrn) createDummyVirtioBlkDev(ctx context.Context, devices []Device) ([]Device, error) {
+    span, _ := a.trace(ctx, "createDummyVirtioBlkDev")
     defer span.End()

     // Since acrn doesn't support hot-plug, dummy virtio-blk
@@ -354,10 +354,11 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
     // Save the tracing context
     a.ctx = ctx

-    span, _ := a.trace("createSandbox")
+    var span otelTrace.Span
+    span, ctx = a.trace(ctx, "createSandbox")
     defer span.End()

-    if err := a.setup(id, hypervisorConfig); err != nil {
+    if err := a.setup(ctx, id, hypervisorConfig); err != nil {
         return err
     }
@@ -386,7 +387,7 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
         return fmt.Errorf("ACRN UUID should not be empty")
     }

-    devices, err := a.buildDevices(imagePath)
+    devices, err := a.buildDevices(ctx, imagePath)
     if err != nil {
         return err
     }
@@ -418,8 +419,8 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
 }

 // startSandbox will start the Sandbox's VM.
-func (a *Acrn) startSandbox(timeoutSecs int) error {
-    span, _ := a.trace("startSandbox")
+func (a *Acrn) startSandbox(ctx context.Context, timeoutSecs int) error {
+    span, ctx := a.trace(ctx, "startSandbox")
     defer span.End()

     if a.config.Debug {
@@ -455,7 +456,7 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
     }

     a.state.PID = PID
-    if err = a.waitSandbox(timeoutSecs); err != nil {
+    if err = a.waitSandbox(ctx, timeoutSecs); err != nil {
         a.Logger().WithField("acrn wait failed:", err).Debug()
         return err
     }
@@ -464,8 +465,8 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
 }

 // waitSandbox will wait for the Sandbox's VM to be up and running.
-func (a *Acrn) waitSandbox(timeoutSecs int) error {
-    span, _ := a.trace("waitSandbox")
+func (a *Acrn) waitSandbox(ctx context.Context, timeoutSecs int) error {
+    span, _ := a.trace(ctx, "waitSandbox")
     defer span.End()

     if timeoutSecs < 0 {
@@ -478,8 +479,8 @@ func (a *Acrn) waitSandbox(timeoutSecs int) error {
 }

 // stopSandbox will stop the Sandbox's VM.
-func (a *Acrn) stopSandbox() (err error) {
-    span, _ := a.trace("stopSandbox")
+func (a *Acrn) stopSandbox(ctx context.Context) (err error) {
+    span, _ := a.trace(ctx, "stopSandbox")
     defer span.End()

     a.Logger().Info("Stopping acrn VM")
@@ -568,8 +569,8 @@ func (a *Acrn) updateBlockDevice(drive *config.BlockDrive) error {
     return err
 }

-func (a *Acrn) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-    span, _ := a.trace("hotplugAddDevice")
+func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+    span, ctx := a.trace(ctx, "hotplugAddDevice")
     defer span.End()

     switch devType {
@@ -582,8 +583,8 @@ func (a *Acrn) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
     }
 }

-func (a *Acrn) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-    span, _ := a.trace("hotplugRemoveDevice")
+func (a *Acrn) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+    span, ctx := a.trace(ctx, "hotplugRemoveDevice")
     defer span.End()

     // Not supported. return success
@@ -591,8 +592,8 @@ func (a *Acrn) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int
     return nil, nil
 }

-func (a *Acrn) pauseSandbox() error {
-    span, _ := a.trace("pauseSandbox")
+func (a *Acrn) pauseSandbox(ctx context.Context) error {
+    span, _ := a.trace(ctx, "pauseSandbox")
     defer span.End()

     // Not supported. return success
@@ -600,8 +601,8 @@ func (a *Acrn) pauseSandbox() error {
     return nil
 }

-func (a *Acrn) resumeSandbox() error {
-    span, _ := a.trace("resumeSandbox")
+func (a *Acrn) resumeSandbox(ctx context.Context) error {
+    span, _ := a.trace(ctx, "resumeSandbox")
     defer span.End()

     // Not supported. return success
@@ -610,9 +611,9 @@ func (a *Acrn) resumeSandbox() error {
 }

 // addDevice will add extra devices to acrn command line.
-func (a *Acrn) addDevice(devInfo interface{}, devType deviceType) error {
+func (a *Acrn) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
     var err error
-    span, _ := a.trace("addDevice")
+    span, _ := a.trace(ctx, "addDevice")
     defer span.End()

     switch v := devInfo.(type) {
@@ -644,8 +645,8 @@ func (a *Acrn) addDevice(devInfo interface{}, devType deviceType) error {
 // getSandboxConsole builds the path of the console where we can read
 // logs coming from the sandbox.
-func (a *Acrn) getSandboxConsole(id string) (string, string, error) {
-    span, _ := a.trace("getSandboxConsole")
+func (a *Acrn) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
+    span, _ := a.trace(ctx, "getSandboxConsole")
     defer span.End()

     consoleURL, err := utils.BuildSocketPath(a.store.RunVMStoragePath(), id, acrnConsoleSocket)
@@ -664,15 +665,15 @@ func (a *Acrn) saveSandbox() error {
     return nil
 }

-func (a *Acrn) disconnect() {
-    span, _ := a.trace("disconnect")
+func (a *Acrn) disconnect(ctx context.Context) {
+    span, _ := a.trace(ctx, "disconnect")
     defer span.End()

     // Not supported.
 }

-func (a *Acrn) getThreadIDs() (vcpuThreadIDs, error) {
-    span, _ := a.trace("getThreadIDs")
+func (a *Acrn) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
+    span, _ := a.trace(ctx, "getThreadIDs")
     defer span.End()

     // Not supported. return success
@@ -681,16 +682,16 @@ func (a *Acrn) getThreadIDs() (vcpuThreadIDs, error) {
     return vcpuThreadIDs{}, nil
 }

-func (a *Acrn) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
+func (a *Acrn) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
     return 0, memoryDevice{}, nil
 }

-func (a *Acrn) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (a *Acrn) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
     return 0, 0, nil
 }

-func (a *Acrn) cleanup() error {
-    span, _ := a.trace("cleanup")
+func (a *Acrn) cleanup(ctx context.Context) error {
+    span, _ := a.trace(ctx, "cleanup")
     defer span.End()

     return nil
@@ -704,7 +705,7 @@ func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
     return errors.New("acrn is not supported by VM cache")
 }

-func (a *Acrn) toGrpc() ([]byte, error) {
+func (a *Acrn) toGrpc(ctx context.Context) ([]byte, error) {
     return nil, errors.New("acrn is not supported by VM cache")
 }

View File

@@ -89,7 +89,7 @@ func testAcrnAddDevice(t *testing.T, devInfo interface{}, devType deviceType, ex
         arch: &acrnArchBase{},
     }

-    err := a.addDevice(devInfo, devType)
+    err := a.addDevice(context.Background(), devInfo, devType)
     assert.NoError(err)
     assert.Exactly(a.acrnConfig.Devices, expected)
 }

View File

@@ -74,13 +74,13 @@ type agent interface {
     capabilities() types.Capabilities

     // check will check the agent liveness
-    check() error
+    check(ctx context.Context) error

     // tell whether the agent is long live connected or not
     longLiveConn() bool

     // disconnect will disconnect the connection to the agent
-    disconnect() error
+    disconnect(ctx context.Context) error

     // get agent url
     getAgentURL() (string, error)
@@ -92,111 +92,111 @@ type agent interface {
     reuseAgent(agent agent) error

     // createSandbox will tell the agent to perform necessary setup for a Sandbox.
-    createSandbox(sandbox *Sandbox) error
+    createSandbox(ctx context.Context, sandbox *Sandbox) error

     // exec will tell the agent to run a command in an already running container.
-    exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error)
+    exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error)

     // startSandbox will tell the agent to start all containers related to the Sandbox.
-    startSandbox(sandbox *Sandbox) error
+    startSandbox(ctx context.Context, sandbox *Sandbox) error

     // stopSandbox will tell the agent to stop all containers related to the Sandbox.
-    stopSandbox(sandbox *Sandbox) error
+    stopSandbox(ctx context.Context, sandbox *Sandbox) error

     // createContainer will tell the agent to create a container related to a Sandbox.
-    createContainer(sandbox *Sandbox, c *Container) (*Process, error)
+    createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (*Process, error)

     // startContainer will tell the agent to start a container related to a Sandbox.
-    startContainer(sandbox *Sandbox, c *Container) error
+    startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error

     // stopContainer will tell the agent to stop a container related to a Sandbox.
-    stopContainer(sandbox *Sandbox, c Container) error
+    stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error

     // signalProcess will tell the agent to send a signal to a
     // container or a process related to a Sandbox. If all is true, all processes in
     // the container will be sent the signal.
-    signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error
+    signalProcess(ctx context.Context, c *Container, processID string, signal syscall.Signal, all bool) error

     // winsizeProcess will tell the agent to set a process' tty size
-    winsizeProcess(c *Container, processID string, height, width uint32) error
+    winsizeProcess(ctx context.Context, c *Container, processID string, height, width uint32) error

     // writeProcessStdin will tell the agent to write a process stdin
-    writeProcessStdin(c *Container, ProcessID string, data []byte) (int, error)
+    writeProcessStdin(ctx context.Context, c *Container, ProcessID string, data []byte) (int, error)

     // closeProcessStdin will tell the agent to close a process stdin
-    closeProcessStdin(c *Container, ProcessID string) error
+    closeProcessStdin(ctx context.Context, c *Container, ProcessID string) error

     // readProcessStdout will tell the agent to read a process stdout
-    readProcessStdout(c *Container, processID string, data []byte) (int, error)
+    readProcessStdout(ctx context.Context, c *Container, processID string, data []byte) (int, error)

     // readProcessStderr will tell the agent to read a process stderr
-    readProcessStderr(c *Container, processID string, data []byte) (int, error)
+    readProcessStderr(ctx context.Context, c *Container, processID string, data []byte) (int, error)

     // processListContainer will list the processes running inside the container
-    processListContainer(sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error)
+    processListContainer(ctx context.Context, sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error)

     // updateContainer will update the resources of a running container
-    updateContainer(sandbox *Sandbox, c Container, resources specs.LinuxResources) error
+    updateContainer(ctx context.Context, sandbox *Sandbox, c Container, resources specs.LinuxResources) error

     // waitProcess will wait for the exit code of a process
-    waitProcess(c *Container, processID string) (int32, error)
+    waitProcess(ctx context.Context, c *Container, processID string) (int32, error)

     // onlineCPUMem will online CPUs and Memory inside the Sandbox.
     // This function should be called after hot adding vCPUs or Memory.
     // cpus specifies the number of CPUs that were added and the agent should online
     // cpuOnly specifies that we should online cpu or online memory or both
-    onlineCPUMem(cpus uint32, cpuOnly bool) error
+    onlineCPUMem(ctx context.Context, cpus uint32, cpuOnly bool) error

     // memHotplugByProbe will notify the guest kernel about memory hotplug event through
     // probe interface.
     // This function should be called after hot adding Memory and before online memory.
     // addr specifies the address of the recently hotplugged or unhotplugged memory device.
-    memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error
+    memHotplugByProbe(ctx context.Context, addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error

     // statsContainer will tell the agent to get stats from a container related to a Sandbox
-    statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error)
+    statsContainer(ctx context.Context, sandbox *Sandbox, c Container) (*ContainerStats, error)

     // pauseContainer will pause a container
-    pauseContainer(sandbox *Sandbox, c Container) error
+    pauseContainer(ctx context.Context, sandbox *Sandbox, c Container) error

     // resumeContainer will resume a paused container
-    resumeContainer(sandbox *Sandbox, c Container) error
+    resumeContainer(ctx context.Context, sandbox *Sandbox, c Container) error

     // configure will update agent settings based on provided arguments
-    configure(h hypervisor, id, sharePath string, config interface{}) error
+    configure(ctx context.Context, h hypervisor, id, sharePath string, config interface{}) error

     // configureFromGrpc will update agent settings based on provided arguments which from Grpc
     configureFromGrpc(h hypervisor, id string, config interface{}) error

     // reseedRNG will reseed the guest random number generator
-    reseedRNG(data []byte) error
+    reseedRNG(ctx context.Context, data []byte) error

     // updateInterface will tell the agent to update a nic for an existed Sandbox.
-    updateInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error)
+    updateInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error)

     // listInterfaces will tell the agent to list interfaces of an existed Sandbox
-    listInterfaces() ([]*pbTypes.Interface, error)
+    listInterfaces(ctx context.Context) ([]*pbTypes.Interface, error)

     // updateRoutes will tell the agent to update route table for an existed Sandbox.
-    updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error)
+    updateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error)

     // listRoutes will tell the agent to list routes of an existed Sandbox
-    listRoutes() ([]*pbTypes.Route, error)
+    listRoutes(ctx context.Context) ([]*pbTypes.Route, error)

     // getGuestDetails will tell the agent to get some information of guest
-    getGuestDetails(*grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error)
+    getGuestDetails(context.Context, *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error)

     // setGuestDateTime asks the agent to set guest time to the provided one
-    setGuestDateTime(time.Time) error
+    setGuestDateTime(context.Context, time.Time) error

     // copyFile copies file from host to container's rootfs
-    copyFile(src, dst string) error
+    copyFile(ctx context.Context, src, dst string) error

     // markDead tell agent that the guest is dead
-    markDead()
+    markDead(ctx context.Context)

     // cleanup removes all on disk information generated by the agent
-    cleanup(s *Sandbox)
+    cleanup(ctx context.Context, s *Sandbox)

     // return data for saving
     save() persistapi.AgentState
@@ -206,8 +206,8 @@ type agent interface {
     // getOOMEvent will wait on OOM events that occur in the sandbox.
     // Will return the ID of the container where the event occurred.
-    getOOMEvent() (string, error)
+    getOOMEvent(ctx context.Context) (string, error)

     // getAgentMetrics get metrics of agent and guest through agent
-    getAgentMetrics(*grpc.GetMetricsRequest) (*grpc.Metrics, error)
+    getAgentMetrics(context.Context, *grpc.GetMetricsRequest) (*grpc.Metrics, error)
 }

View File

@@ -74,19 +74,19 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
     // cleanup sandbox resources in case of any failure
     defer func() {
         if err != nil {
-            s.Delete()
+            s.Delete(ctx)
         }
     }()

     // Create the sandbox network
-    if err = s.createNetwork(); err != nil {
+    if err = s.createNetwork(ctx); err != nil {
         return nil, err
     }

     // network rollback
     defer func() {
         if err != nil {
-            s.removeNetwork()
+            s.removeNetwork(ctx)
         }
     }()
@@ -102,30 +102,30 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
     }

     // Start the VM
-    if err = s.startVM(); err != nil {
+    if err = s.startVM(ctx); err != nil {
         return nil, err
     }

     // rollback to stop VM if error occurs
     defer func() {
         if err != nil {
-            s.stopVM()
+            s.stopVM(ctx)
         }
     }()

-    s.postCreatedNetwork()
+    s.postCreatedNetwork(ctx)

-    if err = s.getAndStoreGuestDetails(); err != nil {
+    if err = s.getAndStoreGuestDetails(ctx); err != nil {
         return nil, err
     }

     // Create Containers
-    if err = s.createContainers(); err != nil {
+    if err = s.createContainers(ctx); err != nil {
         return nil, err
     }

     // The sandbox is completely created now, we can store it.
-    if err = s.storeSandbox(); err != nil {
+    if err = s.storeSandbox(ctx); err != nil {
         return nil, err
     }
@@ -157,14 +157,14 @@ func CleanupContainer(ctx context.Context, sandboxID, containerID string, force
     if err != nil {
         return err
     }
-    defer s.Release()
+    defer s.Release(ctx)

-    _, err = s.StopContainer(containerID, force)
+    _, err = s.StopContainer(ctx, containerID, force)
     if err != nil && !force {
         return err
     }

-    _, err = s.DeleteContainer(containerID)
+    _, err = s.DeleteContainer(ctx, containerID)
     if err != nil && !force {
         return err
     }
@@ -173,11 +173,11 @@ func CleanupContainer(ctx context.Context, sandboxID, containerID string, force
         return nil
     }

-    if err = s.Stop(force); err != nil && !force {
+    if err = s.Stop(ctx, force); err != nil && !force {
         return err
     }

-    if err = s.Delete(); err != nil {
+    if err = s.Delete(ctx); err != nil {
         return err
     }

View File

@@ -295,12 +295,12 @@ func TestCleanupContainer(t *testing.T) {
     for _, contID := range contIDs {
         contConfig := newTestContainerConfigNoop(contID)

-        c, err := p.CreateContainer(contConfig)
+        c, err := s.CreateContainer(context.Background(), contConfig)
         if c == nil || err != nil {
             t.Fatal(err)
         }

-        c, err = p.StartContainer(c.ID())
+        c, err = p.StartContainer(context.Background(), c.ID())
         if c == nil || err != nil {
             t.Fatal(err)
         }

View File

@@ -6,6 +6,7 @@
 package virtcontainers

 import (
+    "context"
     "fmt"

     "github.com/containernetworking/plugins/pkg/ns"
@@ -87,19 +88,19 @@ func (endpoint *BridgedMacvlanEndpoint) NetworkPair() *NetworkInterfacePair {
 // Attach for virtual endpoint bridges the network pair and adds the
 // tap interface of the network pair to the hypervisor.
-func (endpoint *BridgedMacvlanEndpoint) Attach(s *Sandbox) error {
+func (endpoint *BridgedMacvlanEndpoint) Attach(ctx context.Context, s *Sandbox) error {
     h := s.hypervisor
-    if err := xConnectVMNetwork(endpoint, h); err != nil {
+    if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
         networkLogger().WithError(err).Error("Error bridging virtual ep")
         return err
     }

-    return h.addDevice(endpoint, netDev)
+    return h.addDevice(ctx, endpoint, netDev)
 }

 // Detach for the virtual endpoint tears down the tap and bridge
 // created for the veth interface.
-func (endpoint *BridgedMacvlanEndpoint) Detach(netNsCreated bool, netNsPath string) error {
+func (endpoint *BridgedMacvlanEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
     // The network namespace would have been deleted at this point
     // if it has not been created by virtcontainers.
     if !netNsCreated {
@@ -112,12 +113,12 @@ func (endpoint *BridgedMacvlanEndpoint) Detach(netNsCreated bool, netNsPath stri
 }

 // HotAttach for physical endpoint not supported yet
-func (endpoint *BridgedMacvlanEndpoint) HotAttach(h hypervisor) error {
+func (endpoint *BridgedMacvlanEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
     return fmt.Errorf("BridgedMacvlanEndpoint does not support Hot attach")
 }

 // HotDetach for physical endpoint not supported yet
-func (endpoint *BridgedMacvlanEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
+func (endpoint *BridgedMacvlanEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
     return fmt.Errorf("BridgedMacvlanEndpoint does not support Hot detach")
 }

View File

@@ -169,7 +169,8 @@ func (clh *cloudHypervisor) checkVersion() error {
func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
    clh.ctx = ctx
-   span, _ := clh.trace("createSandbox")
+   var span otelTrace.Span
+   span, clh.ctx = clh.trace(clh.ctx, "createSandbox")
    defer span.End()
    err := hypervisorConfig.valid()
@@ -337,8 +338,8 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
}
// startSandbox will start the VMM and boot the virtual machine for the given sandbox.
-func (clh *cloudHypervisor) startSandbox(timeout int) error {
+func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error {
-   span, _ := clh.trace("startSandbox")
+   span, ctx := clh.trace(ctx, "startSandbox")
    defer span.End()
    ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
@@ -378,7 +379,7 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
    pid, err := clh.LaunchClh()
    if err != nil {
-       if shutdownErr := clh.virtiofsd.Stop(); shutdownErr != nil {
+       if shutdownErr := clh.virtiofsd.Stop(ctx); shutdownErr != nil {
            clh.Logger().WithField("error", shutdownErr).Warn("error shutting down Virtiofsd")
        }
        return fmt.Errorf("failed to launch cloud-hypervisor: %q", err)
@@ -395,7 +396,7 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {
// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
-func (clh *cloudHypervisor) getSandboxConsole(id string) (string, string, error) {
+func (clh *cloudHypervisor) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
    clh.Logger().WithField("function", "getSandboxConsole").WithField("id", id).Info("Get Sandbox Console")
    master, slave, err := console.NewPty()
    if err != nil {
@@ -407,11 +408,11 @@ func (clh *cloudHypervisor) getSandboxConsole(id string) (string, string, error)
    return consoleProtoPty, slave, nil
}
-func (clh *cloudHypervisor) disconnect() {
+func (clh *cloudHypervisor) disconnect(ctx context.Context) {
    clh.Logger().WithField("function", "disconnect").Info("Disconnecting Sandbox Console")
}
-func (clh *cloudHypervisor) getThreadIDs() (vcpuThreadIDs, error) {
+func (clh *cloudHypervisor) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
    clh.Logger().WithField("function", "getThreadIDs").Info("get thread ID's")
@@ -473,8 +474,8 @@ func (clh *cloudHypervisor) hotPlugVFIODevice(device config.VFIODev) error {
    return err
}
-func (clh *cloudHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
+func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-   span, _ := clh.trace("hotplugAddDevice")
+   span, _ := clh.trace(ctx, "hotplugAddDevice")
    defer span.End()
    switch devType {
@@ -490,8 +491,8 @@ func (clh *cloudHypervisor) hotplugAddDevice(devInfo interface{}, devType device
}
-func (clh *cloudHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
+func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-   span, _ := clh.trace("hotplugRemoveDevice")
+   span, ctx := clh.trace(ctx, "hotplugRemoveDevice")
    defer span.End()
    var deviceID string
@@ -525,7 +526,7 @@ func (clh *cloudHypervisor) hypervisorConfig() HypervisorConfig {
    return clh.config
}
-func (clh *cloudHypervisor) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
+func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
    // TODO: Add support for virtio-mem
@@ -590,7 +591,7 @@ func (clh *cloudHypervisor) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint
    return uint32(newMem.ToMiB()), memoryDevice{sizeMB: int(hotplugSize.ToMiB())}, nil
}
-func (clh *cloudHypervisor) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
    cl := clh.client()
    // Retrieve the number of current vCPUs via HTTP API
@@ -630,12 +631,12 @@ func (clh *cloudHypervisor) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, n
    return currentVCPUs, newVCPUs, nil
}
-func (clh *cloudHypervisor) cleanup() error {
+func (clh *cloudHypervisor) cleanup(ctx context.Context) error {
    clh.Logger().WithField("function", "cleanup").Info("cleanup")
    return nil
}
-func (clh *cloudHypervisor) pauseSandbox() error {
+func (clh *cloudHypervisor) pauseSandbox(ctx context.Context) error {
    clh.Logger().WithField("function", "pauseSandbox").Info("Pause Sandbox")
    return nil
}
@@ -645,24 +646,24 @@ func (clh *cloudHypervisor) saveSandbox() error {
    return nil
}
-func (clh *cloudHypervisor) resumeSandbox() error {
+func (clh *cloudHypervisor) resumeSandbox(ctx context.Context) error {
    clh.Logger().WithField("function", "resumeSandbox").Info("Resume Sandbox")
    return nil
}
// stopSandbox will stop the Sandbox's VM.
-func (clh *cloudHypervisor) stopSandbox() (err error) {
+func (clh *cloudHypervisor) stopSandbox(ctx context.Context) (err error) {
-   span, _ := clh.trace("stopSandbox")
+   span, ctx := clh.trace(ctx, "stopSandbox")
    defer span.End()
    clh.Logger().WithField("function", "stopSandbox").Info("Stop Sandbox")
-   return clh.terminate()
+   return clh.terminate(ctx)
}
func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
    return errors.New("cloudHypervisor is not supported by VM cache")
}
-func (clh *cloudHypervisor) toGrpc() ([]byte, error) {
+func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
    return nil, errors.New("cloudHypervisor is not supported by VM cache")
}
@@ -697,8 +698,8 @@ func (clh *cloudHypervisor) getPids() []int {
    return pids
}
-func (clh *cloudHypervisor) addDevice(devInfo interface{}, devType deviceType) error {
+func (clh *cloudHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
-   span, _ := clh.trace("addDevice")
+   span, _ := clh.trace(ctx, "addDevice")
    defer span.End()
    var err error
@@ -731,8 +732,8 @@ func (clh *cloudHypervisor) Logger() *log.Entry {
}
// Adds all capabilities supported by cloudHypervisor implementation of hypervisor interface
-func (clh *cloudHypervisor) capabilities() types.Capabilities {
+func (clh *cloudHypervisor) capabilities(ctx context.Context) types.Capabilities {
-   span, _ := clh.trace("capabilities")
+   span, _ := clh.trace(ctx, "capabilities")
    defer span.End()
    clh.Logger().WithField("function", "capabilities").Info("get Capabilities")
@@ -742,21 +743,21 @@ func (clh *cloudHypervisor) capabilities() types.Capabilities {
    return caps
}
-func (clh *cloudHypervisor) trace(name string) (otelTrace.Span, context.Context) {
+func (clh *cloudHypervisor) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
-   if clh.ctx == nil {
+   if parent == nil {
        clh.Logger().WithField("type", "bug").Error("trace called before context set")
-       clh.ctx = context.Background()
+       parent = context.Background()
    }
    tracer := otel.Tracer("kata")
-   ctx, span := tracer.Start(clh.ctx, name)
+   ctx, span := tracer.Start(parent, name)
    span.SetAttributes([]otelLabel.KeyValue{otelLabel.Key("subsystem").String("hypervisor"), otelLabel.Key("type").String("clh")}...)
    return span, ctx
}
-func (clh *cloudHypervisor) terminate() (err error) {
+func (clh *cloudHypervisor) terminate(ctx context.Context) (err error) {
-   span, _ := clh.trace("terminate")
+   span, ctx := clh.trace(ctx, "terminate")
    defer span.End()
    pid := clh.state.PID
@@ -817,7 +818,7 @@ func (clh *cloudHypervisor) terminate() (err error) {
    }
    clh.Logger().Debug("stop virtiofsd")
-   if err = clh.virtiofsd.Stop(); err != nil {
+   if err = clh.virtiofsd.Stop(ctx); err != nil {
        clh.Logger().Error("failed to stop virtiofsd")
    }
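The reworked trace helper above is the heart of the fix: spans start from the parent context the caller passes in, not from a context cached on the struct, so each child span nests under the caller's span. A dependency-free sketch of the same control flow, where the real code calls otel.Tracer("kata").Start(parent, name) and the prints stand in for span creation:

package main

import (
	"context"
	"fmt"
)

// trace mirrors the reworked clh.trace: the parent context is an argument,
// not a field on the struct.
func trace(parent context.Context, name string) (end func(), ctx context.Context) {
	if parent == nil {
		// Same defensive fallback the commit keeps for a missing parent.
		parent = context.Background()
	}
	fmt.Println("start span:", name)
	return func() { fmt.Println("end span:", name) }, parent
}

// stopSandbox and terminate show why the signature change matters:
// terminate's span becomes a child of stopSandbox's span because the
// same ctx flows through both calls.
func stopSandbox(ctx context.Context) error {
	end, ctx := trace(ctx, "stopSandbox")
	defer end()
	return terminate(ctx)
}

func terminate(ctx context.Context) error {
	end, _ := trace(ctx, "terminate")
	defer end()
	return nil
}

func main() {
	_ = stopSandbox(context.Background())
}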

View File

@@ -266,7 +266,7 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
        store: store,
    }
-   err = clh.startSandbox(10)
+   err = clh.startSandbox(context.Background(), 10)
    assert.NoError(err)
}
@@ -300,7 +300,7 @@ func TestCloudHypervisorResizeMemory(t *testing.T) {
    clh.APIClient = mockClient
    clh.config = clhConfig
-   newMem, memDev, err := clh.resizeMemory(tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
+   newMem, memDev, err := clh.resizeMemory(context.Background(), tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
    if (err != nil) != tt.wantErr {
        t.Errorf("cloudHypervisor.resizeMemory() error = %v, expected to fail = %v", err, tt.wantErr)
@@ -400,12 +400,12 @@ func TestCloudHypervisorHotplugRemoveDevice(t *testing.T) {
    clh.config = clhConfig
    clh.APIClient = &clhClientMock{}
-   _, err = clh.hotplugRemoveDevice(&config.BlockDrive{}, blockDev)
+   _, err = clh.hotplugRemoveDevice(context.Background(), &config.BlockDrive{}, blockDev)
    assert.NoError(err, "Hotplug remove block device expected no error")
-   _, err = clh.hotplugRemoveDevice(&config.VFIODev{}, vfioDev)
+   _, err = clh.hotplugRemoveDevice(context.Background(), &config.VFIODev{}, vfioDev)
    assert.NoError(err, "Hotplug remove vfio block device expected no error")
-   _, err = clh.hotplugRemoveDevice(nil, netDev)
+   _, err = clh.hotplugRemoveDevice(context.Background(), nil, netDev)
    assert.Error(err, "Hotplug remove pmem block device expected error")
}
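Tests have no parent span, so they now pass a fresh context.Background() at each call site. A minimal sketch of that call shape, using a hypothetical fakeVMM stand-in rather than the real cloudHypervisor type:

package main

import (
	"context"
	"testing"
)

// fakeVMM is a hypothetical stand-in for cloudHypervisor in these tests.
type fakeVMM struct{}

func (v *fakeVMM) startSandbox(ctx context.Context, timeout int) error { return nil }

// TestStartSandboxSketch shows the updated call shape: the tests have no
// parent span, so each call gets a fresh context.Background().
func TestStartSandboxSketch(t *testing.T) {
	v := &fakeVMM{}
	if err := v.startSandbox(context.Background(), 10); err != nil {
		t.Fatalf("startSandbox: %v", err)
	}
}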

View File

@@ -353,14 +353,14 @@ func (c *Container) Logger() *logrus.Entry {
    })
}
-func (c *Container) trace(name string) (otelTrace.Span, context.Context) {
+func (c *Container) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
-   if c.ctx == nil {
+   if parent == nil {
        c.Logger().WithField("type", "bug").Error("trace called before context set")
-       c.ctx = context.Background()
+       parent = context.Background()
    }
    tracer := otel.Tracer("kata")
-   ctx, span := tracer.Start(c.ctx, name)
+   ctx, span := tracer.Start(parent, name)
    span.SetAttributes(otelLabel.Key("subsystem").String("container"))
    return span, ctx
@@ -437,7 +437,7 @@ func (c *Container) setContainerState(state types.StateString) error {
    return nil
}
-func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, guestSharedDir string) (string, bool, error) {
+func (c *Container) shareFiles(ctx context.Context, m Mount, idx int, hostSharedDir, hostMountDir, guestSharedDir string) (string, bool, error) {
    randBytes, err := utils.GenerateRandomBytes(8)
    if err != nil {
        return "", false, err
@@ -448,7 +448,7 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, gu
    // copy file to contaier's rootfs if filesystem sharing is not supported, otherwise
    // bind mount it in the shared directory.
-   caps := c.sandbox.hypervisor.capabilities()
+   caps := c.sandbox.hypervisor.capabilities(ctx)
    if !caps.IsFsSharingSupported() {
        c.Logger().Debug("filesystem sharing is not supported, files will be copied")
@@ -466,13 +466,13 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, gu
            return "", true, nil
        }
-       if err := c.sandbox.agent.copyFile(m.Source, guestDest); err != nil {
+       if err := c.sandbox.agent.copyFile(ctx, m.Source, guestDest); err != nil {
            return "", false, err
        }
    } else {
        // These mounts are created in the shared dir
        mountDest := filepath.Join(hostMountDir, filename)
-       if err := bindMount(c.ctx, m.Source, mountDest, m.ReadOnly, "private"); err != nil {
+       if err := bindMount(ctx, m.Source, mountDest, m.ReadOnly, "private"); err != nil {
            return "", false, err
        }
        // Save HostPath mount value into the mount list of the container.
@@ -494,14 +494,14 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, gu
// It also updates the container mount list with the HostPath info, and store
// container mounts to the storage. This way, we will have the HostPath info
// available when we will need to unmount those mounts.
-func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestSharedDir string) (sharedDirMounts map[string]Mount, ignoredMounts map[string]Mount, err error) {
+func (c *Container) mountSharedDirMounts(ctx context.Context, hostSharedDir, hostMountDir, guestSharedDir string) (sharedDirMounts map[string]Mount, ignoredMounts map[string]Mount, err error) {
    sharedDirMounts = make(map[string]Mount)
    ignoredMounts = make(map[string]Mount)
    var devicesToDetach []string
    defer func() {
        if err != nil {
            for _, id := range devicesToDetach {
-               c.sandbox.devManager.DetachDevice(id, c.sandbox)
+               c.sandbox.devManager.DetachDevice(ctx, id, c.sandbox)
            }
        }
    }()
@@ -517,7 +517,7 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestShare
        // instead of passing this as a shared mount:
        if len(m.BlockDeviceID) > 0 {
            // Attach this block device, all other devices passed in the config have been attached at this point
-           if err = c.sandbox.devManager.AttachDevice(m.BlockDeviceID, c.sandbox); err != nil {
+           if err = c.sandbox.devManager.AttachDevice(ctx, m.BlockDeviceID, c.sandbox); err != nil {
                return nil, nil, err
            }
            devicesToDetach = append(devicesToDetach, m.BlockDeviceID)
@@ -545,7 +545,7 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestShare
        var ignore bool
        var guestDest string
-       guestDest, ignore, err = c.shareFiles(m, idx, hostSharedDir, hostMountDir, guestSharedDir)
+       guestDest, ignore, err = c.shareFiles(ctx, m, idx, hostSharedDir, hostMountDir, guestSharedDir)
        if err != nil {
            return nil, nil, err
        }
@@ -570,14 +570,14 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestShare
    return sharedDirMounts, ignoredMounts, nil
}
-func (c *Container) unmountHostMounts() error {
+func (c *Container) unmountHostMounts(ctx context.Context) error {
    var span otelTrace.Span
-   span, c.ctx = c.trace("unmountHostMounts")
+   span, ctx = c.trace(ctx, "unmountHostMounts")
    defer span.End()
    for _, m := range c.mounts {
        if m.HostPath != "" {
-           span, _ := c.trace("unmount")
+           span, _ := c.trace(ctx, "unmount")
            span.SetAttributes(otelLabel.Key("host-path").String(m.HostPath))
            if err := syscall.Unmount(m.HostPath, syscall.MNT_DETACH|UmountNoFollow); err != nil {
@@ -634,8 +634,8 @@ func filterDevices(c *Container, devices []ContainerDevice) (ret []ContainerDevi
// Add any mount based block devices to the device manager and save the
// device ID for the particular mount. This'll occur when the mountpoint source
// is a block device.
-func (c *Container) createBlockDevices() error {
+func (c *Container) createBlockDevices(ctx context.Context) error {
-   if !c.checkBlockDeviceSupport() {
+   if !c.checkBlockDeviceSupport(ctx) {
        c.Logger().Warn("Block device not supported")
        return nil
    }
@@ -699,8 +699,8 @@ func (c *Container) createBlockDevices() error {
}
// newContainer creates a Container structure from a sandbox and a container configuration.
-func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, error) {
+func newContainer(ctx context.Context, sandbox *Sandbox, contConfig *ContainerConfig) (*Container, error) {
-   span, _ := sandbox.trace("newContainer")
+   span, ctx := sandbox.trace(ctx, "newContainer")
    defer span.End()
    if !contConfig.valid() {
@@ -734,7 +734,7 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
    }
    // If mounts are block devices, add to devmanager
-   if err := c.createMounts(); err != nil {
+   if err := c.createMounts(ctx); err != nil {
        return nil, err
    }
@@ -746,9 +746,9 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
    return c, nil
}
-func (c *Container) createMounts() error {
+func (c *Container) createMounts(ctx context.Context) error {
    // Create block devices for newly created container
-   return c.createBlockDevices()
+   return c.createBlockDevices(ctx)
}
func (c *Container) createDevices(contConfig *ContainerConfig) error {
@@ -777,25 +777,25 @@ func (c *Container) createDevices(contConfig *ContainerConfig) error {
// been performed before the container creation failed.
// - Unplug CPU and memory resources from the VM.
// - Unplug devices from the VM.
-func (c *Container) rollbackFailingContainerCreation() {
+func (c *Container) rollbackFailingContainerCreation(ctx context.Context) {
-   if err := c.detachDevices(); err != nil {
+   if err := c.detachDevices(ctx); err != nil {
        c.Logger().WithError(err).Error("rollback failed detachDevices()")
    }
-   if err := c.removeDrive(); err != nil {
+   if err := c.removeDrive(ctx); err != nil {
        c.Logger().WithError(err).Error("rollback failed removeDrive()")
    }
-   if err := c.unmountHostMounts(); err != nil {
+   if err := c.unmountHostMounts(ctx); err != nil {
        c.Logger().WithError(err).Error("rollback failed unmountHostMounts()")
    }
-   if err := bindUnmountContainerRootfs(c.ctx, getMountPath(c.sandbox.id), c.id); err != nil {
+   if err := bindUnmountContainerRootfs(ctx, getMountPath(c.sandbox.id), c.id); err != nil {
        c.Logger().WithError(err).Error("rollback failed bindUnmountContainerRootfs()")
    }
}
-func (c *Container) checkBlockDeviceSupport() bool {
+func (c *Container) checkBlockDeviceSupport(ctx context.Context) bool {
    if !c.sandbox.config.HypervisorConfig.DisableBlockDeviceUse {
        agentCaps := c.sandbox.agent.capabilities()
-       hypervisorCaps := c.sandbox.hypervisor.capabilities()
+       hypervisorCaps := c.sandbox.hypervisor.capabilities(ctx)
        if agentCaps.IsBlockDeviceSupported() && hypervisorCaps.IsBlockDeviceHotplugSupported() {
            return true
@@ -807,19 +807,19 @@ func (c *Container) checkBlockDeviceSupport() bool {
// createContainer creates and start a container inside a Sandbox. It has to be
// called only when a new container, not known by the sandbox, has to be created.
-func (c *Container) create() (err error) {
+func (c *Container) create(ctx context.Context) (err error) {
    // In case the container creation fails, the following takes care
    // of rolling back all the actions previously performed.
    defer func() {
        if err != nil {
            c.Logger().WithError(err).Error("container create failed")
-           c.rollbackFailingContainerCreation()
+           c.rollbackFailingContainerCreation(ctx)
        }
    }()
-   if c.checkBlockDeviceSupport() {
+   if c.checkBlockDeviceSupport(ctx) {
        // If the rootfs is backed by a block device, go ahead and hotplug it to the guest
-       if err = c.hotplugDrive(); err != nil {
+       if err = c.hotplugDrive(ctx); err != nil {
            return
        }
    }
@@ -853,7 +853,7 @@ func (c *Container) create() (err error) {
        "devices": normalAttachedDevs,
    }).Info("normal attach devices")
    if len(normalAttachedDevs) > 0 {
-       if err = c.attachDevices(normalAttachedDevs); err != nil {
+       if err = c.attachDevices(ctx, normalAttachedDevs); err != nil {
            return
        }
    }
@@ -862,7 +862,7 @@ func (c *Container) create() (err error) {
    // inside the VM
    c.getSystemMountInfo()
-   process, err := c.sandbox.agent.createContainer(c.sandbox, c)
+   process, err := c.sandbox.agent.createContainer(ctx, c.sandbox, c)
    if err != nil {
        return err
    }
@@ -874,7 +874,7 @@ func (c *Container) create() (err error) {
        "machine_type": machineType,
        "devices": delayAttachedDevs,
    }).Info("lazy attach devices")
-       if err = c.attachDevices(delayAttachedDevs); err != nil {
+       if err = c.attachDevices(ctx, delayAttachedDevs); err != nil {
            return
        }
    }
@@ -892,7 +892,7 @@ func (c *Container) create() (err error) {
    return nil
}
-func (c *Container) delete() error {
+func (c *Container) delete(ctx context.Context) error {
    if c.state.State != types.StateReady &&
        c.state.State != types.StateStopped {
        return fmt.Errorf("Container not ready or stopped, impossible to delete")
@@ -910,7 +910,7 @@ func (c *Container) delete() error {
        }
    }
-   return c.sandbox.storeSandbox()
+   return c.sandbox.storeSandbox(ctx)
}
// checkSandboxRunning validates the container state.
@@ -943,7 +943,7 @@ func (c *Container) getSystemMountInfo() {
    // TODO Deduce /dev/shm size. See https://github.com/clearcontainers/runtime/issues/138
}
-func (c *Container) start() error {
+func (c *Container) start(ctx context.Context) error {
    if err := c.checkSandboxRunning("start"); err != nil {
        return err
    }
@@ -957,10 +957,10 @@ func (c *Container) start() error {
        return err
    }
-   if err := c.sandbox.agent.startContainer(c.sandbox, c); err != nil {
+   if err := c.sandbox.agent.startContainer(ctx, c.sandbox, c); err != nil {
        c.Logger().WithError(err).Error("Failed to start container")
-       if err := c.stop(true); err != nil {
+       if err := c.stop(ctx, true); err != nil {
            c.Logger().WithError(err).Warn("Failed to stop container")
        }
        return err
@@ -969,8 +969,9 @@ func (c *Container) start() error {
    return c.setContainerState(types.StateRunning)
}
-func (c *Container) stop(force bool) error {
+func (c *Container) stop(ctx context.Context, force bool) error {
-   span, _ := c.trace("stop")
+   var span otelTrace.Span
+   span, ctx = c.trace(ctx, "stop")
    defer span.End()
    // In case the container status has been updated implicitly because
@@ -992,13 +993,13 @@ func (c *Container) stop(force bool) error {
    // Force the container to be killed. For most of the cases, this
    // should not matter and it should return an error that will be
    // ignored.
-   c.kill(syscall.SIGKILL, true)
+   c.kill(ctx, syscall.SIGKILL, true)
    // Since the agent has supported the MultiWaitProcess, it's better to
    // wait the process here to make sure the process has exited before to
    // issue stopContainer, otherwise the RemoveContainerRequest in it will
    // get failed if the process hasn't exited.
-   c.sandbox.agent.waitProcess(c, c.id)
+   c.sandbox.agent.waitProcess(ctx, c, c.id)
    defer func() {
        // Save device and drive data.
@@ -1008,23 +1009,23 @@ func (c *Container) stop(force bool) error {
        }
    }()
-   if err := c.sandbox.agent.stopContainer(c.sandbox, *c); err != nil && !force {
+   if err := c.sandbox.agent.stopContainer(ctx, c.sandbox, *c); err != nil && !force {
        return err
    }
-   if err := c.unmountHostMounts(); err != nil && !force {
+   if err := c.unmountHostMounts(ctx); err != nil && !force {
        return err
    }
-   if err := bindUnmountContainerRootfs(c.ctx, getMountPath(c.sandbox.id), c.id); err != nil && !force {
+   if err := bindUnmountContainerRootfs(ctx, getMountPath(c.sandbox.id), c.id); err != nil && !force {
        return err
    }
-   if err := c.detachDevices(); err != nil && !force {
+   if err := c.detachDevices(ctx); err != nil && !force {
        return err
    }
-   if err := c.removeDrive(); err != nil && !force {
+   if err := c.removeDrive(ctx); err != nil && !force {
        return err
    }
@@ -1043,7 +1044,7 @@ func (c *Container) stop(force bool) error {
    return nil
}
-func (c *Container) enter(cmd types.Cmd) (*Process, error) {
+func (c *Container) enter(ctx context.Context, cmd types.Cmd) (*Process, error) {
    if err := c.checkSandboxRunning("enter"); err != nil {
        return nil, err
    }
@@ -1054,7 +1055,7 @@ func (c *Container) enter(cmd types.Cmd) (*Process, error) {
        "impossible to enter")
    }
-   process, err := c.sandbox.agent.exec(c.sandbox, *c, cmd)
+   process, err := c.sandbox.agent.exec(ctx, c.sandbox, *c, cmd)
    if err != nil {
        return nil, err
    }
@@ -1062,21 +1063,21 @@ func (c *Container) enter(cmd types.Cmd) (*Process, error) {
    return process, nil
}
-func (c *Container) wait(processID string) (int32, error) {
+func (c *Container) wait(ctx context.Context, processID string) (int32, error) {
    if c.state.State != types.StateReady &&
        c.state.State != types.StateRunning {
        return 0, fmt.Errorf("Container not ready or running, " +
            "impossible to wait")
    }
-   return c.sandbox.agent.waitProcess(c, processID)
+   return c.sandbox.agent.waitProcess(ctx, c, processID)
}
-func (c *Container) kill(signal syscall.Signal, all bool) error {
+func (c *Container) kill(ctx context.Context, signal syscall.Signal, all bool) error {
-   return c.signalProcess(c.process.Token, signal, all)
+   return c.signalProcess(ctx, c.process.Token, signal, all)
}
-func (c *Container) signalProcess(processID string, signal syscall.Signal, all bool) error {
+func (c *Container) signalProcess(ctx context.Context, processID string, signal syscall.Signal, all bool) error {
    if c.sandbox.state.State != types.StateReady && c.sandbox.state.State != types.StateRunning {
        return fmt.Errorf("Sandbox not ready or running, impossible to signal the container")
    }
@@ -1085,15 +1086,15 @@ func (c *Container) signalProcess(processID string, signal syscall.Signal, all b
        return fmt.Errorf("Container not ready, running or paused, impossible to signal the container")
    }
-   return c.sandbox.agent.signalProcess(c, processID, signal, all)
+   return c.sandbox.agent.signalProcess(ctx, c, processID, signal, all)
}
-func (c *Container) winsizeProcess(processID string, height, width uint32) error {
+func (c *Container) winsizeProcess(ctx context.Context, processID string, height, width uint32) error {
    if c.state.State != types.StateReady && c.state.State != types.StateRunning {
        return fmt.Errorf("Container not ready or running, impossible to signal the container")
    }
-   return c.sandbox.agent.winsizeProcess(c, processID, height, width)
+   return c.sandbox.agent.winsizeProcess(ctx, c, processID, height, width)
}
func (c *Container) ioStream(processID string) (io.WriteCloser, io.Reader, io.Reader, error) {
@@ -1106,7 +1107,7 @@ func (c *Container) ioStream(processID string) (io.WriteCloser, io.Reader, io.Re
    return stream.stdin(), stream.stdout(), stream.stderr(), nil
}
-func (c *Container) processList(options ProcessListOptions) (ProcessList, error) {
+func (c *Container) processList(ctx context.Context, options ProcessListOptions) (ProcessList, error) {
    if err := c.checkSandboxRunning("ps"); err != nil {
        return nil, err
    }
@@ -1115,17 +1116,17 @@ func (c *Container) processList(options ProcessListOptions) (ProcessList, error)
        return nil, fmt.Errorf("Container not running, impossible to list processes")
    }
-   return c.sandbox.agent.processListContainer(c.sandbox, *c, options)
+   return c.sandbox.agent.processListContainer(ctx, c.sandbox, *c, options)
}
-func (c *Container) stats() (*ContainerStats, error) {
+func (c *Container) stats(ctx context.Context) (*ContainerStats, error) {
    if err := c.checkSandboxRunning("stats"); err != nil {
        return nil, err
    }
-   return c.sandbox.agent.statsContainer(c.sandbox, *c)
+   return c.sandbox.agent.statsContainer(ctx, c.sandbox, *c)
}
-func (c *Container) update(resources specs.LinuxResources) error {
+func (c *Container) update(ctx context.Context, resources specs.LinuxResources) error {
    if err := c.checkSandboxRunning("update"); err != nil {
        return err
    }
@@ -1161,7 +1162,7 @@ func (c *Container) update(resources specs.LinuxResources) error {
        c.config.Resources.Memory.Limit = mem.Limit
    }
-   if err := c.sandbox.updateResources(); err != nil {
+   if err := c.sandbox.updateResources(ctx); err != nil {
        return err
    }
@@ -1179,10 +1180,10 @@ func (c *Container) update(resources specs.LinuxResources) error {
        resources.CPU.Cpus = ""
    }
-   return c.sandbox.agent.updateContainer(c.sandbox, *c, resources)
+   return c.sandbox.agent.updateContainer(ctx, c.sandbox, *c, resources)
}
-func (c *Container) pause() error {
+func (c *Container) pause(ctx context.Context) error {
    if err := c.checkSandboxRunning("pause"); err != nil {
        return err
    }
@@ -1191,14 +1192,14 @@ func (c *Container) pause() error {
        return fmt.Errorf("Container not running, impossible to pause")
    }
-   if err := c.sandbox.agent.pauseContainer(c.sandbox, *c); err != nil {
+   if err := c.sandbox.agent.pauseContainer(ctx, c.sandbox, *c); err != nil {
        return err
    }
    return c.setContainerState(types.StatePaused)
}
-func (c *Container) resume() error {
+func (c *Container) resume(ctx context.Context) error {
    if err := c.checkSandboxRunning("resume"); err != nil {
        return err
    }
@@ -1207,7 +1208,7 @@ func (c *Container) resume() error {
        return fmt.Errorf("Container not paused, impossible to resume")
    }
-   if err := c.sandbox.agent.resumeContainer(c.sandbox, *c); err != nil {
+   if err := c.sandbox.agent.resumeContainer(ctx, c.sandbox, *c); err != nil {
        return err
    }
@@ -1216,7 +1217,7 @@ func (c *Container) resume() error {
// hotplugDrive will attempt to hotplug the container rootfs if it is backed by a
// block device
-func (c *Container) hotplugDrive() error {
+func (c *Container) hotplugDrive(ctx context.Context) error {
    var dev device
    var err error
@@ -1276,7 +1277,7 @@ func (c *Container) hotplugDrive() error {
        "fs-type": fsType,
    }).Info("Block device detected")
-   if err = c.plugDevice(devicePath); err != nil {
+   if err = c.plugDevice(ctx, devicePath); err != nil {
        return err
    }
@@ -1284,13 +1285,13 @@ func (c *Container) hotplugDrive() error {
}
// plugDevice will attach the rootfs if blockdevice is supported (this is rootfs specific)
-func (c *Container) plugDevice(devicePath string) error {
+func (c *Container) plugDevice(ctx context.Context, devicePath string) error {
    var stat unix.Stat_t
    if err := unix.Stat(devicePath, &stat); err != nil {
        return fmt.Errorf("stat %q failed: %v", devicePath, err)
    }
-   if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
+   if c.checkBlockDeviceSupport(ctx) && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
        b, err := c.sandbox.devManager.NewDevice(config.DeviceInfo{
            HostPath: devicePath,
            ContainerPath: filepath.Join(kataGuestSharedDir(), c.id),
@@ -1305,7 +1306,7 @@ func (c *Container) plugDevice(devicePath string) error {
        c.state.BlockDeviceID = b.DeviceID()
        // attach rootfs device
-       if err := c.sandbox.devManager.AttachDevice(b.DeviceID(), c.sandbox); err != nil {
+       if err := c.sandbox.devManager.AttachDevice(ctx, b.DeviceID(), c.sandbox); err != nil {
            return err
        }
    }
@@ -1317,12 +1318,12 @@ func (c *Container) isDriveUsed() bool {
    return !(c.state.Fstype == "")
}
-func (c *Container) removeDrive() (err error) {
+func (c *Container) removeDrive(ctx context.Context) (err error) {
    if c.isDriveUsed() {
        c.Logger().Info("unplugging block device")
        devID := c.state.BlockDeviceID
-       err := c.sandbox.devManager.DetachDevice(devID, c.sandbox)
+       err := c.sandbox.devManager.DetachDevice(ctx, devID, c.sandbox)
        if err != nil && err != manager.ErrDeviceNotAttached {
            return err
        }
@@ -1343,7 +1344,7 @@ func (c *Container) removeDrive() (err error) {
    return nil
}
-func (c *Container) attachDevices(devices []ContainerDevice) error {
+func (c *Container) attachDevices(ctx context.Context, devices []ContainerDevice) error {
    // there's no need to do rollback when error happens,
    // because if attachDevices fails, container creation will fail too,
    // and rollbackFailingContainerCreation could do all the rollbacks
@@ -1352,16 +1353,16 @@ func (c *Container) attachDevices(devices []ContainerDevice) error {
    // the devices need to be split into two lists, normalAttachedDevs and delayAttachedDevs.
    // so c.device is not used here. See issue https://github.com/kata-containers/runtime/issues/2460.
    for _, dev := range devices {
-       if err := c.sandbox.devManager.AttachDevice(dev.ID, c.sandbox); err != nil {
+       if err := c.sandbox.devManager.AttachDevice(ctx, dev.ID, c.sandbox); err != nil {
            return err
        }
    }
    return nil
}
-func (c *Container) detachDevices() error {
+func (c *Container) detachDevices(ctx context.Context) error {
    for _, dev := range c.devices {
-       err := c.sandbox.devManager.DetachDevice(dev.ID, c.sandbox)
+       err := c.sandbox.devManager.DetachDevice(ctx, dev.ID, c.sandbox)
        if err != nil && err != manager.ErrDeviceNotAttached {
            return err
        }

View File

@@ -7,6 +7,8 @@
package api
import (
+   "context"
+
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
    persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
    "github.com/sirupsen/logrus"
@@ -29,8 +31,8 @@ func DeviceLogger() *logrus.Entry {
// a device should be attached/added/plugged to a DeviceReceiver
type DeviceReceiver interface {
    // these are for hotplug/hot-unplug devices to/from hypervisor
-   HotplugAddDevice(Device, config.DeviceType) error
-   HotplugRemoveDevice(Device, config.DeviceType) error
+   HotplugAddDevice(context.Context, Device, config.DeviceType) error
+   HotplugRemoveDevice(context.Context, Device, config.DeviceType) error
    // this is only for virtio-blk and virtio-scsi support
    GetAndSetSandboxBlockIndex() (int, error)
@@ -38,13 +40,13 @@ type DeviceReceiver interface {
    GetHypervisorType() string
    // this is for appending device to hypervisor boot params
-   AppendDevice(Device) error
+   AppendDevice(context.Context, Device) error
}
// Device is the virtcontainers device interface.
type Device interface {
-   Attach(DeviceReceiver) error
-   Detach(DeviceReceiver) error
+   Attach(context.Context, DeviceReceiver) error
+   Detach(context.Context, DeviceReceiver) error
    // ID returns device identifier
    DeviceID() string
@@ -87,8 +89,8 @@ type Device interface {
type DeviceManager interface {
    NewDevice(config.DeviceInfo) (Device, error)
    RemoveDevice(string) error
-   AttachDevice(string, DeviceReceiver) error
-   DetachDevice(string, DeviceReceiver) error
+   AttachDevice(context.Context, string, DeviceReceiver) error
+   DetachDevice(context.Context, string, DeviceReceiver) error
    IsDeviceAttached(string) bool
    GetDeviceByID(string) Device
    GetAllDevices() []Device
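With the interface change above, every implementation and every caller has to forward the context one hop further down. A self-contained toy version of the reshaped interfaces, trimmed to the two methods that matter here and not the full virtcontainers definitions:

package main

import (
	"context"
	"fmt"
)

// Trimmed toy versions of the reshaped interfaces. The point is the relay:
// a Device implementation forwards the ctx it receives straight into the
// DeviceReceiver call.
type Device interface {
	Attach(context.Context, DeviceReceiver) error
	Detach(context.Context, DeviceReceiver) error
}

type DeviceReceiver interface {
	HotplugAddDevice(context.Context, Device) error
}

type blockDevice struct{}

func (d *blockDevice) Attach(ctx context.Context, r DeviceReceiver) error {
	return r.HotplugAddDevice(ctx, d) // forward the caller's context
}

func (d *blockDevice) Detach(ctx context.Context, r DeviceReceiver) error {
	return nil
}

type receiver struct{}

func (receiver) HotplugAddDevice(ctx context.Context, d Device) error {
	fmt.Println("hotplug add")
	return nil
}

func main() {
	var d Device = &blockDevice{}
	_ = d.Attach(context.Background(), receiver{})
}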

View File

@@ -6,6 +6,8 @@
package api
import (
+   "context"
+
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
)
@@ -13,12 +15,12 @@ import (
type MockDeviceReceiver struct{}
// HotplugAddDevice adds a new device
-func (mockDC *MockDeviceReceiver) HotplugAddDevice(Device, config.DeviceType) error {
+func (mockDC *MockDeviceReceiver) HotplugAddDevice(context.Context, Device, config.DeviceType) error {
    return nil
}
// HotplugRemoveDevice removes a device
-func (mockDC *MockDeviceReceiver) HotplugRemoveDevice(Device, config.DeviceType) error {
+func (mockDC *MockDeviceReceiver) HotplugRemoveDevice(context.Context, Device, config.DeviceType) error {
    return nil
}
@@ -33,7 +35,7 @@ func (mockDC *MockDeviceReceiver) UnsetSandboxBlockIndex(int) error {
}
// AppendDevice adds new vhost user device
-func (mockDC *MockDeviceReceiver) AppendDevice(Device) error {
+func (mockDC *MockDeviceReceiver) AppendDevice(context.Context, Device) error {
    return nil
}

View File

@@ -7,6 +7,7 @@
package drivers
import (
+   "context"
    "path/filepath"
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@@ -35,7 +36,7 @@ func NewBlockDevice(devInfo *config.DeviceInfo) *BlockDevice {
// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
-func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
+func (device *BlockDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
    skip, err := device.bumpAttachCount(true)
    if err != nil {
        return err
@@ -112,7 +113,7 @@ func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
    deviceLogger().WithField("device", device.DeviceInfo.HostPath).WithField("VirtPath", drive.VirtPath).Infof("Attaching %s device", customOptions["block-driver"])
    device.BlockDrive = drive
-   if err = devReceiver.HotplugAddDevice(device, config.DeviceBlock); err != nil {
+   if err = devReceiver.HotplugAddDevice(ctx, device, config.DeviceBlock); err != nil {
        return err
    }
@@ -121,7 +122,7 @@ func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
-func (device *BlockDevice) Detach(devReceiver api.DeviceReceiver) error {
+func (device *BlockDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
    skip, err := device.bumpAttachCount(false)
    if err != nil {
        return err
@@ -140,7 +141,7 @@ func (device *BlockDevice) Detach(devReceiver api.DeviceReceiver) error {
    deviceLogger().WithField("device", device.DeviceInfo.HostPath).Info("Unplugging block device")
-   if err = devReceiver.HotplugRemoveDevice(device, config.DeviceBlock); err != nil {
+   if err = devReceiver.HotplugRemoveDevice(ctx, device, config.DeviceBlock); err != nil {
        deviceLogger().WithError(err).Error("Failed to unplug block device")
        return err
    }

View File

@@ -7,6 +7,7 @@
package drivers
import (
+   "context"
    "fmt"
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@@ -32,13 +33,13 @@ func NewGenericDevice(devInfo *config.DeviceInfo) *GenericDevice {
}
// Attach is standard interface of api.Device
-func (device *GenericDevice) Attach(devReceiver api.DeviceReceiver) error {
+func (device *GenericDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) error {
    _, err := device.bumpAttachCount(true)
    return err
}
// Detach is standard interface of api.Device
-func (device *GenericDevice) Detach(devReceiver api.DeviceReceiver) error {
+func (device *GenericDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
    _, err := device.bumpAttachCount(false)
    return err
}

View File

@@ -7,6 +7,7 @@
package drivers
import (
+   "context"
    "fmt"
    "io/ioutil"
    "os"
@@ -56,7 +57,7 @@ func NewVFIODevice(devInfo *config.DeviceInfo) *VFIODevice {
// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
-func (device *VFIODevice) Attach(devReceiver api.DeviceReceiver) (retErr error) {
+func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (retErr error) {
    skip, err := device.bumpAttachCount(true)
    if err != nil {
        return err
@@ -105,13 +106,13 @@ func (device *VFIODevice) Attach(devReceiver api.DeviceReceiver) (retErr error)
    deviceLogger().WithField("cold-plug", coldPlug).Info("Attaching VFIO device")
    if coldPlug {
-       if err := devReceiver.AppendDevice(device); err != nil {
+       if err := devReceiver.AppendDevice(ctx, device); err != nil {
            deviceLogger().WithError(err).Error("Failed to append device")
            return err
        }
    } else {
        // hotplug a VFIO device is actually hotplugging a group of iommu devices
-       if err := devReceiver.HotplugAddDevice(device, config.DeviceVFIO); err != nil {
+       if err := devReceiver.HotplugAddDevice(ctx, device, config.DeviceVFIO); err != nil {
            deviceLogger().WithError(err).Error("Failed to add device")
            return err
        }
@@ -126,7 +127,7 @@ func (device *VFIODevice) Attach(devReceiver api.DeviceReceiver) (retErr error)
// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
-func (device *VFIODevice) Detach(devReceiver api.DeviceReceiver) (retErr error) {
+func (device *VFIODevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) (retErr error) {
    skip, err := device.bumpAttachCount(false)
    if err != nil {
        return err
@@ -151,7 +152,7 @@ func (device *VFIODevice) Detach(devReceiver api.DeviceReceiver) (retErr error)
    }
    // hotplug a VFIO device is actually hotplugging a group of iommu devices
-   if err := devReceiver.HotplugRemoveDevice(device, config.DeviceVFIO); err != nil {
+   if err := devReceiver.HotplugRemoveDevice(ctx, device, config.DeviceVFIO); err != nil {
        deviceLogger().WithError(err).Error("Failed to remove device")
        return err
    }
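VFIO keeps two attach paths, cold plug via boot parameters and hot plug via the hypervisor, and after this change both receive the same ctx. A sketch of that branch with hypothetical helper functions standing in for AppendDevice and HotplugAddDevice:

package main

import (
	"context"
	"fmt"
)

// attachVFIO sketches the two attach paths above: cold plug appends the
// device to boot params, hot plug goes through the hypervisor; both now
// receive the same ctx so their spans share a parent.
func attachVFIO(ctx context.Context, coldPlug bool) error {
	if coldPlug {
		return appendDevice(ctx)
	}
	return hotplugAddDevice(ctx)
}

func appendDevice(ctx context.Context) error {
	fmt.Println("cold plug: append to boot params")
	return nil
}

func hotplugAddDevice(ctx context.Context) error {
	fmt.Println("hot plug: ask the hypervisor")
	return nil
}

func main() {
	_ = attachVFIO(context.Background(), false)
}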

View File

@@ -7,6 +7,8 @@
package drivers
import (
+   "context"
+
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
    persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
@@ -36,7 +38,7 @@ func NewVhostUserBlkDevice(devInfo *config.DeviceInfo) *VhostUserBlkDevice {
// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
-func (device *VhostUserBlkDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
+func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
    skip, err := device.bumpAttachCount(true)
    if err != nil {
        return err
@@ -85,7 +87,7 @@ func (device *VhostUserBlkDevice) Attach(devReceiver api.DeviceReceiver) (err er
    }).Info("Attaching device")
    device.VhostUserDeviceAttrs = vAttrs
-   if err = devReceiver.HotplugAddDevice(device, config.VhostUserBlk); err != nil {
+   if err = devReceiver.HotplugAddDevice(ctx, device, config.VhostUserBlk); err != nil {
        return err
    }
@@ -114,7 +116,7 @@ func isVirtioBlkBlockDriver(customOptions map[string]string) bool {
// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
-func (device *VhostUserBlkDevice) Detach(devReceiver api.DeviceReceiver) error {
+func (device *VhostUserBlkDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
    skip, err := device.bumpAttachCount(false)
    if err != nil {
        return err
@@ -136,7 +138,7 @@ func (device *VhostUserBlkDevice) Detach(devReceiver api.DeviceReceiver) error {
    deviceLogger().WithField("device", device.DeviceInfo.HostPath).Info("Unplugging vhost-user-blk device")
-   if err = devReceiver.HotplugRemoveDevice(device, config.VhostUserBlk); err != nil {
+   if err = devReceiver.HotplugRemoveDevice(ctx, device, config.VhostUserBlk); err != nil {
        deviceLogger().WithError(err).Error("Failed to unplug vhost-user-blk device")
        return err
    }

View File

@@ -6,6 +6,7 @@
package drivers
import (
+   "context"
    "encoding/hex"
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@@ -21,7 +22,7 @@ type VhostUserFSDevice struct {
// Device interface
-func (device *VhostUserFSDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
+func (device *VhostUserFSDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
    skip, err := device.bumpAttachCount(true)
    if err != nil {
        return err
@@ -46,10 +47,10 @@ func (device *VhostUserFSDevice) Attach(devReceiver api.DeviceReceiver) (err err
    device.DevID = id
    device.Type = device.DeviceType()
-   return devReceiver.AppendDevice(device)
+   return devReceiver.AppendDevice(ctx, device)
}
-func (device *VhostUserFSDevice) Detach(devReceiver api.DeviceReceiver) error {
+func (device *VhostUserFSDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
    _, err := device.bumpAttachCount(false)
    return err
}

View File

@@ -7,6 +7,7 @@
package drivers
import (
+   "context"
    "encoding/hex"
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@@ -27,7 +28,7 @@ type VhostUserNetDevice struct {
// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
-func (device *VhostUserNetDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
+func (device *VhostUserNetDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
    skip, err := device.bumpAttachCount(true)
    if err != nil {
        return err
@@ -52,12 +53,12 @@ func (device *VhostUserNetDevice) Attach(devReceiver api.DeviceReceiver) (err er
    device.DevID = id
    device.Type = device.DeviceType()
-   return devReceiver.AppendDevice(device)
+   return devReceiver.AppendDevice(ctx, device)
}
// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
-func (device *VhostUserNetDevice) Detach(devReceiver api.DeviceReceiver) error {
+func (device *VhostUserNetDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
    _, err := device.bumpAttachCount(false)
    return err
}

View File

@@ -7,6 +7,7 @@
package drivers
import (
+   "context"
    "encoding/hex"
    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@@ -27,7 +28,7 @@ type VhostUserSCSIDevice struct {
// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
-func (device *VhostUserSCSIDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
+func (device *VhostUserSCSIDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
    skip, err := device.bumpAttachCount(true)
    if err != nil {
        return err
@@ -52,12 +53,12 @@ func (device *VhostUserSCSIDevice) Attach(devReceiver api.DeviceReceiver) (err e
    device.DevID = id
    device.Type = device.DeviceType()
-   return devReceiver.AppendDevice(device)
+   return devReceiver.AppendDevice(ctx, device)
}
// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
-func (device *VhostUserSCSIDevice) Detach(devReceiver api.DeviceReceiver) error {
+func (device *VhostUserSCSIDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
    _, err := device.bumpAttachCount(false)
    return err
}

View File

@@ -7,6 +7,7 @@
 package manager
 
 import (
+	"context"
 	"encoding/hex"
 	"errors"
 	"sync"
@@ -189,7 +190,7 @@ func (dm *deviceManager) newDeviceID() (string, error) {
 	return "", ErrIDExhausted
 }
 
-func (dm *deviceManager) AttachDevice(id string, dr api.DeviceReceiver) error {
+func (dm *deviceManager) AttachDevice(ctx context.Context, id string, dr api.DeviceReceiver) error {
 	dm.Lock()
 	defer dm.Unlock()
@@ -198,13 +199,13 @@ func (dm *deviceManager) AttachDevice(id string, dr api.DeviceReceiver) error {
 		return ErrDeviceNotExist
 	}
 
-	if err := d.Attach(dr); err != nil {
+	if err := d.Attach(ctx, dr); err != nil {
 		return err
 	}
 	return nil
 }
 
-func (dm *deviceManager) DetachDevice(id string, dr api.DeviceReceiver) error {
+func (dm *deviceManager) DetachDevice(ctx context.Context, id string, dr api.DeviceReceiver) error {
 	dm.Lock()
 	defer dm.Unlock()
@@ -216,7 +217,7 @@ func (dm *deviceManager) DetachDevice(id string, dr api.DeviceReceiver) error {
 		return ErrDeviceNotAttached
 	}
 
-	if err := d.Detach(dr); err != nil {
+	if err := d.Detach(ctx, dr); err != nil {
 		return err
 	}
 	return nil
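The manager now forwards the ctx it receives straight into each device's Attach and Detach. The commit's goal is span ordering, but a threaded context also lets cancellation reach the device operation; a self-contained sketch of that side effect, using stand-in types rather than the real virtcontainers ones:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Stand-in types (hypothetical, not the real virtcontainers API) showing what
// forwarding the manager's ctx into Attach buys the device implementation.
type DeviceReceiver struct{}

type Device interface {
	Attach(ctx context.Context, dr *DeviceReceiver) error
}

type blockDevice struct{}

func (d *blockDevice) Attach(ctx context.Context, dr *DeviceReceiver) error {
	// Any span started from ctx here nests under the caller's span, and
	// cancellation from the caller is visible too.
	if err := ctx.Err(); err != nil {
		return err
	}
	return nil
}

func attachDevice(ctx context.Context, d Device, dr *DeviceReceiver) error {
	if d == nil {
		return errors.New("device does not exist")
	}
	return d.Attach(ctx, dr) // was: d.Attach(dr)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate an already-cancelled caller
	fmt.Println(attachDevice(ctx, &blockDevice{}, &DeviceReceiver{}))
}
```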

View File

@@ -7,6 +7,7 @@
 package manager
 
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -150,10 +151,10 @@ func TestAttachVFIODevice(t *testing.T) {
 	assert.True(t, ok)
 
 	devReceiver := &api.MockDeviceReceiver{}
-	err = device.Attach(devReceiver)
+	err = device.Attach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 
-	err = device.Detach(devReceiver)
+	err = device.Detach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 }
@@ -178,7 +179,7 @@ func TestAttachGenericDevice(t *testing.T) {
 	err = device.Attach(devReceiver)
 	assert.Nil(t, err)
 
-	err = device.Detach(devReceiver)
+	err = device.Detach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 }
@@ -200,10 +201,10 @@ func TestAttachBlockDevice(t *testing.T) {
 	_, ok := device.(*drivers.BlockDevice)
 	assert.True(t, ok)
 
-	err = device.Attach(devReceiver)
+	err = device.Attach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 
-	err = device.Detach(devReceiver)
+	err = device.Detach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 
 	// test virtio SCSI driver
@@ -213,7 +214,7 @@ func TestAttachBlockDevice(t *testing.T) {
 	err = device.Attach(devReceiver)
 	assert.Nil(t, err)
 
-	err = device.Detach(devReceiver)
+	err = device.Detach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 }
@@ -287,10 +288,10 @@ func TestAttachVhostUserBlkDevice(t *testing.T) {
 	_, ok := device.(*drivers.VhostUserBlkDevice)
 	assert.True(t, ok)
 
-	err = device.Attach(devReceiver)
+	err = device.Attach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 
-	err = device.Detach(devReceiver)
+	err = device.Detach(context.Background(), devReceiver)
 	assert.Nil(t, err)
 }
@@ -309,15 +310,15 @@ func TestAttachDetachDevice(t *testing.T) {
 	assert.Nil(t, err)
 
 	// attach non-exist device
-	err = dm.AttachDevice("non-exist", devReceiver)
+	err = dm.AttachDevice(context.Background(), "non-exist", devReceiver)
 	assert.NotNil(t, err)
 
 	// attach device
-	err = dm.AttachDevice(device.DeviceID(), devReceiver)
+	err = dm.AttachDevice(context.Background(), device.DeviceID(), devReceiver)
 	assert.Nil(t, err)
 	assert.Equal(t, device.GetAttachCount(), uint(1), "attach device count should be 1")
 
 	// attach device again(twice)
-	err = dm.AttachDevice(device.DeviceID(), devReceiver)
+	err = dm.AttachDevice(context.Background(), device.DeviceID(), devReceiver)
 	assert.Nil(t, err)
 	assert.Equal(t, device.GetAttachCount(), uint(2), "attach device count should be 2")
@@ -325,15 +326,15 @@ func TestAttachDetachDevice(t *testing.T) {
 	assert.True(t, attached)
 
 	// detach device
-	err = dm.DetachDevice(device.DeviceID(), devReceiver)
+	err = dm.DetachDevice(context.Background(), device.DeviceID(), devReceiver)
 	assert.Nil(t, err)
 	assert.Equal(t, device.GetAttachCount(), uint(1), "attach device count should be 1")
 
 	// detach device again(twice)
-	err = dm.DetachDevice(device.DeviceID(), devReceiver)
+	err = dm.DetachDevice(context.Background(), device.DeviceID(), devReceiver)
 	assert.Nil(t, err)
 	assert.Equal(t, device.GetAttachCount(), uint(0), "attach device count should be 0")
 
 	// detach device again should report error
-	err = dm.DetachDevice(device.DeviceID(), devReceiver)
+	err = dm.DetachDevice(context.Background(), device.DeviceID(), devReceiver)
 	assert.NotNil(t, err)
 	assert.Equal(t, err, ErrDeviceNotAttached, "")
 	assert.Equal(t, device.GetAttachCount(), uint(0), "attach device count should be 0")
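Tests sit at the root of the call tree, so context.Background() is the natural parent here. A hypothetical helper, not part of this diff, if the suite ever wants a deadline on these calls (t.Cleanup is available since Go 1.14):

```go
package manager

import (
	"context"
	"testing"
	"time"
)

// testCtx is a hypothetical helper: tests are the top of the call tree, so
// context.Background() is the natural root, and a timeout keeps a wedged
// Attach from hanging the whole suite.
func testCtx(t *testing.T) context.Context {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	t.Cleanup(cancel)
	return ctx
}
```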

View File

@@ -6,6 +6,7 @@
 package virtcontainers
 
 import (
+	"context"
 	"fmt"
 
 	persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
@@ -23,10 +24,10 @@ type Endpoint interface {
 	SetProperties(NetworkInfo)
 	SetPciPath(vcTypes.PciPath)
-	Attach(*Sandbox) error
-	Detach(netNsCreated bool, netNsPath string) error
-	HotAttach(h hypervisor) error
-	HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error
+	Attach(context.Context, *Sandbox) error
+	Detach(ctx context.Context, netNsCreated bool, netNsPath string) error
+	HotAttach(ctx context.Context, h hypervisor) error
+	HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error
 
 	save() persistapi.NetworkEndpoint
 	load(persistapi.NetworkEndpoint)
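Every endpoint implementation has to change in lockstep with this interface. A runnable toy version of the compile-time assertion idiom that catches a signature drift early (the types here are illustrative stand-ins, not the real ones):

```go
package main

import "context"

// Toy mirror of the updated interface plus the compile-time assertion idiom:
// the var line below fails to build if the implementation drifts from the
// interface, which is how a refactor like this one gets caught early.
type Endpoint interface {
	Attach(context.Context) error
	Detach(ctx context.Context, netNsCreated bool, netNsPath string) error
}

type vethEndpoint struct{}

func (e *vethEndpoint) Attach(ctx context.Context) error { return nil }

func (e *vethEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
	return nil
}

// Fails to compile if vethEndpoint no longer satisfies Endpoint.
var _ Endpoint = (*vethEndpoint)(nil)

func main() {}
```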

View File

@@ -62,8 +62,8 @@ func New(ctx context.Context, count uint, b base.FactoryBase) base.FactoryBase {
 				c.removeFromVmm(vm)
 			case <-closed:
 				c.removeFromVmm(vm)
-				vm.Stop()
-				vm.Disconnect()
+				vm.Stop(ctx)
+				vm.Disconnect(ctx)
 				c.wg.Done()
 				return
 			}

View File

@@ -35,9 +35,9 @@ func (d *direct) GetBaseVM(ctx context.Context, config vc.VMConfig) (*vc.VM, err
 		return nil, err
 	}
 
-	err = vm.Pause()
+	err = vm.Pause(ctx)
 	if err != nil {
-		vm.Stop()
+		vm.Stop(ctx)
 		return nil, err
 	}

View File

@@ -141,7 +141,7 @@ func (f *factory) checkConfig(config vc.VMConfig) error {
 
 // GetVM returns a working blank VM created by the factory.
 func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
-	span, _ := trace(ctx, "GetVM")
+	span, ctx := trace(ctx, "GetVM")
 	defer span.End()
 
 	hypervisorConfig := config.HypervisorConfig
@@ -167,23 +167,23 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
 	defer func() {
 		if err != nil {
 			f.log().WithError(err).Error("clean up vm")
-			vm.Stop()
+			vm.Stop(ctx)
 		}
 	}()
 
-	err = vm.Resume()
+	err = vm.Resume(ctx)
 	if err != nil {
 		return nil, err
 	}
 
 	// reseed RNG so that shared memory VMs do not generate same random numbers.
-	err = vm.ReseedRNG()
+	err = vm.ReseedRNG(ctx)
 	if err != nil {
 		return nil, err
 	}
 
 	// sync guest time since we might have paused it for a long time.
-	err = vm.SyncTime()
+	err = vm.SyncTime(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -191,7 +191,7 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
 	online := false
 	baseConfig := f.base.Config().HypervisorConfig
 	if baseConfig.NumVCPUs < hypervisorConfig.NumVCPUs {
-		err = vm.AddCPUs(hypervisorConfig.NumVCPUs - baseConfig.NumVCPUs)
+		err = vm.AddCPUs(ctx, hypervisorConfig.NumVCPUs - baseConfig.NumVCPUs)
 		if err != nil {
 			return nil, err
 		}
@@ -199,7 +199,7 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
 	}
 
 	if baseConfig.MemorySize < hypervisorConfig.MemorySize {
-		err = vm.AddMemory(hypervisorConfig.MemorySize - baseConfig.MemorySize)
+		err = vm.AddMemory(ctx, hypervisorConfig.MemorySize - baseConfig.MemorySize)
 		if err != nil {
 			return nil, err
 		}
@@ -207,7 +207,7 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
 	}
 
 	if online {
-		err = vm.OnlineCPUMemory()
+		err = vm.OnlineCPUMemory(ctx)
 		if err != nil {
 			return nil, err
 		}
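The only change on the first line of GetVM is capturing the returned context (`span, ctx :=` instead of `span, _ :=`), but it is what makes every later call, including the deferred vm.Stop on the error path, a child of the GetVM span. A condensed, runnable sketch of that effect (function bodies are illustrative; the global otel tracer is a no-op unless a provider is installed):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

// getVM mimics the factory pattern: reassigning ctx means everything below,
// even deferred cleanup, runs "inside" the GetVM span.
func getVM(ctx context.Context) (err error) {
	ctx, span := otel.Tracer("kata").Start(ctx, "GetVM") // was: returned ctx discarded
	defer span.End()

	defer func() {
		if err != nil {
			stop(ctx) // cleanup spans now nest under GetVM too
		}
	}()

	return resume(ctx)
}

func resume(ctx context.Context) error {
	_, span := otel.Tracer("kata").Start(ctx, "Resume")
	defer span.End()
	return fmt.Errorf("simulated failure so cleanup runs")
}

func stop(ctx context.Context) {
	_, span := otel.Tracer("kata").Start(ctx, "Stop")
	defer span.End()
}

func main() {
	fmt.Println(getVM(context.Background()))
}
```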

View File

@@ -124,9 +124,9 @@ func (t *template) createTemplateVM(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	defer vm.Stop()
+	defer vm.Stop(ctx)
 
-	if err = vm.Disconnect(); err != nil {
+	if err = vm.Disconnect(ctx); err != nil {
 		return err
 	}
@@ -139,7 +139,7 @@ func (t *template) createTemplateVM(ctx context.Context) error {
 	// created from template, so it worth the invest.
 	time.Sleep(templateWaitForAgent)
 
-	if err = vm.Pause(); err != nil {
+	if err = vm.Pause(ctx); err != nil {
 		return err
 	}

View File

@@ -168,14 +168,14 @@ func (fc *firecracker) Logger() *logrus.Entry {
 	return virtLog.WithField("subsystem", "firecracker")
 }
 
-func (fc *firecracker) trace(name string) (otelTrace.Span, context.Context) {
-	if fc.ctx == nil {
+func (fc *firecracker) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
+	if parent == nil {
 		fc.Logger().WithField("type", "bug").Error("trace called before context set")
-		fc.ctx = context.Background()
+		parent = context.Background()
 	}
 
 	tracer := otel.Tracer("kata")
-	ctx, span := tracer.Start(fc.ctx, name)
+	ctx, span := tracer.Start(parent, name)
 	span.SetAttributes([]otelLabel.KeyValue{otelLabel.Key("subsystem").String("hypervisor"), otelLabel.Key("type").String("firecracker")}...)
 
 	return span, ctx
@@ -199,7 +199,8 @@ func (fc *firecracker) truncateID(id string) string {
 func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
 	fc.ctx = ctx
 
-	span, _ := fc.trace("createSandbox")
+	var span otelTrace.Span
+	span, ctx = fc.trace(ctx, "createSandbox")
 	defer span.End()
 
 	//TODO: check validity of the hypervisor config provided
@@ -241,8 +242,8 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
 	return nil
 }
 
-func (fc *firecracker) newFireClient() *client.Firecracker {
-	span, _ := fc.trace("newFireClient")
+func (fc *firecracker) newFireClient(ctx context.Context) *client.Firecracker {
+	span, _ := fc.trace(ctx, "newFireClient")
 	defer span.End()
 
 	httpClient := client.NewHTTPClient(strfmt.NewFormats())
@@ -266,8 +267,8 @@ func (fc *firecracker) newFireClient() *client.Firecracker {
 	return httpClient
 }
 
-func (fc *firecracker) vmRunning() bool {
-	resp, err := fc.client().Operations.DescribeInstance(nil)
+func (fc *firecracker) vmRunning(ctx context.Context) bool {
+	resp, err := fc.client(ctx).Operations.DescribeInstance(nil)
 	if err != nil {
 		fc.Logger().WithError(err).Error("getting vm status failed")
 		return false
@@ -323,8 +324,8 @@ func (fc *firecracker) checkVersion(version string) error {
 }
 
 // waitVMMRunning will wait for timeout seconds for the VMM to be up and running.
-func (fc *firecracker) waitVMMRunning(timeout int) error {
-	span, _ := fc.trace("wait VMM to be running")
+func (fc *firecracker) waitVMMRunning(ctx context.Context, timeout int) error {
+	span, ctx := fc.trace(ctx, "wait VMM to be running")
 	defer span.End()
 
 	if timeout < 0 {
@@ -333,7 +334,7 @@ func (fc *firecracker) waitVMMRunning(timeout int) error {
 	timeStart := time.Now()
 	for {
-		if fc.vmRunning() {
+		if fc.vmRunning(ctx) {
 			return nil
 		}
@@ -345,8 +346,8 @@ func (fc *firecracker) waitVMMRunning(timeout int) error {
 	}
 }
 
-func (fc *firecracker) fcInit(timeout int) error {
-	span, _ := fc.trace("fcInit")
+func (fc *firecracker) fcInit(ctx context.Context, timeout int) error {
+	span, ctx := fc.trace(ctx, "fcInit")
 	defer span.End()
 
 	var err error
@@ -411,17 +412,17 @@ func (fc *firecracker) fcInit(timeout int) error {
 	fc.info.PID = cmd.Process.Pid
 	fc.firecrackerd = cmd
-	fc.connection = fc.newFireClient()
+	fc.connection = fc.newFireClient(ctx)
 
-	if err := fc.waitVMMRunning(timeout); err != nil {
+	if err := fc.waitVMMRunning(ctx, timeout); err != nil {
 		fc.Logger().WithField("fcInit failed:", err).Debug()
 		return err
 	}
 
 	return nil
 }
 
-func (fc *firecracker) fcEnd() (err error) {
-	span, _ := fc.trace("fcEnd")
+func (fc *firecracker) fcEnd(ctx context.Context) (err error) {
+	span, _ := fc.trace(ctx, "fcEnd")
 	defer span.End()
 
 	fc.Logger().Info("Stopping firecracker VM")
@@ -465,12 +466,12 @@ func (fc *firecracker) fcEnd() (err error) {
 	return syscall.Kill(pid, syscall.SIGKILL)
 }
 
-func (fc *firecracker) client() *client.Firecracker {
-	span, _ := fc.trace("client")
+func (fc *firecracker) client(ctx context.Context) *client.Firecracker {
+	span, ctx := fc.trace(ctx, "client")
 	defer span.End()
 
 	if fc.connection == nil {
-		fc.connection = fc.newFireClient()
+		fc.connection = fc.newFireClient(ctx)
 	}
 
 	return fc.connection
@@ -532,8 +533,8 @@ func (fc *firecracker) fcJailResource(src, dst string) (string, error) {
 	return absPath, nil
 }
 
-func (fc *firecracker) fcSetBootSource(path, params string) error {
-	span, _ := fc.trace("fcSetBootSource")
+func (fc *firecracker) fcSetBootSource(ctx context.Context, path, params string) error {
+	span, _ := fc.trace(ctx, "fcSetBootSource")
 	defer span.End()
 	fc.Logger().WithFields(logrus.Fields{"kernel-path": path,
 		"kernel-params": params}).Debug("fcSetBootSource")
@@ -553,8 +554,8 @@ func (fc *firecracker) fcSetBootSource(path, params string) error {
 	return nil
 }
 
-func (fc *firecracker) fcSetVMRootfs(path string) error {
-	span, _ := fc.trace("fcSetVMRootfs")
+func (fc *firecracker) fcSetVMRootfs(ctx context.Context, path string) error {
+	span, _ := fc.trace(ctx, "fcSetVMRootfs")
 	defer span.End()
 
 	jailedRootfs, err := fc.fcJailResource(path, fcRootfs)
@@ -580,8 +581,8 @@ func (fc *firecracker) fcSetVMRootfs(path string) error {
 	return nil
 }
 
-func (fc *firecracker) fcSetVMBaseConfig(mem int64, vcpus int64, htEnabled bool) {
-	span, _ := fc.trace("fcSetVMBaseConfig")
+func (fc *firecracker) fcSetVMBaseConfig(ctx context.Context, mem int64, vcpus int64, htEnabled bool) {
+	span, _ := fc.trace(ctx, "fcSetVMBaseConfig")
 	defer span.End()
 	fc.Logger().WithFields(logrus.Fields{"mem": mem,
 		"vcpus": vcpus,
@@ -596,8 +597,8 @@ func (fc *firecracker) fcSetVMBaseConfig(mem int64, vcpus int64, htEnabled bool)
 	fc.fcConfig.MachineConfig = cfg
 }
 
-func (fc *firecracker) fcSetLogger() error {
-	span, _ := fc.trace("fcSetLogger")
+func (fc *firecracker) fcSetLogger(ctx context.Context) error {
+	span, _ := fc.trace(ctx, "fcSetLogger")
 	defer span.End()
 
 	fcLogLevel := "Error"
@@ -673,7 +674,7 @@ func (fc *firecracker) fcListenToFifo(fifoName string, consumer fifoConsumer) (s
 	return jailedFifoPath, nil
 }
 
-func (fc *firecracker) fcInitConfiguration() error {
+func (fc *firecracker) fcInitConfiguration(ctx context.Context) error {
 	// Firecracker API socket(firecracker.socket) is automatically created
 	// under /run dir.
 	err := os.MkdirAll(filepath.Join(fc.jailerRoot, "run"), DirMode)
@@ -695,7 +696,7 @@ func (fc *firecracker) fcInitConfiguration() error {
 		}
 	}
 
-	fc.fcSetVMBaseConfig(int64(fc.config.MemorySize),
+	fc.fcSetVMBaseConfig(ctx, int64(fc.config.MemorySize),
 		int64(fc.config.NumVCPUs), false)
 
 	kernelPath, err := fc.config.KernelAssetPath()
@@ -716,7 +717,7 @@ func (fc *firecracker) fcInitConfiguration() error {
 	kernelParams := append(fc.config.KernelParams, fcKernelParams...)
 	strParams := SerializeParams(kernelParams, "=")
 	formattedParams := strings.Join(strParams, " ")
-	if err := fc.fcSetBootSource(kernelPath, formattedParams); err != nil {
+	if err := fc.fcSetBootSource(ctx, kernelPath, formattedParams); err != nil {
 		return err
 	}
@@ -732,21 +733,21 @@ func (fc *firecracker) fcInitConfiguration() error {
 		}
 	}
 
-	if err := fc.fcSetVMRootfs(image); err != nil {
+	if err := fc.fcSetVMRootfs(ctx, image); err != nil {
 		return err
 	}
 
-	if err := fc.createDiskPool(); err != nil {
+	if err := fc.createDiskPool(ctx); err != nil {
 		return err
 	}
 
-	if err := fc.fcSetLogger(); err != nil {
+	if err := fc.fcSetLogger(ctx); err != nil {
 		return err
 	}
 
 	fc.state.set(cfReady)
 	for _, d := range fc.pendingDevices {
-		if err := fc.addDevice(d.dev, d.devType); err != nil {
+		if err := fc.addDevice(ctx, d.dev, d.devType); err != nil {
 			return err
 		}
 	}
@@ -760,11 +761,11 @@ func (fc *firecracker) fcInitConfiguration() error {
 // startSandbox will start the hypervisor for the given sandbox.
 // In the context of firecracker, this will start the hypervisor,
 // for configuration, but not yet start the actual virtual machine
-func (fc *firecracker) startSandbox(timeout int) error {
-	span, _ := fc.trace("startSandbox")
+func (fc *firecracker) startSandbox(ctx context.Context, timeout int) error {
+	span, ctx := fc.trace(ctx, "startSandbox")
 	defer span.End()
 
-	if err := fc.fcInitConfiguration(); err != nil {
+	if err := fc.fcInitConfiguration(ctx); err != nil {
 		return err
 	}
@@ -780,7 +781,7 @@ func (fc *firecracker) startSandbox(timeout int) error {
 	var err error
 	defer func() {
 		if err != nil {
-			fc.fcEnd()
+			fc.fcEnd(ctx)
 		}
 	}()
@@ -793,7 +794,7 @@ func (fc *firecracker) startSandbox(timeout int) error {
 	}
 	defer label.SetProcessLabel("")
 
-	err = fc.fcInit(fcTimeout)
+	err = fc.fcInit(ctx, fcTimeout)
 	if err != nil {
 		return err
 	}
@@ -812,8 +813,8 @@ func fcDriveIndexToID(i int) string {
 	return "drive_" + strconv.Itoa(i)
 }
 
-func (fc *firecracker) createDiskPool() error {
-	span, _ := fc.trace("createDiskPool")
+func (fc *firecracker) createDiskPool(ctx context.Context) error {
+	span, _ := fc.trace(ctx, "createDiskPool")
 	defer span.End()
 
 	for i := 0; i < fcDiskPoolSize; i++ {
@@ -850,8 +851,8 @@ func (fc *firecracker) umountResource(jailedPath string) {
 }
 
 // cleanup all jail artifacts
-func (fc *firecracker) cleanupJail() {
-	span, _ := fc.trace("cleanupJail")
+func (fc *firecracker) cleanupJail(ctx context.Context) {
+	span, _ := fc.trace(ctx, "cleanupJail")
 	defer span.End()
 
 	fc.umountResource(fcKernel)
@@ -873,14 +874,14 @@ func (fc *firecracker) cleanupJail() {
 }
 
 // stopSandbox will stop the Sandbox's VM.
-func (fc *firecracker) stopSandbox() (err error) {
-	span, _ := fc.trace("stopSandbox")
+func (fc *firecracker) stopSandbox(ctx context.Context) (err error) {
+	span, ctx := fc.trace(ctx, "stopSandbox")
 	defer span.End()
 
-	return fc.fcEnd()
+	return fc.fcEnd(ctx)
 }
 
-func (fc *firecracker) pauseSandbox() error {
+func (fc *firecracker) pauseSandbox(ctx context.Context) error {
 	return nil
 }
@@ -888,12 +889,12 @@ func (fc *firecracker) saveSandbox() error {
 	return nil
 }
 
-func (fc *firecracker) resumeSandbox() error {
+func (fc *firecracker) resumeSandbox(ctx context.Context) error {
 	return nil
 }
 
-func (fc *firecracker) fcAddVsock(hvs types.HybridVSock) {
-	span, _ := fc.trace("fcAddVsock")
+func (fc *firecracker) fcAddVsock(ctx context.Context, hvs types.HybridVSock) {
+	span, _ := fc.trace(ctx, "fcAddVsock")
 	defer span.End()
 
 	udsPath := hvs.UdsPath
@@ -912,8 +913,8 @@ func (fc *firecracker) fcAddVsock(hvs types.HybridVSock) {
 	fc.fcConfig.Vsock = vsock
 }
 
-func (fc *firecracker) fcAddNetDevice(endpoint Endpoint) {
-	span, _ := fc.trace("fcAddNetDevice")
+func (fc *firecracker) fcAddNetDevice(ctx context.Context, endpoint Endpoint) {
+	span, _ := fc.trace(ctx, "fcAddNetDevice")
 	defer span.End()
 
 	ifaceID := endpoint.Name()
@@ -968,8 +969,8 @@ func (fc *firecracker) fcAddNetDevice(endpoint Endpoint) {
 	fc.fcConfig.NetworkInterfaces = append(fc.fcConfig.NetworkInterfaces, ifaceCfg)
 }
 
-func (fc *firecracker) fcAddBlockDrive(drive config.BlockDrive) error {
-	span, _ := fc.trace("fcAddBlockDrive")
+func (fc *firecracker) fcAddBlockDrive(ctx context.Context, drive config.BlockDrive) error {
+	span, _ := fc.trace(ctx, "fcAddBlockDrive")
 	defer span.End()
 
 	driveID := drive.ID
@@ -994,8 +995,8 @@ func (fc *firecracker) fcAddBlockDrive(drive config.BlockDrive) error {
 }
 
 // Firecracker supports replacing the host drive used once the VM has booted up
-func (fc *firecracker) fcUpdateBlockDrive(path, id string) error {
-	span, _ := fc.trace("fcUpdateBlockDrive")
+func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string) error {
+	span, ctx := fc.trace(ctx, "fcUpdateBlockDrive")
 	defer span.End()
 
 	// Use the global block index as an index into the pool of the devices
@@ -1009,7 +1010,7 @@ func (fc *firecracker) fcUpdateBlockDrive(path, id string) error {
 	}
 	driveParams.SetBody(driveFc)
-	if _, err := fc.client().Operations.PatchGuestDriveByID(driveParams); err != nil {
+	if _, err := fc.client(ctx).Operations.PatchGuestDriveByID(driveParams); err != nil {
 		return err
 	}
@@ -1018,8 +1019,8 @@ func (fc *firecracker) fcUpdateBlockDrive(path, id string) error {
 
 // addDevice will add extra devices to firecracker.  Limited to configure before the
 // virtual machine starts.  Devices include drivers and network interfaces only.
-func (fc *firecracker) addDevice(devInfo interface{}, devType deviceType) error {
-	span, _ := fc.trace("addDevice")
+func (fc *firecracker) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
+	span, ctx := fc.trace(ctx, "addDevice")
 	defer span.End()
 
 	fc.state.RLock()
@@ -1039,13 +1040,13 @@ func (fc *firecracker) addDevice(devInfo interface{}, devType deviceType) error
 	switch v := devInfo.(type) {
 	case Endpoint:
 		fc.Logger().WithField("device-type-endpoint", devInfo).Info("Adding device")
-		fc.fcAddNetDevice(v)
+		fc.fcAddNetDevice(ctx, v)
 	case config.BlockDrive:
 		fc.Logger().WithField("device-type-blockdrive", devInfo).Info("Adding device")
-		err = fc.fcAddBlockDrive(v)
+		err = fc.fcAddBlockDrive(ctx, v)
 	case types.HybridVSock:
 		fc.Logger().WithField("device-type-hybrid-vsock", devInfo).Info("Adding device")
-		fc.fcAddVsock(v)
+		fc.fcAddVsock(ctx, v)
 	default:
 		fc.Logger().WithField("unknown-device-type", devInfo).Error("Adding device")
 	}
@@ -1055,7 +1056,7 @@ func (fc *firecracker) addDevice(devInfo interface{}, devType deviceType) error
 
 // hotplugBlockDevice supported in Firecracker VMM
 // hot add or remove a block device.
-func (fc *firecracker) hotplugBlockDevice(drive config.BlockDrive, op operation) (interface{}, error) {
+func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.BlockDrive, op operation) (interface{}, error) {
 	var path string
 	var err error
 	driveID := fcDriveIndexToID(drive.Index)
@@ -1075,17 +1076,17 @@ func (fc *firecracker) hotplugBlockDevice(drive config.BlockDrive, op operation)
 		path = filepath.Join(fc.jailerRoot, driveID)
 	}
 
-	return nil, fc.fcUpdateBlockDrive(path, driveID)
+	return nil, fc.fcUpdateBlockDrive(ctx, path, driveID)
 }
 
 // hotplugAddDevice supported in Firecracker VMM
-func (fc *firecracker) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-	span, _ := fc.trace("hotplugAddDevice")
+func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+	span, ctx := fc.trace(ctx, "hotplugAddDevice")
 	defer span.End()
 
 	switch devType {
 	case blockDev:
-		return fc.hotplugBlockDevice(*devInfo.(*config.BlockDrive), addDevice)
+		return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), addDevice)
 	default:
 		fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
 			"deviceType": devType}).Warn("hotplugAddDevice: unsupported device")
@@ -1095,13 +1096,13 @@ func (fc *firecracker) hotplugAddDevice(devInfo interface{}, devType deviceType)
 }
 
 // hotplugRemoveDevice supported in Firecracker VMM
-func (fc *firecracker) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-	span, _ := fc.trace("hotplugRemoveDevice")
+func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+	span, ctx := fc.trace(ctx, "hotplugRemoveDevice")
 	defer span.End()
 
 	switch devType {
 	case blockDev:
-		return fc.hotplugBlockDevice(*devInfo.(*config.BlockDrive), removeDevice)
+		return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), removeDevice)
 	default:
 		fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
 			"deviceType": devType}).Error("hotplugRemoveDevice: unsupported device")
@@ -1112,7 +1113,7 @@ func (fc *firecracker) hotplugRemoveDevice(devInfo interface{}, devType deviceTy
 
 // getSandboxConsole builds the path of the console where we can read
 // logs coming from the sandbox.
-func (fc *firecracker) getSandboxConsole(id string) (string, string, error) {
+func (fc *firecracker) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
 	master, slave, err := console.NewPty()
 	if err != nil {
 		fc.Logger().Debugf("Error create pseudo tty: %v", err)
@@ -1123,13 +1124,13 @@ func (fc *firecracker) getSandboxConsole(id string) (string, string, error) {
 	return consoleProtoPty, slave, nil
 }
 
-func (fc *firecracker) disconnect() {
+func (fc *firecracker) disconnect(ctx context.Context) {
 	fc.state.set(notReady)
 }
 
 // Adds all capabilities supported by firecracker implementation of hypervisor interface
-func (fc *firecracker) capabilities() types.Capabilities {
-	span, _ := fc.trace("capabilities")
+func (fc *firecracker) capabilities(ctx context.Context) types.Capabilities {
+	span, _ := fc.trace(ctx, "capabilities")
 	defer span.End()
 	var caps types.Capabilities
 	caps.SetBlockDeviceHotplugSupport()
@@ -1141,11 +1142,11 @@ func (fc *firecracker) hypervisorConfig() HypervisorConfig {
 	return fc.config
 }
 
-func (fc *firecracker) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
+func (fc *firecracker) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
 	return 0, memoryDevice{}, nil
 }
 
-func (fc *firecracker) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (fc *firecracker) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
 	return 0, 0, nil
 }
@@ -1153,7 +1154,7 @@ func (fc *firecracker) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCP
 //
 // As suggested by https://github.com/firecracker-microvm/firecracker/issues/718,
 // let's use `ps -T -p <pid>` to get fc vcpu info.
-func (fc *firecracker) getThreadIDs() (vcpuThreadIDs, error) {
+func (fc *firecracker) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
 	var vcpuInfo vcpuThreadIDs
 
 	vcpuInfo.vcpus = make(map[int]int)
@@ -1187,8 +1188,8 @@ func (fc *firecracker) getThreadIDs() (vcpuThreadIDs, error) {
 	return vcpuInfo, nil
 }
 
-func (fc *firecracker) cleanup() error {
-	fc.cleanupJail()
+func (fc *firecracker) cleanup(ctx context.Context) error {
+	fc.cleanupJail(ctx)
 	return nil
 }
@@ -1200,7 +1201,7 @@ func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *Hyperviso
 	return errors.New("firecracker is not supported by VM cache")
 }
 
-func (fc *firecracker) toGrpc() ([]byte, error) {
+func (fc *firecracker) toGrpc(ctx context.Context) ([]byte, error) {
 	return nil, errors.New("firecracker is not supported by VM cache")
 }
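A pattern worth noting throughout this file: methods that go on to call other traced helpers keep the derived context (`span, ctx := fc.trace(ctx, ...)`), while leaf helpers discard it (`span, _ :=`). A self-contained sketch of that discipline, with a stand-in type in place of firecracker:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	otelTrace "go.opentelemetry.io/otel/trace"
)

// fcLike is a minimal stand-in for the firecracker struct: non-leaf methods
// capture the derived context so their callees nest, leaf methods discard it.
type fcLike struct{}

func (fc *fcLike) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
	if parent == nil { // defensive fallback, as in the reworked helper
		parent = context.Background()
	}
	ctx, span := otel.Tracer("kata").Start(parent, name)
	return span, ctx
}

func (fc *fcLike) fcInit(ctx context.Context) error {
	span, ctx := fc.trace(ctx, "fcInit") // derived ctx flows to callees
	defer span.End()
	return fc.fcSetLogger(ctx)
}

func (fc *fcLike) fcSetLogger(ctx context.Context) error {
	span, _ := fc.trace(ctx, "fcSetLogger") // leaf: nothing below to trace
	defer span.End()
	return nil
}

func main() {
	fc := &fcLike{}
	_ = fc.fcInit(context.Background())
}
```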

View File

@@ -786,27 +786,27 @@ func generateVMSocket(id string, vmStogarePath string) (interface{}, error) {
 // The default hypervisor implementation is Qemu.
 type hypervisor interface {
 	createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error
-	startSandbox(timeout int) error
-	stopSandbox() error
-	pauseSandbox() error
+	startSandbox(ctx context.Context, timeout int) error
+	stopSandbox(ctx context.Context) error
+	pauseSandbox(ctx context.Context) error
 	saveSandbox() error
-	resumeSandbox() error
-	addDevice(devInfo interface{}, devType deviceType) error
-	hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error)
-	hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error)
-	resizeMemory(memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error)
-	resizeVCPUs(vcpus uint32) (uint32, uint32, error)
-	getSandboxConsole(sandboxID string) (string, string, error)
-	disconnect()
-	capabilities() types.Capabilities
+	resumeSandbox(ctx context.Context) error
+	addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error
+	hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error)
+	hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error)
+	resizeMemory(ctx context.Context, memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error)
+	resizeVCPUs(ctx context.Context, vcpus uint32) (uint32, uint32, error)
+	getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error)
+	disconnect(ctx context.Context)
+	capabilities(ctx context.Context) types.Capabilities
 	hypervisorConfig() HypervisorConfig
-	getThreadIDs() (vcpuThreadIDs, error)
-	cleanup() error
+	getThreadIDs(ctx context.Context) (vcpuThreadIDs, error)
+	cleanup(ctx context.Context) error
 	// getPids returns a slice of hypervisor related process ids.
 	// The hypervisor pid must be put at index 0.
 	getPids() []int
 	fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error
-	toGrpc() ([]byte, error)
+	toGrpc(ctx context.Context) ([]byte, error)
 	check() error
 
 	save() persistapi.HypervisorState

View File

@@ -38,44 +38,44 @@ type VCSandbox interface {
 	ID() string
 	SetAnnotations(annotations map[string]string) error
 
-	Stats() (SandboxStats, error)
-	Start() error
-	Stop(force bool) error
-	Release() error
-	Monitor() (chan error, error)
-	Delete() error
+	Stats(ctx context.Context) (SandboxStats, error)
+	Start(ctx context.Context) error
+	Stop(ctx context.Context, force bool) error
+	Release(ctx context.Context) error
+	Monitor(ctx context.Context) (chan error, error)
+	Delete(ctx context.Context) error
 	Status() SandboxStatus
-	CreateContainer(contConfig ContainerConfig) (VCContainer, error)
-	DeleteContainer(contID string) (VCContainer, error)
-	StartContainer(containerID string) (VCContainer, error)
-	StopContainer(containerID string, force bool) (VCContainer, error)
-	KillContainer(containerID string, signal syscall.Signal, all bool) error
+	CreateContainer(ctx context.Context, contConfig ContainerConfig) (VCContainer, error)
+	DeleteContainer(ctx context.Context, contID string) (VCContainer, error)
+	StartContainer(ctx context.Context, containerID string) (VCContainer, error)
+	StopContainer(ctx context.Context, containerID string, force bool) (VCContainer, error)
+	KillContainer(ctx context.Context, containerID string, signal syscall.Signal, all bool) error
 	StatusContainer(containerID string) (ContainerStatus, error)
-	StatsContainer(containerID string) (ContainerStats, error)
-	PauseContainer(containerID string) error
-	ResumeContainer(containerID string) error
-	EnterContainer(containerID string, cmd types.Cmd) (VCContainer, *Process, error)
-	UpdateContainer(containerID string, resources specs.LinuxResources) error
-	ProcessListContainer(containerID string, options ProcessListOptions) (ProcessList, error)
-	WaitProcess(containerID, processID string) (int32, error)
-	SignalProcess(containerID, processID string, signal syscall.Signal, all bool) error
-	WinsizeProcess(containerID, processID string, height, width uint32) error
+	StatsContainer(ctx context.Context, containerID string) (ContainerStats, error)
+	PauseContainer(ctx context.Context, containerID string) error
+	ResumeContainer(ctx context.Context, containerID string) error
+	EnterContainer(ctx context.Context, containerID string, cmd types.Cmd) (VCContainer, *Process, error)
+	UpdateContainer(ctx context.Context, containerID string, resources specs.LinuxResources) error
+	ProcessListContainer(ctx context.Context, containerID string, options ProcessListOptions) (ProcessList, error)
+	WaitProcess(ctx context.Context, containerID, processID string) (int32, error)
+	SignalProcess(ctx context.Context, containerID, processID string, signal syscall.Signal, all bool) error
+	WinsizeProcess(ctx context.Context, containerID, processID string, height, width uint32) error
 	IOStream(containerID, processID string) (io.WriteCloser, io.Reader, io.Reader, error)
 
-	AddDevice(info config.DeviceInfo) (api.Device, error)
+	AddDevice(ctx context.Context, info config.DeviceInfo) (api.Device, error)
 
-	AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error)
-	RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error)
-	ListInterfaces() ([]*pbTypes.Interface, error)
-	UpdateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error)
-	ListRoutes() ([]*pbTypes.Route, error)
+	AddInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error)
+	RemoveInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error)
+	ListInterfaces(ctx context.Context) ([]*pbTypes.Interface, error)
+	UpdateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error)
+	ListRoutes(ctx context.Context) ([]*pbTypes.Route, error)
 
-	GetOOMEvent() (string, error)
+	GetOOMEvent(ctx context.Context) (string, error)
 	GetHypervisorPid() (int, error)
 
 	UpdateRuntimeMetrics() error
-	GetAgentMetrics() (string, error)
+	GetAgentMetrics(ctx context.Context) (string, error)
 	GetAgentURL() (string, error)
 }

View File

@@ -6,6 +6,7 @@
 package virtcontainers
 
 import (
+	"context"
 	"errors"
 	"io"
 )
@@ -58,7 +59,8 @@ func (s *stdinStream) Write(data []byte) (n int, err error) {
 		return 0, errors.New("stream closed")
 	}
 
-	return s.sandbox.agent.writeProcessStdin(s.container, s.process, data)
+	// can not pass context to Write(), so use background context
+	return s.sandbox.agent.writeProcessStdin(context.Background(), s.container, s.process, data)
 }
 
 func (s *stdinStream) Close() error {
@@ -66,7 +68,8 @@ func (s *stdinStream) Close() error {
 		return errors.New("stream closed")
 	}
 
-	err := s.sandbox.agent.closeProcessStdin(s.container, s.process)
+	// can not pass context to Close(), so use background context
+	err := s.sandbox.agent.closeProcessStdin(context.Background(), s.container, s.process)
 	if err == nil {
 		s.closed = true
 	}
@@ -79,7 +82,8 @@ func (s *stdoutStream) Read(data []byte) (n int, err error) {
 		return 0, errors.New("stream closed")
 	}
 
-	return s.sandbox.agent.readProcessStdout(s.container, s.process, data)
+	// can not pass context to Read(), so use background context
+	return s.sandbox.agent.readProcessStdout(context.Background(), s.container, s.process, data)
 }
 
 func (s *stderrStream) Read(data []byte) (n int, err error) {
@@ -87,5 +91,6 @@ func (s *stderrStream) Read(data []byte) (n int, err error) {
 		return 0, errors.New("stream closed")
 	}
 
-	return s.sandbox.agent.readProcessStderr(s.container, s.process, data)
+	// can not pass context to Read(), so use background context
+	return s.sandbox.agent.readProcessStderr(context.Background(), s.container, s.process, data)
 }
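io.Reader, io.Writer, and io.Closer fix the signatures of Read, Write, and Close, so these adapters have nowhere to accept a caller context and fall back to context.Background(), producing root spans. One common alternative, not what this diff does, is to capture a context when the stream is constructed:

```go
package main

import (
	"context"
	"fmt"
	"io"
)

// stdinStreamLike captures a context at construction time, since io.Writer's
// Write(p []byte) signature cannot carry one. Hypothetical sketch; the diff
// above instead uses context.Background() inside each call.
type stdinStreamLike struct {
	ctx context.Context
}

func (s *stdinStreamLike) Write(p []byte) (int, error) {
	// The captured ctx stands in for the parameter Write cannot have.
	if err := s.ctx.Err(); err != nil {
		return 0, err
	}
	return len(p), nil
}

func main() {
	var w io.Writer = &stdinStreamLike{ctx: context.Background()}
	n, err := w.Write([]byte("hello"))
	fmt.Println(n, err)
}
```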

View File

@@ -6,6 +6,7 @@
 package virtcontainers
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/containernetworking/plugins/pkg/ns"
@@ -90,19 +91,19 @@ func (endpoint *IPVlanEndpoint) NetworkPair() *NetworkInterfacePair {
 
 // Attach for virtual endpoint bridges the network pair and adds the
 // tap interface of the network pair to the hypervisor.
-func (endpoint *IPVlanEndpoint) Attach(s *Sandbox) error {
+func (endpoint *IPVlanEndpoint) Attach(ctx context.Context, s *Sandbox) error {
 	h := s.hypervisor
-	if err := xConnectVMNetwork(endpoint, h); err != nil {
+	if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
 		networkLogger().WithError(err).Error("Error bridging virtual ep")
 		return err
 	}
 
-	return h.addDevice(endpoint, netDev)
+	return h.addDevice(ctx, endpoint, netDev)
 }
 
 // Detach for the virtual endpoint tears down the tap and bridge
 // created for the veth interface.
-func (endpoint *IPVlanEndpoint) Detach(netNsCreated bool, netNsPath string) error {
+func (endpoint *IPVlanEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
 	// The network namespace would have been deleted at this point
 	// if it has not been created by virtcontainers.
 	if !netNsCreated {
@@ -115,12 +116,12 @@ func (endpoint *IPVlanEndpoint) Detach(netNsCreated bool, netNsPath string) erro
 }
 
 // HotAttach for physical endpoint not supported yet
-func (endpoint *IPVlanEndpoint) HotAttach(h hypervisor) error {
+func (endpoint *IPVlanEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
 	return fmt.Errorf("IPVlanEndpoint does not support Hot attach")
 }
 
 // HotDetach for physical endpoint not supported yet
-func (endpoint *IPVlanEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
+func (endpoint *IPVlanEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
 	return fmt.Errorf("IPVlanEndpoint does not support Hot detach")
 }

View File

@@ -236,14 +236,14 @@ type kataAgent struct {
 	ctx context.Context
 }
 
-func (k *kataAgent) trace(name string) (otelTrace.Span, context.Context) {
-	if k.ctx == nil {
+func (k *kataAgent) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
+	if parent == nil {
 		k.Logger().WithField("type", "bug").Error("trace called before context set")
-		k.ctx = context.Background()
+		parent = context.Background()
 	}
 
 	tracer := otel.Tracer("kata")
-	ctx, span := tracer.Start(k.ctx, name)
+	ctx, span := tracer.Start(parent, name)
 	span.SetAttributes([]label.KeyValue{label.Key("subsystem").String("agent"), label.Key("type").String("kata")}...)
 
 	return span, ctx
@@ -332,7 +332,7 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgent
 	// save
 	k.ctx = sandbox.ctx
 
-	span, _ := k.trace("init")
+	span, _ := k.trace(ctx, "init")
 	defer span.End()
 
 	disableVMShutdown = k.handleTraceSettings(config)
@@ -425,7 +425,7 @@ func cleanupSandboxBindMounts(sandbox *Sandbox) error {
 	return nil
 }
 
-func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interface{}) error {
+func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath string, config interface{}) error {
 	err := k.internalConfigure(h, id, config)
 	if err != nil {
 		return err
@@ -433,11 +433,11 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interfa
 	switch s := k.vmSocket.(type) {
 	case types.VSock:
-		if err = h.addDevice(s, vSockPCIDev); err != nil {
+		if err = h.addDevice(ctx, s, vSockPCIDev); err != nil {
 			return err
 		}
 	case types.HybridVSock:
-		err = h.addDevice(s, hybridVirtioVsockDev)
+		err = h.addDevice(ctx, s, hybridVirtioVsockDev)
 		if err != nil {
 			return err
 		}
@@ -448,7 +448,7 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interfa
 	// Neither create shared directory nor add 9p device if hypervisor
 	// doesn't support filesystem sharing.
-	caps := h.capabilities()
+	caps := h.capabilities(ctx)
 	if !caps.IsFsSharingSupported() {
 		return nil
 	}
@@ -464,14 +464,14 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interfa
 		return err
 	}
 
-	return h.addDevice(sharedVolume, fsDev)
+	return h.addDevice(ctx, sharedVolume, fsDev)
 }
 
 func (k *kataAgent) configureFromGrpc(h hypervisor, id string, config interface{}) error {
 	return k.internalConfigure(h, id, config)
 }
 
-func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
+func (k *kataAgent) setupSharedPath(ctx context.Context, sandbox *Sandbox) error {
 	// create shared path structure
 	sharePath := getSharePath(sandbox.id)
 	mountPath := getMountPath(sandbox.id)
@@ -483,7 +483,7 @@ func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
 	}
 
 	// slave mount so that future mountpoints under mountPath are shown in sharePath as well
-	if err := bindMount(context.Background(), mountPath, sharePath, true, "slave"); err != nil {
+	if err := bindMount(ctx, mountPath, sharePath, true, "slave"); err != nil {
 		return err
 	}
@@ -495,14 +495,14 @@ func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
 	return nil
 }
 
-func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
-	span, _ := k.trace("createSandbox")
+func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
+	span, ctx := k.trace(ctx, "createSandbox")
 	defer span.End()
 
-	if err := k.setupSharedPath(sandbox); err != nil {
+	if err := k.setupSharedPath(ctx, sandbox); err != nil {
 		return err
 	}
-	return k.configure(sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), sandbox.config.AgentConfig)
+	return k.configure(ctx, sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), sandbox.config.AgentConfig)
 }
 
 func cmdToKataProcess(cmd types.Cmd) (process *grpc.Process, err error) {
@@ -582,8 +582,8 @@ func cmdEnvsToStringSlice(ev []types.EnvVar) []string {
 	return env
 }
 
-func (k *kataAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
-	span, _ := k.trace("exec")
+func (k *kataAgent) exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
+	span, ctx := k.trace(ctx, "exec")
 	defer span.End()
 
 	var kataProcess *grpc.Process
@@ -599,19 +599,19 @@ func (k *kataAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process
 		Process: kataProcess,
 	}
 
-	if _, err := k.sendReq(req); err != nil {
+	if _, err := k.sendReq(ctx, req); err != nil {
 		return nil, err
 	}
 
 	return buildProcessFromExecID(req.ExecId)
 }
 
-func (k *kataAgent) updateInterface(ifc *pbTypes.Interface) (*pbTypes.Interface, error) {
+func (k *kataAgent) updateInterface(ctx context.Context, ifc *pbTypes.Interface) (*pbTypes.Interface, error) {
 	// send update interface request
 	ifcReq := &grpc.UpdateInterfaceRequest{
 		Interface: ifc,
 	}
-	resultingInterface, err := k.sendReq(ifcReq)
+	resultingInterface, err := k.sendReq(ctx, ifcReq)
 	if err != nil {
 		k.Logger().WithFields(logrus.Fields{
 			"interface-requested": fmt.Sprintf("%+v", ifc),
@@ -624,23 +624,23 @@ func (k *kataAgent) updateInterface(ifc *pbTypes.Interface) (*pbTypes.Interface,
 	return nil, err
 }
 
-func (k *kataAgent) updateInterfaces(interfaces []*pbTypes.Interface) error {
+func (k *kataAgent) updateInterfaces(ctx context.Context, interfaces []*pbTypes.Interface) error {
 	for _, ifc := range interfaces {
-		if _, err := k.updateInterface(ifc); err != nil {
+		if _, err := k.updateInterface(ctx, ifc); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (k *kataAgent) updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
+func (k *kataAgent) updateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
 	if routes != nil {
 		routesReq := &grpc.UpdateRoutesRequest{
 			Routes: &grpc.Routes{
 				Routes: routes,
 			},
 		}
-		resultingRoutes, err := k.sendReq(routesReq)
+		resultingRoutes, err := k.sendReq(ctx, routesReq)
 		if err != nil {
 			k.Logger().WithFields(logrus.Fields{
 				"routes-requested": fmt.Sprintf("%+v", routes),
@@ -656,14 +656,14 @@ func (k *kataAgent) updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, err
 	return nil, nil
 }
 
-func (k *kataAgent) addARPNeighbors(neighs []*pbTypes.ARPNeighbor) error {
+func (k *kataAgent) addARPNeighbors(ctx context.Context, neighs []*pbTypes.ARPNeighbor) error {
 	if neighs != nil {
 		neighsReq := &grpc.AddARPNeighborsRequest{
 			Neighbors: &grpc.ARPNeighbors{
 				ARPNeighbors: neighs,
 			},
 		}
-		_, err := k.sendReq(neighsReq)
+		_, err := k.sendReq(ctx, neighsReq)
 		if err != nil {
 			if grpcStatus.Convert(err).Code() == codes.Unimplemented {
 				k.Logger().WithFields(logrus.Fields{
@@ -680,9 +680,9 @@ func (k *kataAgent) addARPNeighbors(neighs []*pbTypes.ARPNeighbor) error {
 	return nil
 }
 
-func (k *kataAgent) listInterfaces() ([]*pbTypes.Interface, error) {
+func (k *kataAgent) listInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
 	req := &grpc.ListInterfacesRequest{}
-	resultingInterfaces, err := k.sendReq(req)
+	resultingInterfaces, err := k.sendReq(ctx, req)
 	if err != nil {
 		return nil, err
 	}
@@ -693,9 +693,9 @@ func (k *kataAgent) listInterfaces() ([]*pbTypes.Interface, error) {
 	return resultInterfaces.Interfaces, nil
 }
 
-func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) {
+func (k *kataAgent) listRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
 	req := &grpc.ListRoutesRequest{}
-	resultingRoutes, err := k.sendReq(req)
+	resultingRoutes, err := k.sendReq(ctx, req)
 	if err != nil {
 		return nil, err
 	}
@@ -754,8 +754,8 @@ func (k *kataAgent) getDNS(sandbox *Sandbox) ([]string, error) {
 	return nil, nil
 }
 
-func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
-	span, _ := k.trace("startSandbox")
+func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
+	span, ctx := k.trace(ctx, "startSandbox")
 	defer span.End()
 
 	if err := k.setAgentURL(); err != nil {
@@ -773,7 +773,7 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
 	}
 
 	// check grpc server is serving
-	if err = k.check(); err != nil {
+	if err = k.check(ctx); err != nil {
 		return err
 	}
@@ -784,17 +784,17 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
 	if err != nil {
 		return err
 	}
-	if err = k.updateInterfaces(interfaces); err != nil {
+	if err = k.updateInterfaces(ctx, interfaces); err != nil {
 		return err
 	}
-	if _, err = k.updateRoutes(routes); err != nil {
+	if _, err = k.updateRoutes(ctx, routes); err != nil {
 		return err
 	}
-	if err = k.addARPNeighbors(neighs); err != nil {
+	if err = k.addARPNeighbors(ctx, neighs); err != nil {
 		return err
 	}
 
-	storages := setupStorages(sandbox)
+	storages := setupStorages(ctx, sandbox)
 
 	kmodules := setupKernelModules(k.kmodules)
@@ -808,13 +808,13 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
 		KernelModules: kmodules,
 	}
 
-	_, err = k.sendReq(req)
+	_, err = k.sendReq(ctx, req)
 	if err != nil {
 		return err
 	}
 
 	if k.dynamicTracing {
-		_, err = k.sendReq(&grpc.StartTracingRequest{})
+		_, err = k.sendReq(ctx, &grpc.StartTracingRequest{})
 		if err != nil {
 			return err
 		}
@@ -844,9 +844,9 @@ func setupKernelModules(kmodules []string) []*grpc.KernelModule {
 	return modules
 }
 
-func setupStorages(sandbox *Sandbox) []*grpc.Storage {
+func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage {
 	storages := []*grpc.Storage{}
-	caps := sandbox.hypervisor.capabilities()
+	caps := sandbox.hypervisor.capabilities(ctx)
 
 	// append 9p shared volume to storages only if filesystem sharing is supported
 	if caps.IsFsSharingSupported() {
@@ -909,18 +909,18 @@ func setupStorages(sandbox *Sandbox) []*grpc.Storage {
 	return storages
 }
 
-func (k *kataAgent) stopSandbox(sandbox *Sandbox) error {
-	span, _ := k.trace("stopSandbox")
+func (k *kataAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error {
+	span, ctx := k.trace(ctx, "stopSandbox")
 	defer span.End()
 
 	req := &grpc.DestroySandboxRequest{}
 
-	if _, err := k.sendReq(req); err != nil {
+	if _, err := k.sendReq(ctx, req); err != nil {
 		return err
 	}
 
 	if k.dynamicTracing {
-		_, err := k.sendReq(&grpc.StopTracingRequest{})
+		_, err := k.sendReq(ctx, &grpc.StopTracingRequest{})
 		if err != nil {
 			return err
 		}
@@ -1187,19 +1187,19 @@ func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*gr
 // been performed before the container creation failed.
 // - Unmount container volumes.
 // - Unmount container rootfs.
-func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
+func (k *kataAgent) rollbackFailingContainerCreation(ctx context.Context, c *Container) {
 	if c != nil {
-		if err2 := c.unmountHostMounts(); err2 != nil {
+		if err2 := c.unmountHostMounts(ctx); err2 != nil {
 			k.Logger().WithError(err2).Error("rollback failed unmountHostMounts()")
} }
if err2 := bindUnmountContainerRootfs(k.ctx, getMountPath(c.sandbox.id), c.id); err2 != nil { if err2 := bindUnmountContainerRootfs(ctx, getMountPath(c.sandbox.id), c.id); err2 != nil {
k.Logger().WithError(err2).Error("rollback failed bindUnmountContainerRootfs()") k.Logger().WithError(err2).Error("rollback failed bindUnmountContainerRootfs()")
} }
} }
} }
func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) { func (k *kataAgent) buildContainerRootfs(ctx context.Context, sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
if c.state.Fstype != "" && c.state.BlockDeviceID != "" { if c.state.Fstype != "" && c.state.BlockDeviceID != "" {
// The rootfs storage volume represents the container rootfs // The rootfs storage volume represents the container rootfs
// mount point inside the guest. // mount point inside the guest.
@ -1264,15 +1264,15 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
// With virtiofs/9pfs we don't need to ask the agent to mount the rootfs as the shared directory // With virtiofs/9pfs we don't need to ask the agent to mount the rootfs as the shared directory
// (kataGuestSharedDir) is already mounted in the guest. We only need to mount the rootfs from // (kataGuestSharedDir) is already mounted in the guest. We only need to mount the rootfs from
// the host and it will show up in the guest. // the host and it will show up in the guest.
if err := bindMountContainerRootfs(k.ctx, getMountPath(sandbox.id), c.id, c.rootFs.Target, false); err != nil { if err := bindMountContainerRootfs(ctx, getMountPath(sandbox.id), c.id, c.rootFs.Target, false); err != nil {
return nil, err return nil, err
} }
return nil, nil return nil, nil
} }
func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) { func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (p *Process, err error) {
span, _ := k.trace("createContainer") span, ctx := k.trace(ctx, "createContainer")
defer span.End() defer span.End()
var ctrStorages []*grpc.Storage var ctrStorages []*grpc.Storage
@ -1288,14 +1288,14 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
defer func() { defer func() {
if err != nil { if err != nil {
k.Logger().WithError(err).Error("createContainer failed") k.Logger().WithError(err).Error("createContainer failed")
k.rollbackFailingContainerCreation(c) k.rollbackFailingContainerCreation(ctx, c)
} }
}() }()
// setup rootfs -- if it's block based, we'll receive a non-nil storage object representing // setup rootfs -- if it's block based, we'll receive a non-nil storage object representing
// the block device for the rootfs, which is utilized for mounting in the guest. This'll be handled // the block device for the rootfs, which is utilized for mounting in the guest. This'll be handled
// already for non-block based rootfs // already for non-block based rootfs
if rootfs, err = k.buildContainerRootfs(sandbox, c, rootPathParent); err != nil { if rootfs, err = k.buildContainerRootfs(ctx, sandbox, c, rootPathParent); err != nil {
return nil, err return nil, err
} }
@ -1313,7 +1313,7 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
} }
// Handle container mounts // Handle container mounts
newMounts, ignoredMounts, err := c.mountSharedDirMounts(getSharePath(sandbox.id), getMountPath(sandbox.id), kataGuestSharedDir()) newMounts, ignoredMounts, err := c.mountSharedDirMounts(ctx, getSharePath(sandbox.id), getMountPath(sandbox.id), kataGuestSharedDir())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1380,7 +1380,7 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
SandboxPidns: sharedPidNs, SandboxPidns: sharedPidNs,
} }
if _, err = k.sendReq(req); err != nil { if _, err = k.sendReq(ctx, req); err != nil {
return nil, err return nil, err
} }
@ -1601,27 +1601,27 @@ func (k *kataAgent) handlePidNamespace(grpcSpec *grpc.Spec, sandbox *Sandbox) bo
return sharedPidNs return sharedPidNs
} }
func (k *kataAgent) startContainer(sandbox *Sandbox, c *Container) error { func (k *kataAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error {
span, _ := k.trace("startContainer") span, ctx := k.trace(ctx, "startContainer")
defer span.End() defer span.End()
req := &grpc.StartContainerRequest{ req := &grpc.StartContainerRequest{
ContainerId: c.id, ContainerId: c.id,
} }
_, err := k.sendReq(req) _, err := k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) stopContainer(sandbox *Sandbox, c Container) error { func (k *kataAgent) stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
span, _ := k.trace("stopContainer") span, ctx := k.trace(ctx, "stopContainer")
defer span.End() defer span.End()
_, err := k.sendReq(&grpc.RemoveContainerRequest{ContainerId: c.id}) _, err := k.sendReq(ctx, &grpc.RemoveContainerRequest{ContainerId: c.id})
return err return err
} }
func (k *kataAgent) signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error { func (k *kataAgent) signalProcess(ctx context.Context, c *Container, processID string, signal syscall.Signal, all bool) error {
execID := processID execID := processID
if all { if all {
// kata agent uses empty execId to signal all processes in a container // kata agent uses empty execId to signal all processes in a container
@ -1633,11 +1633,11 @@ func (k *kataAgent) signalProcess(c *Container, processID string, signal syscall
Signal: uint32(signal), Signal: uint32(signal),
} }
_, err := k.sendReq(req) _, err := k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) winsizeProcess(c *Container, processID string, height, width uint32) error { func (k *kataAgent) winsizeProcess(ctx context.Context, c *Container, processID string, height, width uint32) error {
req := &grpc.TtyWinResizeRequest{ req := &grpc.TtyWinResizeRequest{
ContainerId: c.id, ContainerId: c.id,
ExecId: processID, ExecId: processID,
@ -1645,18 +1645,18 @@ func (k *kataAgent) winsizeProcess(c *Container, processID string, height, width
Column: width, Column: width,
} }
_, err := k.sendReq(req) _, err := k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) processListContainer(sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) { func (k *kataAgent) processListContainer(ctx context.Context, sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) {
req := &grpc.ListProcessesRequest{ req := &grpc.ListProcessesRequest{
ContainerId: c.id, ContainerId: c.id,
Format: options.Format, Format: options.Format,
Args: options.Args, Args: options.Args,
} }
resp, err := k.sendReq(req) resp, err := k.sendReq(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1669,7 +1669,7 @@ func (k *kataAgent) processListContainer(sandbox *Sandbox, c Container, options
return processList.ProcessList, nil return processList.ProcessList, nil
} }
func (k *kataAgent) updateContainer(sandbox *Sandbox, c Container, resources specs.LinuxResources) error { func (k *kataAgent) updateContainer(ctx context.Context, sandbox *Sandbox, c Container, resources specs.LinuxResources) error {
grpcResources, err := grpc.ResourcesOCItoGRPC(&resources) grpcResources, err := grpc.ResourcesOCItoGRPC(&resources)
if err != nil { if err != nil {
return err return err
@ -1680,29 +1680,29 @@ func (k *kataAgent) updateContainer(sandbox *Sandbox, c Container, resources spe
Resources: grpcResources, Resources: grpcResources,
} }
_, err = k.sendReq(req) _, err = k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) pauseContainer(sandbox *Sandbox, c Container) error { func (k *kataAgent) pauseContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
req := &grpc.PauseContainerRequest{ req := &grpc.PauseContainerRequest{
ContainerId: c.id, ContainerId: c.id,
} }
_, err := k.sendReq(req) _, err := k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) resumeContainer(sandbox *Sandbox, c Container) error { func (k *kataAgent) resumeContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
req := &grpc.ResumeContainerRequest{ req := &grpc.ResumeContainerRequest{
ContainerId: c.id, ContainerId: c.id,
} }
_, err := k.sendReq(req) _, err := k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error { func (k *kataAgent) memHotplugByProbe(ctx context.Context, addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error {
if memorySectionSizeMB == uint32(0) { if memorySectionSizeMB == uint32(0) {
return fmt.Errorf("memorySectionSizeMB couldn't be zero") return fmt.Errorf("memorySectionSizeMB couldn't be zero")
} }
@ -1722,27 +1722,27 @@ func (k *kataAgent) memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionS
MemHotplugProbeAddr: addrList, MemHotplugProbeAddr: addrList,
} }
_, err := k.sendReq(req) _, err := k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error { func (k *kataAgent) onlineCPUMem(ctx context.Context, cpus uint32, cpuOnly bool) error {
req := &grpc.OnlineCPUMemRequest{ req := &grpc.OnlineCPUMemRequest{
Wait: false, Wait: false,
NbCpus: cpus, NbCpus: cpus,
CpuOnly: cpuOnly, CpuOnly: cpuOnly,
} }
_, err := k.sendReq(req) _, err := k.sendReq(ctx, req)
return err return err
} }
func (k *kataAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error) { func (k *kataAgent) statsContainer(ctx context.Context, sandbox *Sandbox, c Container) (*ContainerStats, error) {
req := &grpc.StatsContainerRequest{ req := &grpc.StatsContainerRequest{
ContainerId: c.id, ContainerId: c.id,
} }
returnStats, err := k.sendReq(req) returnStats, err := k.sendReq(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1769,7 +1769,7 @@ func (k *kataAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerSta
return containerStats, nil return containerStats, nil
} }
func (k *kataAgent) connect() error { func (k *kataAgent) connect(ctx context.Context) error {
if k.dead { if k.dead {
return errors.New("Dead agent") return errors.New("Dead agent")
} }
@ -1778,7 +1778,7 @@ func (k *kataAgent) connect() error {
return nil return nil
} }
span, _ := k.trace("connect") span, ctx := k.trace(ctx, "connect")
defer span.End() defer span.End()
// This is for the first connection only, to prevent race // This is for the first connection only, to prevent race
@ -1801,8 +1801,8 @@ func (k *kataAgent) connect() error {
return nil return nil
} }
func (k *kataAgent) disconnect() error { func (k *kataAgent) disconnect(ctx context.Context) error {
span, _ := k.trace("disconnect") span, _ := k.trace(ctx, "disconnect")
defer span.End() defer span.End()
k.Lock() k.Lock()
@ -1823,22 +1823,22 @@ func (k *kataAgent) disconnect() error {
} }
// check grpc server is serving // check grpc server is serving
func (k *kataAgent) check() error { func (k *kataAgent) check(ctx context.Context) error {
span, _ := k.trace("check") span, ctx := k.trace(ctx, "check")
defer span.End() defer span.End()
_, err := k.sendReq(&grpc.CheckRequest{}) _, err := k.sendReq(ctx, &grpc.CheckRequest{})
if err != nil { if err != nil {
err = fmt.Errorf("Failed to check if grpc server is working: %s", err) err = fmt.Errorf("Failed to check if grpc server is working: %s", err)
} }
return err return err
} }
func (k *kataAgent) waitProcess(c *Container, processID string) (int32, error) { func (k *kataAgent) waitProcess(ctx context.Context, c *Container, processID string) (int32, error) {
span, _ := k.trace("waitProcess") span, ctx := k.trace(ctx, "waitProcess")
defer span.End() defer span.End()
resp, err := k.sendReq(&grpc.WaitProcessRequest{ resp, err := k.sendReq(ctx, &grpc.WaitProcessRequest{
ContainerId: c.id, ContainerId: c.id,
ExecId: processID, ExecId: processID,
}) })
@ -1849,8 +1849,8 @@ func (k *kataAgent) waitProcess(c *Container, processID string) (int32, error) {
return resp.(*grpc.WaitProcessResponse).Status, nil return resp.(*grpc.WaitProcessResponse).Status, nil
} }
func (k *kataAgent) writeProcessStdin(c *Container, ProcessID string, data []byte) (int, error) { func (k *kataAgent) writeProcessStdin(ctx context.Context, c *Container, ProcessID string, data []byte) (int, error) {
resp, err := k.sendReq(&grpc.WriteStreamRequest{ resp, err := k.sendReq(ctx, &grpc.WriteStreamRequest{
ContainerId: c.id, ContainerId: c.id,
ExecId: ProcessID, ExecId: ProcessID,
Data: data, Data: data,
@ -1863,8 +1863,8 @@ func (k *kataAgent) writeProcessStdin(c *Container, ProcessID string, data []byt
return int(resp.(*grpc.WriteStreamResponse).Len), nil return int(resp.(*grpc.WriteStreamResponse).Len), nil
} }
func (k *kataAgent) closeProcessStdin(c *Container, ProcessID string) error { func (k *kataAgent) closeProcessStdin(ctx context.Context, c *Container, ProcessID string) error {
_, err := k.sendReq(&grpc.CloseStdinRequest{ _, err := k.sendReq(ctx, &grpc.CloseStdinRequest{
ContainerId: c.id, ContainerId: c.id,
ExecId: ProcessID, ExecId: ProcessID,
}) })
@ -1872,8 +1872,8 @@ func (k *kataAgent) closeProcessStdin(c *Container, ProcessID string) error {
return err return err
} }
func (k *kataAgent) reseedRNG(data []byte) error { func (k *kataAgent) reseedRNG(ctx context.Context, data []byte) error {
_, err := k.sendReq(&grpc.ReseedRandomDevRequest{ _, err := k.sendReq(ctx, &grpc.ReseedRandomDevRequest{
Data: data, Data: data,
}) })
@ -1996,17 +1996,17 @@ func (k *kataAgent) getReqContext(reqName string) (ctx context.Context, cancel c
return ctx, cancel return ctx, cancel
} }
func (k *kataAgent) sendReq(request interface{}) (interface{}, error) { func (k *kataAgent) sendReq(spanCtx context.Context, request interface{}) (interface{}, error) {
start := time.Now() start := time.Now()
span, _ := k.trace("sendReq") span, spanCtx := k.trace(spanCtx, "sendReq")
span.SetAttributes(label.Key("request").String(fmt.Sprintf("%+v", request))) span.SetAttributes(label.Key("request").String(fmt.Sprintf("%+v", request)))
defer span.End() defer span.End()
if err := k.connect(); err != nil { if err := k.connect(spanCtx); err != nil {
return nil, err return nil, err
} }
if !k.keepConn { if !k.keepConn {
defer k.disconnect() defer k.disconnect(spanCtx)
} }
msgName := proto.MessageName(request.(proto.Message)) msgName := proto.MessageName(request.(proto.Message))
@ -2028,24 +2028,24 @@ func (k *kataAgent) sendReq(request interface{}) (interface{}, error) {
} }
// readStdout and readStderr are special in that we cannot differentiate them by request type... // readStdout and readStderr are special in that we cannot differentiate them by request type...
func (k *kataAgent) readProcessStdout(c *Container, processID string, data []byte) (int, error) { func (k *kataAgent) readProcessStdout(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
if err := k.connect(); err != nil { if err := k.connect(ctx); err != nil {
return 0, err return 0, err
} }
if !k.keepConn { if !k.keepConn {
defer k.disconnect() defer k.disconnect(ctx)
} }
return k.readProcessStream(c.id, processID, data, k.client.AgentServiceClient.ReadStdout) return k.readProcessStream(c.id, processID, data, k.client.AgentServiceClient.ReadStdout)
} }
// readStdout and readStderr are special in that we cannot differentiate them by request type... // readStdout and readStderr are special in that we cannot differentiate them by request type...
func (k *kataAgent) readProcessStderr(c *Container, processID string, data []byte) (int, error) { func (k *kataAgent) readProcessStderr(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
if err := k.connect(); err != nil { if err := k.connect(ctx); err != nil {
return 0, err return 0, err
} }
if !k.keepConn { if !k.keepConn {
defer k.disconnect() defer k.disconnect(ctx)
} }
return k.readProcessStream(c.id, processID, data, k.client.AgentServiceClient.ReadStderr) return k.readProcessStream(c.id, processID, data, k.client.AgentServiceClient.ReadStderr)
@ -2066,8 +2066,8 @@ func (k *kataAgent) readProcessStream(containerID, processID string, data []byte
return 0, err return 0, err
} }
func (k *kataAgent) getGuestDetails(req *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) { func (k *kataAgent) getGuestDetails(ctx context.Context, req *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) {
resp, err := k.sendReq(req) resp, err := k.sendReq(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -2075,8 +2075,8 @@ func (k *kataAgent) getGuestDetails(req *grpc.GuestDetailsRequest) (*grpc.GuestD
return resp.(*grpc.GuestDetailsResponse), nil return resp.(*grpc.GuestDetailsResponse), nil
} }
func (k *kataAgent) setGuestDateTime(tv time.Time) error { func (k *kataAgent) setGuestDateTime(ctx context.Context, tv time.Time) error {
_, err := k.sendReq(&grpc.SetGuestDateTimeRequest{ _, err := k.sendReq(ctx, &grpc.SetGuestDateTimeRequest{
Sec: tv.Unix(), Sec: tv.Unix(),
Usec: int64(tv.Nanosecond() / 1e3), Usec: int64(tv.Nanosecond() / 1e3),
}) })
@ -2084,7 +2084,7 @@ func (k *kataAgent) setGuestDateTime(tv time.Time) error {
return err return err
} }
func (k *kataAgent) copyFile(src, dst string) error { func (k *kataAgent) copyFile(ctx context.Context, src, dst string) error {
var st unix.Stat_t var st unix.Stat_t
err := unix.Stat(src, &st) err := unix.Stat(src, &st)
@ -2115,7 +2115,7 @@ func (k *kataAgent) copyFile(src, dst string) error {
// Handle the special case where the file is empty // Handle the special case where the file is empty
if fileSize == 0 { if fileSize == 0 {
_, err = k.sendReq(cpReq) _, err = k.sendReq(ctx, cpReq)
return err return err
} }
@ -2131,7 +2131,7 @@ func (k *kataAgent) copyFile(src, dst string) error {
cpReq.Data = b[:bytesToCopy] cpReq.Data = b[:bytesToCopy]
cpReq.Offset = offset cpReq.Offset = offset
if _, err = k.sendReq(cpReq); err != nil { if _, err = k.sendReq(ctx, cpReq); err != nil {
return fmt.Errorf("Could not send CopyFile request: %v", err) return fmt.Errorf("Could not send CopyFile request: %v", err)
} }
@ -2143,13 +2143,13 @@ func (k *kataAgent) copyFile(src, dst string) error {
return nil return nil
} }
func (k *kataAgent) markDead() { func (k *kataAgent) markDead(ctx context.Context) {
k.Logger().Infof("mark agent dead") k.Logger().Infof("mark agent dead")
k.dead = true k.dead = true
k.disconnect() k.disconnect(ctx)
} }
func (k *kataAgent) cleanup(s *Sandbox) { func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
if err := cleanupSandboxBindMounts(s); err != nil { if err := cleanupSandboxBindMounts(s); err != nil {
k.Logger().WithError(err).Errorf("failed to cleanup observability logs bindmount") k.Logger().WithError(err).Errorf("failed to cleanup observability logs bindmount")
} }
@ -2163,7 +2163,7 @@ func (k *kataAgent) cleanup(s *Sandbox) {
// Unmount mount path // Unmount mount path
path = getMountPath(s.id) path = getMountPath(s.id)
if err := bindUnmountAllRootfs(k.ctx, path, s); err != nil { if err := bindUnmountAllRootfs(ctx, path, s); err != nil {
k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path) k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path)
} }
if err := os.RemoveAll(getSandboxPath(s.id)); err != nil { if err := os.RemoveAll(getSandboxPath(s.id)); err != nil {
@ -2181,9 +2181,9 @@ func (k *kataAgent) load(s persistapi.AgentState) {
k.state.URL = s.URL k.state.URL = s.URL
} }
func (k *kataAgent) getOOMEvent() (string, error) { func (k *kataAgent) getOOMEvent(ctx context.Context) (string, error) {
req := &grpc.GetOOMEventRequest{} req := &grpc.GetOOMEventRequest{}
result, err := k.sendReq(req) result, err := k.sendReq(ctx, req)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -2193,8 +2193,8 @@ func (k *kataAgent) getOOMEvent() (string, error) {
return "", err return "", err
} }
func (k *kataAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) { func (k *kataAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
resp, err := k.sendReq(req) resp, err := k.sendReq(ctx, req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
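
The pattern repeated throughout this file is the one the commit message describes: each agent method now takes the caller's context, `k.trace(ctx, name)` starts a span from it, and the derived context is what flows into `sendReq`, so every gRPC request renders as a child span instead of a root. A minimal sketch of that helper, assuming the OpenTelemetry Go API (the tracer name "kata" and the `example` wrapper are illustrative, not the kata code):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	otelTrace "go.opentelemetry.io/otel/trace"
)

// trace starts a span as a child of whatever span lives in the caller's
// context and returns the derived context, so nested calls become children.
func trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
	ctx, span := otel.Tracer("kata").Start(parent, name)
	return span, ctx
}

func example(ctx context.Context) {
	span, ctx := trace(ctx, "example")
	defer span.End()

	child, _ := trace(ctx, "sendReq") // renders under "example", not as a root span
	child.End()
}

func main() {
	example(context.Background())
}
```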

View File

@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"os" "os"
@ -59,7 +60,7 @@ func (endpoint *MacvtapEndpoint) SetProperties(properties NetworkInfo) {
} }
// Attach for macvtap endpoint passes macvtap device to the hypervisor. // Attach for macvtap endpoint passes macvtap device to the hypervisor.
func (endpoint *MacvtapEndpoint) Attach(s *Sandbox) error { func (endpoint *MacvtapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
var err error var err error
h := s.hypervisor h := s.hypervisor
@ -76,21 +77,21 @@ func (endpoint *MacvtapEndpoint) Attach(s *Sandbox) error {
endpoint.VhostFds = vhostFds endpoint.VhostFds = vhostFds
} }
return h.addDevice(endpoint, netDev) return h.addDevice(ctx, endpoint, netDev)
} }
// Detach for macvtap endpoint does nothing. // Detach for macvtap endpoint does nothing.
func (endpoint *MacvtapEndpoint) Detach(netNsCreated bool, netNsPath string) error { func (endpoint *MacvtapEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
return nil return nil
} }
// HotAttach for macvtap endpoint not supported yet // HotAttach for macvtap endpoint not supported yet
func (endpoint *MacvtapEndpoint) HotAttach(h hypervisor) error { func (endpoint *MacvtapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
return fmt.Errorf("MacvtapEndpoint does not support Hot attach") return fmt.Errorf("MacvtapEndpoint does not support Hot attach")
} }
// HotDetach for macvtap endpoint not supported yet // HotDetach for macvtap endpoint not supported yet
func (endpoint *MacvtapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error { func (endpoint *MacvtapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("MacvtapEndpoint does not support Hot detach") return fmt.Errorf("MacvtapEndpoint does not support Hot detach")
} }
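
Taken together, the four macvtap methods above show the new shape of the endpoint contract: every lifecycle operation now receives the caller's context before anything else. A compilable sketch of that contract, with stand-in types for the virtcontainers internals and trimmed to the methods visible in this diff:

```go
package main

import "context"

// Illustrative stand-ins for the real virtcontainers types.
type Sandbox struct{}
type hypervisor interface{}

// Endpoint lifecycle methods after this commit: ctx comes first, so device
// adds and hotplugs trace as children of the sandbox operation driving them.
type Endpoint interface {
	Attach(ctx context.Context, s *Sandbox) error
	Detach(ctx context.Context, netNsCreated bool, netNsPath string) error
	HotAttach(ctx context.Context, h hypervisor) error
	HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error
}

func main() {}
```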

View File

@ -36,7 +36,7 @@ func (n *mockAgent) longLiveConn() bool {
} }
// createSandbox is the Noop agent sandbox creation implementation. It does nothing. // createSandbox is the Noop agent sandbox creation implementation. It does nothing.
func (n *mockAgent) createSandbox(sandbox *Sandbox) error { func (n *mockAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
return nil return nil
} }
@ -46,137 +46,137 @@ func (n *mockAgent) capabilities() types.Capabilities {
} }
// disconnect is the Noop agent connection closer. It does nothing. // disconnect is the Noop agent connection closer. It does nothing.
func (n *mockAgent) disconnect() error { func (n *mockAgent) disconnect(ctx context.Context) error {
return nil return nil
} }
// exec is the Noop agent command execution implementation. It does nothing. // exec is the Noop agent command execution implementation. It does nothing.
func (n *mockAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) { func (n *mockAgent) exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
return nil, nil return nil, nil
} }
// startSandbox is the Noop agent Sandbox starting implementation. It does nothing. // startSandbox is the Noop agent Sandbox starting implementation. It does nothing.
func (n *mockAgent) startSandbox(sandbox *Sandbox) error { func (n *mockAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
return nil return nil
} }
// stopSandbox is the Noop agent Sandbox stopping implementation. It does nothing. // stopSandbox is the Noop agent Sandbox stopping implementation. It does nothing.
func (n *mockAgent) stopSandbox(sandbox *Sandbox) error { func (n *mockAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error {
return nil return nil
} }
// createContainer is the Noop agent Container creation implementation. It does nothing. // createContainer is the Noop agent Container creation implementation. It does nothing.
func (n *mockAgent) createContainer(sandbox *Sandbox, c *Container) (*Process, error) { func (n *mockAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (*Process, error) {
return &Process{}, nil return &Process{}, nil
} }
// startContainer is the Noop agent Container starting implementation. It does nothing. // startContainer is the Noop agent Container starting implementation. It does nothing.
func (n *mockAgent) startContainer(sandbox *Sandbox, c *Container) error { func (n *mockAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error {
return nil return nil
} }
// stopContainer is the Noop agent Container stopping implementation. It does nothing. // stopContainer is the Noop agent Container stopping implementation. It does nothing.
func (n *mockAgent) stopContainer(sandbox *Sandbox, c Container) error { func (n *mockAgent) stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
return nil return nil
} }
// signalProcess is the Noop agent Container signaling implementation. It does nothing. // signalProcess is the Noop agent Container signaling implementation. It does nothing.
func (n *mockAgent) signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error { func (n *mockAgent) signalProcess(ctx context.Context, c *Container, processID string, signal syscall.Signal, all bool) error {
return nil return nil
} }
// processListContainer is the Noop agent Container ps implementation. It does nothing. // processListContainer is the Noop agent Container ps implementation. It does nothing.
func (n *mockAgent) processListContainer(sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) { func (n *mockAgent) processListContainer(ctx context.Context, sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) {
return nil, nil return nil, nil
} }
// updateContainer is the Noop agent Container update implementation. It does nothing. // updateContainer is the Noop agent Container update implementation. It does nothing.
func (n *mockAgent) updateContainer(sandbox *Sandbox, c Container, resources specs.LinuxResources) error { func (n *mockAgent) updateContainer(ctx context.Context, sandbox *Sandbox, c Container, resources specs.LinuxResources) error {
return nil return nil
} }
// memHotplugByProbe is the Noop agent implementation of memory hotplug notification via the probe interface. It does nothing. // memHotplugByProbe is the Noop agent implementation of memory hotplug notification via the probe interface. It does nothing.
func (n *mockAgent) memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error { func (n *mockAgent) memHotplugByProbe(ctx context.Context, addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error {
return nil return nil
} }
// onlineCPUMem is the Noop agent Container online CPU and Memory implementation. It does nothing. // onlineCPUMem is the Noop agent Container online CPU and Memory implementation. It does nothing.
func (n *mockAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error { func (n *mockAgent) onlineCPUMem(ctx context.Context, cpus uint32, cpuOnly bool) error {
return nil return nil
} }
// updateInterface is the Noop agent Interface update implementation. It does nothing. // updateInterface is the Noop agent Interface update implementation. It does nothing.
func (n *mockAgent) updateInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) { func (n *mockAgent) updateInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
return nil, nil return nil, nil
} }
// listInterfaces is the Noop agent Interfaces list implementation. It does nothing. // listInterfaces is the Noop agent Interfaces list implementation. It does nothing.
func (n *mockAgent) listInterfaces() ([]*pbTypes.Interface, error) { func (n *mockAgent) listInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
return nil, nil return nil, nil
} }
// updateRoutes is the Noop agent Routes update implementation. It does nothing. // updateRoutes is the Noop agent Routes update implementation. It does nothing.
func (n *mockAgent) updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) { func (n *mockAgent) updateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
return nil, nil return nil, nil
} }
// listRoutes is the Noop agent Routes list implementation. It does nothing. // listRoutes is the Noop agent Routes list implementation. It does nothing.
func (n *mockAgent) listRoutes() ([]*pbTypes.Route, error) { func (n *mockAgent) listRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
return nil, nil return nil, nil
} }
// check is the Noop agent health checker. It does nothing. // check is the Noop agent health checker. It does nothing.
func (n *mockAgent) check() error { func (n *mockAgent) check(ctx context.Context) error {
return nil return nil
} }
// statsContainer is the Noop agent Container stats implementation. It does nothing. // statsContainer is the Noop agent Container stats implementation. It does nothing.
func (n *mockAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error) { func (n *mockAgent) statsContainer(ctx context.Context, sandbox *Sandbox, c Container) (*ContainerStats, error) {
return &ContainerStats{}, nil return &ContainerStats{}, nil
} }
// waitProcess is the Noop agent process waiter. It does nothing. // waitProcess is the Noop agent process waiter. It does nothing.
func (n *mockAgent) waitProcess(c *Container, processID string) (int32, error) { func (n *mockAgent) waitProcess(ctx context.Context, c *Container, processID string) (int32, error) {
return 0, nil return 0, nil
} }
// winsizeProcess is the Noop agent process tty resizer. It does nothing. // winsizeProcess is the Noop agent process tty resizer. It does nothing.
func (n *mockAgent) winsizeProcess(c *Container, processID string, height, width uint32) error { func (n *mockAgent) winsizeProcess(ctx context.Context, c *Container, processID string, height, width uint32) error {
return nil return nil
} }
// writeProcessStdin is the Noop agent process stdin writer. It does nothing. // writeProcessStdin is the Noop agent process stdin writer. It does nothing.
func (n *mockAgent) writeProcessStdin(c *Container, ProcessID string, data []byte) (int, error) { func (n *mockAgent) writeProcessStdin(ctx context.Context, c *Container, ProcessID string, data []byte) (int, error) {
return 0, nil return 0, nil
} }
// closeProcessStdin is the Noop agent process stdin closer. It does nothing. // closeProcessStdin is the Noop agent process stdin closer. It does nothing.
func (n *mockAgent) closeProcessStdin(c *Container, ProcessID string) error { func (n *mockAgent) closeProcessStdin(ctx context.Context, c *Container, ProcessID string) error {
return nil return nil
} }
// readProcessStdout is the Noop agent process stdout reader. It does nothing. // readProcessStdout is the Noop agent process stdout reader. It does nothing.
func (n *mockAgent) readProcessStdout(c *Container, processID string, data []byte) (int, error) { func (n *mockAgent) readProcessStdout(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
return 0, nil return 0, nil
} }
// readProcessStderr is the Noop agent process stderr reader. It does nothing. // readProcessStderr is the Noop agent process stderr reader. It does nothing.
func (n *mockAgent) readProcessStderr(c *Container, processID string, data []byte) (int, error) { func (n *mockAgent) readProcessStderr(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
return 0, nil return 0, nil
} }
// pauseContainer is the Noop agent Container pause implementation. It does nothing. // pauseContainer is the Noop agent Container pause implementation. It does nothing.
func (n *mockAgent) pauseContainer(sandbox *Sandbox, c Container) error { func (n *mockAgent) pauseContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
return nil return nil
} }
// resumeContainer is the Noop agent Container resume implementation. It does nothing. // resumeContainer is the Noop agent Container resume implementation. It does nothing.
func (n *mockAgent) resumeContainer(sandbox *Sandbox, c Container) error { func (n *mockAgent) resumeContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
return nil return nil
} }
// configure is the Noop agent hypervisor configuration implementation. It does nothing. // configure is the Noop agent hypervisor configuration implementation. It does nothing.
func (n *mockAgent) configure(h hypervisor, id, sharePath string, config interface{}) error { func (n *mockAgent) configure(ctx context.Context, h hypervisor, id, sharePath string, config interface{}) error {
return nil return nil
} }
@ -185,7 +185,7 @@ func (n *mockAgent) configureFromGrpc(h hypervisor, id string, config interface{
} }
// reseedRNG is the Noop agent RND reseeder. It does nothing. // reseedRNG is the Noop agent RND reseeder. It does nothing.
func (n *mockAgent) reseedRNG(data []byte) error { func (n *mockAgent) reseedRNG(ctx context.Context, data []byte) error {
return nil return nil
} }
@ -205,24 +205,24 @@ func (n *mockAgent) setAgentURL() error {
} }
// getGuestDetails is the Noop agent GuestDetails queryer. It does nothing. // getGuestDetails is the Noop agent GuestDetails queryer. It does nothing.
func (n *mockAgent) getGuestDetails(*grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) { func (n *mockAgent) getGuestDetails(context.Context, *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) {
return nil, nil return nil, nil
} }
// setGuestDateTime is the Noop agent guest time setter. It does nothing. // setGuestDateTime is the Noop agent guest time setter. It does nothing.
func (n *mockAgent) setGuestDateTime(time.Time) error { func (n *mockAgent) setGuestDateTime(context.Context, time.Time) error {
return nil return nil
} }
// copyFile is the Noop agent copy file. It does nothing. // copyFile is the Noop agent copy file. It does nothing.
func (n *mockAgent) copyFile(src, dst string) error { func (n *mockAgent) copyFile(ctx context.Context, src, dst string) error {
return nil return nil
} }
func (n *mockAgent) markDead() { func (n *mockAgent) markDead(ctx context.Context) {
} }
func (n *mockAgent) cleanup(s *Sandbox) { func (n *mockAgent) cleanup(ctx context.Context, s *Sandbox) {
} }
// save is the Noop agent state saver. It does nothing. // save is the Noop agent state saver. It does nothing.
@ -233,10 +233,10 @@ func (n *mockAgent) save() (s persistapi.AgentState) {
// load is the Noop agent state loader. It does nothing. // load is the Noop agent state loader. It does nothing.
func (n *mockAgent) load(s persistapi.AgentState) {} func (n *mockAgent) load(s persistapi.AgentState) {}
func (n *mockAgent) getOOMEvent() (string, error) { func (n *mockAgent) getOOMEvent(ctx context.Context) (string, error) {
return "", nil return "", nil
} }
func (k *mockAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) { func (k *mockAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
return nil, nil return nil, nil
} }
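
Because the mock has to mirror every one of these signature changes, the usual Go guard for keeping it in lockstep with the `agent` interface is a compile-time assertion. A toy sketch of the idiom (the two-method interface here is illustrative, not the real one):

```go
package main

import "context"

type agent interface {
	check(ctx context.Context) error
	getOOMEvent(ctx context.Context) (string, error)
}

type mockAgent struct{}

func (n *mockAgent) check(ctx context.Context) error                  { return nil }
func (n *mockAgent) getOOMEvent(ctx context.Context) (string, error) { return "", nil }

// Fails to compile the moment the mock drifts from the interface.
var _ agent = (*mockAgent)(nil)

func main() {}
```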

View File

@ -20,7 +20,7 @@ type mockHypervisor struct {
mockPid int mockPid int
} }
func (m *mockHypervisor) capabilities() types.Capabilities { func (m *mockHypervisor) capabilities(ctx context.Context) types.Capabilities {
return types.Capabilities{} return types.Capabilities{}
} }
@ -37,19 +37,19 @@ func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS
return nil return nil
} }
func (m *mockHypervisor) startSandbox(timeout int) error { func (m *mockHypervisor) startSandbox(ctx context.Context, timeout int) error {
return nil return nil
} }
func (m *mockHypervisor) stopSandbox() error { func (m *mockHypervisor) stopSandbox(ctx context.Context) error {
return nil return nil
} }
func (m *mockHypervisor) pauseSandbox() error { func (m *mockHypervisor) pauseSandbox(ctx context.Context) error {
return nil return nil
} }
func (m *mockHypervisor) resumeSandbox() error { func (m *mockHypervisor) resumeSandbox(ctx context.Context) error {
return nil return nil
} }
@ -57,11 +57,11 @@ func (m *mockHypervisor) saveSandbox() error {
return nil return nil
} }
func (m *mockHypervisor) addDevice(devInfo interface{}, devType deviceType) error { func (m *mockHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
return nil return nil
} }
func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) { func (m *mockHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
switch devType { switch devType {
case cpuDev: case cpuDev:
return devInfo.(uint32), nil return devInfo.(uint32), nil
@ -72,7 +72,7 @@ func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceTyp
return nil, nil return nil, nil
} }
func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) { func (m *mockHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
switch devType { switch devType {
case cpuDev: case cpuDev:
return devInfo.(uint32), nil return devInfo.(uint32), nil
@ -82,26 +82,26 @@ func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType device
return nil, nil return nil, nil
} }
func (m *mockHypervisor) getSandboxConsole(sandboxID string) (string, string, error) { func (m *mockHypervisor) getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error) {
return "", "", nil return "", "", nil
} }
func (m *mockHypervisor) resizeMemory(memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, memoryDevice, error) { func (m *mockHypervisor) resizeMemory(ctx context.Context, memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
return 0, memoryDevice{}, nil return 0, memoryDevice{}, nil
} }
func (m *mockHypervisor) resizeVCPUs(cpus uint32) (uint32, uint32, error) { func (m *mockHypervisor) resizeVCPUs(ctx context.Context, cpus uint32) (uint32, uint32, error) {
return 0, 0, nil return 0, 0, nil
} }
func (m *mockHypervisor) disconnect() { func (m *mockHypervisor) disconnect(ctx context.Context) {
} }
func (m *mockHypervisor) getThreadIDs() (vcpuThreadIDs, error) { func (m *mockHypervisor) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
vcpus := map[int]int{0: os.Getpid()} vcpus := map[int]int{0: os.Getpid()}
return vcpuThreadIDs{vcpus}, nil return vcpuThreadIDs{vcpus}, nil
} }
func (m *mockHypervisor) cleanup() error { func (m *mockHypervisor) cleanup(ctx context.Context) error {
return nil return nil
} }
@ -113,7 +113,7 @@ func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *Hypervi
return errors.New("mockHypervisor is not supported by VM cache") return errors.New("mockHypervisor is not supported by VM cache")
} }
func (m *mockHypervisor) toGrpc() ([]byte, error) { func (m *mockHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
return nil, errors.New("mockHypervisor is not supported by VM cache") return nil, errors.New("mockHypervisor is not supported by VM cache")
} }
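
The mock's hotplug handlers simply echo the typed payload back, which is what lets tests assert round-trips without a real VMM. A self-contained sketch of that echo pattern with toy types (`cpuDev` stands in for the real device type constant):

```go
package main

import (
	"context"
	"fmt"
)

type deviceType int

const cpuDev deviceType = iota

// hotplugAddDevice echoes the payload back for known device types,
// mirroring the type switch in the mock above.
func hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
	switch devType {
	case cpuDev:
		return devInfo.(uint32), nil
	}
	return nil, nil
}

func main() {
	got, err := hotplugAddDevice(context.Background(), uint32(4), cpuDev)
	fmt.Println(got, err) // 4 <nil>
}
```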

View File

@ -47,7 +47,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
func TestMockHypervisorStartSandbox(t *testing.T) { func TestMockHypervisorStartSandbox(t *testing.T) {
var m *mockHypervisor var m *mockHypervisor
assert.NoError(t, m.startSandbox(vmStartTimeout)) assert.NoError(t, m.startSandbox(context.Background(), vmStartTimeout))
} }
func TestMockHypervisorStopSandbox(t *testing.T) { func TestMockHypervisorStopSandbox(t *testing.T) {
@ -59,7 +59,7 @@ func TestMockHypervisorStopSandbox(t *testing.T) {
func TestMockHypervisorAddDevice(t *testing.T) { func TestMockHypervisorAddDevice(t *testing.T) {
var m *mockHypervisor var m *mockHypervisor
assert.NoError(t, m.addDevice(nil, imgDev)) assert.NoError(t, m.addDevice(context.Background(), nil, imgDev))
} }
func TestMockHypervisorGetSandboxConsole(t *testing.T) { func TestMockHypervisorGetSandboxConsole(t *testing.T) {
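
Note the convention in these updated tests: `context.Background()` appears only here, at the root of the call graph, while library code threads whatever ctx it was given. A hedged toy illustration of where the root context belongs:

```go
package main

import (
	"context"
	"fmt"
)

func startSandbox(ctx context.Context, timeout int) error {
	// Library code: reuse the caller's ctx, never context.Background().
	return nil
}

func main() {
	// Entry points (tests, main, RPC handlers) own the root context.
	if err := startSandbox(context.Background(), 10); err != nil {
		fmt.Println(err)
	}
}
```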

View File

@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"sync" "sync"
"time" "time"
@ -36,7 +37,7 @@ func newMonitor(s *Sandbox) *monitor {
} }
} }
func (m *monitor) newWatcher() (chan error, error) { func (m *monitor) newWatcher(ctx context.Context) (chan error, error) {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -57,8 +58,8 @@ func (m *monitor) newWatcher() (chan error, error) {
m.wg.Done() m.wg.Done()
return return
case <-tick.C: case <-tick.C:
m.watchHypervisor() m.watchHypervisor(ctx)
m.watchAgent() m.watchAgent(ctx)
} }
} }
}() }()
@ -67,8 +68,8 @@ func (m *monitor) newWatcher() (chan error, error) {
return watcher, nil return watcher, nil
} }
func (m *monitor) notify(err error) { func (m *monitor) notify(ctx context.Context, err error) {
m.sandbox.agent.markDead() m.sandbox.agent.markDead(ctx)
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -127,17 +128,17 @@ func (m *monitor) stop() {
} }
} }
func (m *monitor) watchAgent() { func (m *monitor) watchAgent(ctx context.Context) {
err := m.sandbox.agent.check() err := m.sandbox.agent.check(ctx)
if err != nil { if err != nil {
// TODO: define and export error types // TODO: define and export error types
m.notify(errors.Wrapf(err, "failed to ping agent")) m.notify(ctx, errors.Wrapf(err, "failed to ping agent"))
} }
} }
func (m *monitor) watchHypervisor() error { func (m *monitor) watchHypervisor(ctx context.Context) error {
if err := m.sandbox.hypervisor.check(); err != nil { if err := m.sandbox.hypervisor.check(); err != nil {
m.notify(errors.Wrapf(err, "failed to ping hypervisor process")) m.notify(ctx, errors.Wrapf(err, "failed to ping hypervisor process"))
return err return err
} }
return nil return nil
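
The watcher changes above thread a single context from `newWatcher` into every tick's health checks, so the periodic agent and hypervisor pings trace under the monitor that scheduled them. A minimal runnable sketch of that loop, using ctx cancellation in place of the monitor's stop channel for brevity:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// newWatcher captures ctx once and reuses it on every tick, so each
// periodic check inherits the monitor's span (in the real code, via trace).
func newWatcher(ctx context.Context, check func(context.Context) error) chan error {
	watcher := make(chan error, 1)
	tick := time.NewTicker(100 * time.Millisecond)
	go func() {
		defer tick.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-tick.C:
				if err := check(ctx); err != nil {
					watcher <- err
					return
				}
			}
		}
	}()
	return watcher
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	w := newWatcher(ctx, func(context.Context) error { return nil })
	time.Sleep(250 * time.Millisecond)
	cancel()
	select {
	case err := <-w:
		fmt.Println("watcher error:", err)
	default:
		fmt.Println("healthy")
	}
}
```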

View File

@ -219,7 +219,7 @@ const mountPerm = os.FileMode(0755)
// * recursively create the destination // * recursively create the destination
// pgtypes stands for propagation types, which are shared, private, slave, and unbindable. // pgtypes stands for propagation types, which are shared, private, slave, and unbindable.
func bindMount(ctx context.Context, source, destination string, readonly bool, pgtypes string) error { func bindMount(ctx context.Context, source, destination string, readonly bool, pgtypes string) error {
span, _ := trace(ctx, "bindMount") span, ctx := trace(ctx, "bindMount")
defer span.End() defer span.End()
if source == "" { if source == "" {
@ -347,7 +347,7 @@ func bindUnmountContainerRootfs(ctx context.Context, sharedDir, cID string) erro
} }
func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbox) error { func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbox) error {
span, _ := trace(ctx, "bindUnmountAllRootfs") span, ctx := trace(ctx, "bindUnmountAllRootfs")
defer span.End() defer span.End()
var errors *merr.Error var errors *merr.Error
@ -356,11 +356,11 @@ func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbo
mountLogger().WithField("container", c.id).Warnf("container dir is a symlink, malicious guest?") mountLogger().WithField("container", c.id).Warnf("container dir is a symlink, malicious guest?")
continue continue
} }
c.unmountHostMounts() c.unmountHostMounts(ctx)
if c.state.Fstype == "" { if c.state.Fstype == "" {
// even if error found, don't break out of loop until all mounts attempted // even if error found, don't break out of loop until all mounts attempted
// to be unmounted, and collect all errors // to be unmounted, and collect all errors
errors = merr.Append(errors, bindUnmountContainerRootfs(c.ctx, sharedDir, c.id)) errors = merr.Append(errors, bindUnmountContainerRootfs(ctx, sharedDir, c.id))
} }
} }
return errors.ErrorOrNil() return errors.ErrorOrNil()
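
For reference, the two-step dance `bindMount` performs (bind, then remount read-only when asked) looks roughly like the sketch below; the real function additionally validates the source, recursively creates the destination, and applies the `pgtypes` propagation flag. This is an assumption-laden sketch around mount(2) flags, not the kata implementation:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// bindMountSketch binds source onto dest, then remounts read-only if asked.
// A read-only bind cannot be done in one mount(2) call; it needs a second
// MS_REMOUNT pass.
func bindMountSketch(source, dest string, readonly bool) error {
	if err := unix.Mount(source, dest, "bind", unix.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind %s -> %s: %w", source, dest, err)
	}
	if readonly {
		return unix.Mount("", dest, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, "")
	}
	return nil
}

func main() {
	// Requires privileges and real paths; shown for shape only.
	_ = bindMountSketch("/tmp/src", "/tmp/dst", true)
}
```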

View File

@ -406,11 +406,11 @@ func getLinkByName(netHandle *netlink.Handle, name string, expectedLink netlink.
} }
// The endpoint type should dictate how the connection needs to happen. // The endpoint type should dictate how the connection needs to happen.
func xConnectVMNetwork(endpoint Endpoint, h hypervisor) error { func xConnectVMNetwork(ctx context.Context, endpoint Endpoint, h hypervisor) error {
netPair := endpoint.NetworkPair() netPair := endpoint.NetworkPair()
queues := 0 queues := 0
caps := h.capabilities() caps := h.capabilities(ctx)
if caps.IsMultiQueueSupported() { if caps.IsMultiQueueSupported() {
queues = int(h.hypervisorConfig().NumVCPUs) queues = int(h.hypervisorConfig().NumVCPUs)
} }
@ -1262,8 +1262,8 @@ func (n *Network) trace(ctx context.Context, name string) (otelTrace.Span, conte
} }
// Run runs a callback in the specified network namespace. // Run runs a callback in the specified network namespace.
func (n *Network) Run(networkNSPath string, cb func() error) error { func (n *Network) Run(ctx context.Context, networkNSPath string, cb func() error) error {
span, _ := n.trace(context.Background(), "run") span, _ := n.trace(ctx, "Run")
defer span.End() defer span.End()
return doNetNS(networkNSPath, func(_ ns.NetNS) error { return doNetNS(networkNSPath, func(_ ns.NetNS) error {
@ -1273,7 +1273,7 @@ func (n *Network) Run(networkNSPath string, cb func() error) error {
// Add adds all needed interfaces inside the network namespace. // Add adds all needed interfaces inside the network namespace.
func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, hotplug bool) ([]Endpoint, error) { func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, hotplug bool) ([]Endpoint, error) {
span, _ := n.trace(ctx, "add") span, ctx := n.trace(ctx, "Add")
defer span.End() defer span.End()
endpoints, err := createEndpointsFromScan(config.NetNSPath, config) endpoints, err := createEndpointsFromScan(config.NetNSPath, config)
@ -1285,11 +1285,11 @@ func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, ho
for _, endpoint := range endpoints { for _, endpoint := range endpoints {
networkLogger().WithField("endpoint-type", endpoint.Type()).WithField("hotplug", hotplug).Info("Attaching endpoint") networkLogger().WithField("endpoint-type", endpoint.Type()).WithField("hotplug", hotplug).Info("Attaching endpoint")
if hotplug { if hotplug {
if err := endpoint.HotAttach(s.hypervisor); err != nil { if err := endpoint.HotAttach(ctx, s.hypervisor); err != nil {
return err return err
} }
} else { } else {
if err := endpoint.Attach(s); err != nil { if err := endpoint.Attach(ctx, s); err != nil {
return err return err
} }
} }
@ -1354,7 +1354,7 @@ func (n *Network) PostAdd(ctx context.Context, ns *NetworkNamespace, hotplug boo
// Remove network endpoints in the network namespace. It also deletes the network // Remove network endpoints in the network namespace. It also deletes the network
// namespace in case the namespace has been created by us. // namespace in case the namespace has been created by us.
func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor hypervisor) error { func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor hypervisor) error {
span, _ := n.trace(ctx, "remove") span, ctx := n.trace(ctx, "Remove")
defer span.End() defer span.End()
for _, endpoint := range ns.Endpoints { for _, endpoint := range ns.Endpoints {
@ -1377,7 +1377,7 @@ func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor h
// Detach for an endpoint should enter the network namespace // Detach for an endpoint should enter the network namespace
// if required. // if required.
networkLogger().WithField("endpoint-type", endpoint.Type()).Info("Detaching endpoint") networkLogger().WithField("endpoint-type", endpoint.Type()).Info("Detaching endpoint")
if err := endpoint.Detach(ns.NetNsCreated, ns.NetNsPath); err != nil { if err := endpoint.Detach(ctx, ns.NetNsCreated, ns.NetNsPath); err != nil {
return err return err
} }
} }
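
The attach loop in `Add` branches on `hotplug`: cold attach while the VM is still being built, hot attach against a live hypervisor, both now under the caller's span. A toy version of that branch with a narrowed endpoint interface (illustrative only):

```go
package main

import (
	"context"
	"fmt"
)

type endpoint interface {
	Attach(ctx context.Context) error
	HotAttach(ctx context.Context) error
}

// attachAll mirrors Network.Add: the same ctx fans out to every endpoint,
// so each attach traces as a sibling under the Add span.
func attachAll(ctx context.Context, eps []endpoint, hotplug bool) error {
	for _, ep := range eps {
		var err error
		if hotplug {
			err = ep.HotAttach(ctx)
		} else {
			err = ep.Attach(ctx)
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	fmt.Println(attachAll(context.Background(), nil, false)) // <nil>
}
```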

View File

@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@ -74,7 +75,7 @@ func (endpoint *PhysicalEndpoint) NetworkPair() *NetworkInterfacePair {
// Attach for physical endpoint binds the physical network interface to // Attach for physical endpoint binds the physical network interface to
// vfio-pci and adds device to the hypervisor with vfio-passthrough. // vfio-pci and adds device to the hypervisor with vfio-passthrough.
func (endpoint *PhysicalEndpoint) Attach(s *Sandbox) error { func (endpoint *PhysicalEndpoint) Attach(ctx context.Context, s *Sandbox) error {
// Unbind physical interface from host driver and bind to vfio // Unbind physical interface from host driver and bind to vfio
// so that it can be passed to qemu. // so that it can be passed to qemu.
vfioPath, err := bindNICToVFIO(endpoint) vfioPath, err := bindNICToVFIO(endpoint)
@ -95,13 +96,13 @@ func (endpoint *PhysicalEndpoint) Attach(s *Sandbox) error {
ColdPlug: true, ColdPlug: true,
} }
_, err = s.AddDevice(d) _, err = s.AddDevice(ctx, d)
return err return err
} }
// Detach for physical endpoint unbinds the physical network interface from vfio-pci // Detach for physical endpoint unbinds the physical network interface from vfio-pci
// and binds it back to the saved host driver. // and binds it back to the saved host driver.
func (endpoint *PhysicalEndpoint) Detach(netNsCreated bool, netNsPath string) error { func (endpoint *PhysicalEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
// Bind back the physical network interface to host. // Bind back the physical network interface to host.
// We need to do this even if a new network namespace has not // We need to do this even if a new network namespace has not
// been created by virtcontainers. // been created by virtcontainers.
@ -112,12 +113,12 @@ func (endpoint *PhysicalEndpoint) Detach(netNsCreated bool, netNsPath string) er
} }
// HotAttach for physical endpoint not supported yet // HotAttach for physical endpoint not supported yet
func (endpoint *PhysicalEndpoint) HotAttach(h hypervisor) error { func (endpoint *PhysicalEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
return fmt.Errorf("PhysicalEndpoint does not support Hot attach") return fmt.Errorf("PhysicalEndpoint does not support Hot attach")
} }
// HotDetach for physical endpoint not supported yet // HotDetach for physical endpoint not supported yet
func (endpoint *PhysicalEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error { func (endpoint *PhysicalEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("PhysicalEndpoint does not support Hot detach") return fmt.Errorf("PhysicalEndpoint does not support Hot detach")
} }
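
For context on what `Attach` means by binding to vfio-pci: the host driver is released through sysfs and the device is handed to vfio-pci so QEMU can take it via VFIO passthrough. A hedged sketch of those sysfs writes (paths follow the kernel's PCI sysfs ABI; the real `bindNICToVFIO` also records the original driver so `Detach` can restore it):

```go
package main

import (
	"io/ioutil"
	"path/filepath"
)

// bindToVFIOSketch unbinds a PCI device (bdf like "0000:01:00.0") from its
// current driver, then registers its vendor:device pair with vfio-pci.
func bindToVFIOSketch(bdf, vendor, device string) error {
	unbind := filepath.Join("/sys/bus/pci/devices", bdf, "driver", "unbind")
	if err := ioutil.WriteFile(unbind, []byte(bdf), 0200); err != nil {
		return err
	}
	newID := "/sys/bus/pci/drivers/vfio-pci/new_id"
	return ioutil.WriteFile(newID, []byte(vendor+" "+device), 0200)
}

func main() {
	// Root-only and hardware-specific; shown for shape only.
	_ = bindToVFIOSketch("0000:01:00.0", "8086", "1572")
}
```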

View File

@ -26,7 +26,7 @@ func TestPhysicalEndpoint_HotAttach(t *testing.T) {
h := &mockHypervisor{} h := &mockHypervisor{}
err := v.HotAttach(h) err := v.HotAttach(context.Background(), h)
assert.Error(err) assert.Error(err)
} }
@ -39,7 +39,7 @@ func TestPhysicalEndpoint_HotDetach(t *testing.T) {
h := &mockHypervisor{} h := &mockHypervisor{}
err := v.HotDetach(h, true, "") err := v.HotDetach(context.Background(), h, true, "")
assert.Error(err) assert.Error(err)
} }

View File

@ -7,6 +7,7 @@ package cgroups
import ( import (
"bufio" "bufio"
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@ -295,7 +296,7 @@ func (m *Manager) Destroy() error {
} }
// AddDevice adds a device to the device cgroup // AddDevice adds a device to the device cgroup
func (m *Manager) AddDevice(device string) error { func (m *Manager) AddDevice(ctx context.Context, device string) error {
cgroups, err := m.GetCgroups() cgroups, err := m.GetCgroups()
if err != nil { if err != nil {
return err return err
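
`AddDevice` ultimately installs an allow rule in the device cgroup. The runtime-spec type that carries such a rule looks like the sketch below (values are illustrative; the real manager resolves major/minor numbers from the device path before updating the cgroup):

```go
package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// deviceAllowRule builds an allow-list entry for one character device.
func deviceAllowRule(major, minor int64) specs.LinuxDeviceCgroup {
	return specs.LinuxDeviceCgroup{
		Allow:  true,
		Type:   "c", // character device
		Major:  &major,
		Minor:  &minor,
		Access: "rwm", // read, write, mknod
	}
}

func main() {
	r := deviceAllowRule(10, 200) // e.g. /dev/net/tun is c 10:200
	fmt.Printf("allow %s %d:%d %s\n", r.Type, *r.Major, *r.Minor, r.Access)
}
```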

View File

@ -65,7 +65,7 @@ func (s *Sandbox) GetContainer(containerID string) vc.VCContainer {
} }
// Release implements the VCSandbox function of the same name. // Release implements the VCSandbox function of the same name.
func (s *Sandbox) Release() error { func (s *Sandbox) Release(ctx context.Context) error {
return nil return nil
} }
@ -90,12 +90,12 @@ func (s *Sandbox) Resume() error {
} }
// Delete implements the VCSandbox function of the same name. // Delete implements the VCSandbox function of the same name.
func (s *Sandbox) Delete() error { func (s *Sandbox) Delete(ctx context.Context) error {
return nil return nil
} }
// CreateContainer implements the VCSandbox function of the same name. // CreateContainer implements the VCSandbox function of the same name.
func (s *Sandbox) CreateContainer(conf vc.ContainerConfig) (vc.VCContainer, error) { func (s *Sandbox) CreateContainer(ctx context.Context, conf vc.ContainerConfig) (vc.VCContainer, error) {
if s.CreateContainerFunc != nil { if s.CreateContainerFunc != nil {
return s.CreateContainerFunc(conf) return s.CreateContainerFunc(conf)
} }
@ -103,12 +103,12 @@ func (s *Sandbox) CreateContainer(conf vc.ContainerConfig) (vc.VCContainer, erro
} }
// DeleteContainer implements the VCSandbox function of the same name. // DeleteContainer implements the VCSandbox function of the same name.
func (s *Sandbox) DeleteContainer(contID string) (vc.VCContainer, error) { func (s *Sandbox) DeleteContainer(ctx context.Context, contID string) (vc.VCContainer, error) {
return &Container{}, nil return &Container{}, nil
} }
// StartContainer implements the VCSandbox function of the same name. // StartContainer implements the VCSandbox function of the same name.
func (s *Sandbox) StartContainer(contID string) (vc.VCContainer, error) { func (s *Sandbox) StartContainer(ctx context.Context, contID string) (vc.VCContainer, error) {
return &Container{}, nil return &Container{}, nil
} }
@ -118,7 +118,7 @@ func (s *Sandbox) StopContainer(contID string, force bool) (vc.VCContainer, erro
} }
// KillContainer implements the VCSandbox function of the same name. // KillContainer implements the VCSandbox function of the same name.
func (s *Sandbox) KillContainer(contID string, signal syscall.Signal, all bool) error { func (s *Sandbox) KillContainer(ctx context.Context, contID string, signal syscall.Signal, all bool) error {
return nil return nil
} }
@ -128,7 +128,7 @@ func (s *Sandbox) StatusContainer(contID string) (vc.ContainerStatus, error) {
} }
// StatsContainer implements the VCSandbox function of the same name. // StatsContainer implements the VCSandbox function of the same name.
func (s *Sandbox) StatsContainer(contID string) (vc.ContainerStats, error) { func (s *Sandbox) StatsContainer(ctx context.Context, contID string) (vc.ContainerStats, error) {
if s.StatsContainerFunc != nil { if s.StatsContainerFunc != nil {
return s.StatsContainerFunc(contID) return s.StatsContainerFunc(contID)
} }
@ -136,12 +136,12 @@ func (s *Sandbox) StatsContainer(contID string) (vc.ContainerStats, error) {
} }
// PauseContainer implements the VCSandbox function of the same name. // PauseContainer implements the VCSandbox function of the same name.
func (s *Sandbox) PauseContainer(contID string) error { func (s *Sandbox) PauseContainer(ctx context.Context, contID string) error {
return nil return nil
} }
// ResumeContainer implements the VCSandbox function of the same name. // ResumeContainer implements the VCSandbox function of the same name.
func (s *Sandbox) ResumeContainer(contID string) error { func (s *Sandbox) ResumeContainer(ctx context.Context, contID string) error {
return nil return nil
} }
@ -151,37 +151,37 @@ func (s *Sandbox) Status() vc.SandboxStatus {
} }
// EnterContainer implements the VCSandbox function of the same name. // EnterContainer implements the VCSandbox function of the same name.
func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (vc.VCContainer, *vc.Process, error) { func (s *Sandbox) EnterContainer(ctx context.Context, containerID string, cmd types.Cmd) (vc.VCContainer, *vc.Process, error) {
return &Container{}, &vc.Process{}, nil return &Container{}, &vc.Process{}, nil
} }
// Monitor implements the VCSandbox function of the same name. // Monitor implements the VCSandbox function of the same name.
func (s *Sandbox) Monitor() (chan error, error) { func (s *Sandbox) Monitor(ctx context.Context) (chan error, error) {
return nil, nil return nil, nil
} }
// UpdateContainer implements the VCSandbox function of the same name. // UpdateContainer implements the VCSandbox function of the same name.
func (s *Sandbox) UpdateContainer(containerID string, resources specs.LinuxResources) error { func (s *Sandbox) UpdateContainer(ctx context.Context, containerID string, resources specs.LinuxResources) error {
return nil return nil
} }
// ProcessListContainer implements the VCSandbox function of the same name. // ProcessListContainer implements the VCSandbox function of the same name.
func (s *Sandbox) ProcessListContainer(containerID string, options vc.ProcessListOptions) (vc.ProcessList, error) { func (s *Sandbox) ProcessListContainer(ctx context.Context, containerID string, options vc.ProcessListOptions) (vc.ProcessList, error) {
return nil, nil return nil, nil
} }
// WaitProcess implements the VCSandbox function of the same name. // WaitProcess implements the VCSandbox function of the same name.
func (s *Sandbox) WaitProcess(containerID, processID string) (int32, error) { func (s *Sandbox) WaitProcess(ctx context.Context, containerID, processID string) (int32, error) {
return 0, nil return 0, nil
} }
// SignalProcess implements the VCSandbox function of the same name. // SignalProcess implements the VCSandbox function of the same name.
func (s *Sandbox) SignalProcess(containerID, processID string, signal syscall.Signal, all bool) error { func (s *Sandbox) SignalProcess(ctx context.Context, containerID, processID string, signal syscall.Signal, all bool) error {
return nil return nil
} }
// WinsizeProcess implements the VCSandbox function of the same name. // WinsizeProcess implements the VCSandbox function of the same name.
func (s *Sandbox) WinsizeProcess(containerID, processID string, height, width uint32) error { func (s *Sandbox) WinsizeProcess(ctx context.Context, containerID, processID string, height, width uint32) error {
return nil return nil
} }
@ -191,36 +191,36 @@ func (s *Sandbox) IOStream(containerID, processID string) (io.WriteCloser, io.Re
} }
// AddDevice adds a device to sandbox // AddDevice adds a device to sandbox
func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) { func (s *Sandbox) AddDevice(ctx context.Context, info config.DeviceInfo) (api.Device, error) {
return nil, nil return nil, nil
} }
// AddInterface implements the VCSandbox function of the same name. // AddInterface implements the VCSandbox function of the same name.
func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) { func (s *Sandbox) AddInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
return nil, nil return nil, nil
} }
// RemoveInterface implements the VCSandbox function of the same name. // RemoveInterface implements the VCSandbox function of the same name.
func (s *Sandbox) RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) { func (s *Sandbox) RemoveInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
return nil, nil return nil, nil
} }
// ListInterfaces implements the VCSandbox function of the same name. // ListInterfaces implements the VCSandbox function of the same name.
func (s *Sandbox) ListInterfaces() ([]*pbTypes.Interface, error) { func (s *Sandbox) ListInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
return nil, nil return nil, nil
} }
// UpdateRoutes implements the VCSandbox function of the same name. // UpdateRoutes implements the VCSandbox function of the same name.
func (s *Sandbox) UpdateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) { func (s *Sandbox) UpdateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
return nil, nil return nil, nil
} }
// ListRoutes implements the VCSandbox function of the same name. // ListRoutes implements the VCSandbox function of the same name.
func (s *Sandbox) ListRoutes() ([]*pbTypes.Route, error) { func (s *Sandbox) ListRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
return nil, nil return nil, nil
} }
func (s *Sandbox) GetOOMEvent() (string, error) { func (s *Sandbox) GetOOMEvent(ctx context.Context) (string, error) {
return "", nil return "", nil
} }
@ -233,7 +233,7 @@ func (s *Sandbox) UpdateRuntimeMetrics() error {
} }
// GetAgentMetrics implements the VCSandbox function of the same name. // GetAgentMetrics implements the VCSandbox function of the same name.
func (s *Sandbox) GetAgentMetrics() (string, error) { func (s *Sandbox) GetAgentMetrics(ctx context.Context) (string, error) {
if s.GetAgentMetricsFunc != nil { if s.GetAgentMetricsFunc != nil {
return s.GetAgentMetricsFunc() return s.GetAgentMetricsFunc()
} }
@ -241,7 +241,7 @@ func (s *Sandbox) GetAgentMetrics() (string, error) {
} }
// Stats implements the VCSandbox function of the same name. // Stats implements the VCSandbox function of the same name.
func (s *Sandbox) Stats() (vc.SandboxStats, error) { func (s *Sandbox) Stats(ctx context.Context) (vc.SandboxStats, error) {
if s.StatsFunc != nil { if s.StatsFunc != nil {
return s.StatsFunc() return s.StatsFunc()
} }
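
These hooks are what tests swap in on the mock. A hedged sketch of the pattern (test name and canned values are illustrative; assumes the imports "context", "testing", and the vc alias used above):

func TestStatsContainerHook(t *testing.T) {
    s := &Sandbox{
        StatsContainerFunc: func(contID string) (vc.ContainerStats, error) {
            // Canned stats returned instead of querying a real sandbox.
            return vc.ContainerStats{}, nil
        },
    }
    if _, err := s.StatsContainer(context.Background(), "illustrative-id"); err != nil {
        t.Fatal(err)
    }
}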

View File

@ -185,8 +185,8 @@ func (q *qemu) kernelParameters() string {
} }
// Adds all capabilities supported by the qemu implementation of the hypervisor interface // Adds all capabilities supported by the qemu implementation of the hypervisor interface
func (q *qemu) capabilities() types.Capabilities { func (q *qemu) capabilities(ctx context.Context) types.Capabilities {
span, _ := q.trace("capabilities") span, _ := q.trace(ctx, "capabilities")
defer span.End() defer span.End()
return q.arch.capabilities() return q.arch.capabilities()
@ -214,22 +214,22 @@ func (q *qemu) qemuPath() (string, error) {
return p, nil return p, nil
} }
func (q *qemu) trace(name string) (otelTrace.Span, context.Context) { func (q *qemu) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
if q.ctx == nil { if parent == nil {
q.Logger().WithField("type", "bug").Error("trace called before context set") q.Logger().WithField("type", "bug").Error("trace called before context set")
q.ctx = context.Background() parent = context.Background()
} }
tracer := otel.Tracer("kata") tracer := otel.Tracer("kata")
ctx, span := tracer.Start(q.ctx, name) ctx, span := tracer.Start(parent, name)
span.SetAttributes([]otelLabel.KeyValue{otelLabel.Key("subsystem").String("hypervisor"), otelLabel.Key("type").String("qemu")}...) span.SetAttributes([]otelLabel.KeyValue{otelLabel.Key("subsystem").String("hypervisor"), otelLabel.Key("type").String("qemu")}...)
return span, ctx return span, ctx
} }
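
This refactor is the heart of the ordering fix: the span is started from the caller's context rather than the stored q.ctx, so any callee that receives the returned ctx produces a properly nested child span. A hedged sketch of the resulting pattern (function names are illustrative):

// Illustrative parent/child pair: each callee starts its span from the
// context its caller derived, so spans nest in call order.
func (q *qemu) parentOp(ctx context.Context) error {
    span, ctx := q.trace(ctx, "parentOp")
    defer span.End()
    return q.childOp(ctx) // "childOp" becomes a child span of "parentOp"
}

func (q *qemu) childOp(ctx context.Context) error {
    span, _ := q.trace(ctx, "childOp")
    defer span.End()
    return nil
}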
// setup sets the Qemu structure up. // setup sets the Qemu structure up.
func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig) error { func (q *qemu) setup(ctx context.Context, id string, hypervisorConfig *HypervisorConfig) error {
span, _ := q.trace("setup") span, _ := q.trace(ctx, "setup")
defer span.End() defer span.End()
err := hypervisorConfig.valid() err := hypervisorConfig.valid()
@ -387,10 +387,10 @@ func (q *qemu) createQmpSocket() ([]govmmQemu.QMPSocket, error) {
}, nil }, nil
} }
func (q *qemu) buildDevices(initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) { func (q *qemu) buildDevices(ctx context.Context, initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) {
var devices []govmmQemu.Device var devices []govmmQemu.Device
_, console, err := q.getSandboxConsole(q.id) _, console, err := q.getSandboxConsole(ctx, q.id)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -472,10 +472,10 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
// Save the tracing context // Save the tracing context
q.ctx = ctx q.ctx = ctx
span, _ := q.trace("createSandbox") span, ctx := q.trace(ctx, "createSandbox")
defer span.End() defer span.End()
if err := q.setup(id, hypervisorConfig); err != nil { if err := q.setup(ctx, id, hypervisorConfig); err != nil {
return err return err
} }
@ -562,7 +562,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
return err return err
} }
devices, ioThread, err := q.buildDevices(initrdPath) devices, ioThread, err := q.buildDevices(ctx, initrdPath)
if err != nil { if err != nil {
return err return err
} }
@ -658,7 +658,7 @@ func (q *qemu) virtiofsdArgs(fd uintptr) []string {
return args return args
} }
func (q *qemu) setupVirtiofsd() (err error) { func (q *qemu) setupVirtiofsd(ctx context.Context) (err error) {
var listener *net.UnixListener var listener *net.UnixListener
var fd *os.File var fd *os.File
@ -707,7 +707,7 @@ func (q *qemu) setupVirtiofsd() (err error) {
q.Logger().Info("virtiofsd quits") q.Logger().Info("virtiofsd quits")
// Wait to release resources of virtiofsd process // Wait to release resources of virtiofsd process
cmd.Process.Wait() cmd.Process.Wait()
q.stopSandbox() q.stopSandbox(ctx)
}() }()
return err return err
} }
@ -775,8 +775,8 @@ func (q *qemu) setupVirtioMem() error {
} }
// startSandbox will start the Sandbox's VM. // startSandbox will start the Sandbox's VM.
func (q *qemu) startSandbox(timeout int) error { func (q *qemu) startSandbox(ctx context.Context, timeout int) error {
span, _ := q.trace("startSandbox") span, ctx := q.trace(ctx, "startSandbox")
defer span.End() defer span.End()
if q.config.Debug { if q.config.Debug {
@ -828,7 +828,7 @@ func (q *qemu) startSandbox(timeout int) error {
defer label.SetProcessLabel("") defer label.SetProcessLabel("")
if q.config.SharedFS == config.VirtioFS { if q.config.SharedFS == config.VirtioFS {
err = q.setupVirtiofsd() err = q.setupVirtiofsd(ctx)
if err != nil { if err != nil {
return err return err
} }
@ -848,7 +848,7 @@ func (q *qemu) startSandbox(timeout int) error {
return fmt.Errorf("failed to launch qemu: %s, error messages from qemu log: %s", err, strErr) return fmt.Errorf("failed to launch qemu: %s, error messages from qemu log: %s", err, strErr)
} }
err = q.waitSandbox(timeout) err = q.waitSandbox(ctx, timeout)
if err != nil { if err != nil {
return err return err
} }
@ -886,8 +886,8 @@ func (q *qemu) bootFromTemplate() error {
} }
// waitSandbox will wait for the Sandbox's VM to be up and running. // waitSandbox will wait for the Sandbox's VM to be up and running.
func (q *qemu) waitSandbox(timeout int) error { func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
span, _ := q.trace("waitSandbox") span, _ := q.trace(ctx, "waitSandbox")
defer span.End() defer span.End()
if timeout < 0 { if timeout < 0 {
@ -940,8 +940,8 @@ func (q *qemu) waitSandbox(timeout int) error {
} }
// stopSandbox will stop the Sandbox's VM. // stopSandbox will stop the Sandbox's VM.
func (q *qemu) stopSandbox() error { func (q *qemu) stopSandbox(ctx context.Context) error {
span, _ := q.trace("stopSandbox") span, _ := q.trace(ctx, "stopSandbox")
defer span.End() defer span.End()
q.Logger().Info("Stopping Sandbox") q.Logger().Info("Stopping Sandbox")
@ -1014,8 +1014,8 @@ func (q *qemu) cleanupVM() error {
return nil return nil
} }
func (q *qemu) togglePauseSandbox(pause bool) error { func (q *qemu) togglePauseSandbox(ctx context.Context, pause bool) error {
span, _ := q.trace("togglePauseSandbox") span, _ := q.trace(ctx, "togglePauseSandbox")
defer span.End() defer span.End()
if err := q.qmpSetup(); err != nil { if err := q.qmpSetup(); err != nil {
@ -1210,7 +1210,7 @@ func (q *qemu) qmpShutdown() {
} }
} }
func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, devID string) (err error) { func (q *qemu) hotplugAddBlockDevice(ctx context.Context, drive *config.BlockDrive, op operation, devID string) (err error) {
// drive can be a pmem device, in which case it's used as backing file for a nvdimm device // drive can be a pmem device, in which case it's used as backing file for a nvdimm device
if q.config.BlockDeviceDriver == config.Nvdimm || drive.Pmem { if q.config.BlockDeviceDriver == config.Nvdimm || drive.Pmem {
var blocksize int64 var blocksize int64
@ -1260,7 +1260,7 @@ func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, dev
case q.config.BlockDeviceDriver == config.VirtioBlockCCW: case q.config.BlockDeviceDriver == config.VirtioBlockCCW:
driver := "virtio-blk-ccw" driver := "virtio-blk-ccw"
addr, bridge, err := q.arch.addDeviceToBridge(drive.ID, types.CCW) addr, bridge, err := q.arch.addDeviceToBridge(ctx, drive.ID, types.CCW)
if err != nil { if err != nil {
return err return err
} }
@ -1278,7 +1278,7 @@ func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, dev
} }
case q.config.BlockDeviceDriver == config.VirtioBlock: case q.config.BlockDeviceDriver == config.VirtioBlock:
driver := "virtio-blk-pci" driver := "virtio-blk-pci"
addr, bridge, err := q.arch.addDeviceToBridge(drive.ID, types.PCI) addr, bridge, err := q.arch.addDeviceToBridge(ctx, drive.ID, types.PCI)
if err != nil { if err != nil {
return err return err
} }
@ -1327,7 +1327,7 @@ func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, dev
return nil return nil
} }
func (q *qemu) hotplugAddVhostUserBlkDevice(vAttr *config.VhostUserDeviceAttrs, op operation, devID string) (err error) { func (q *qemu) hotplugAddVhostUserBlkDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation, devID string) (err error) {
err = q.qmpMonitorCh.qmp.ExecuteCharDevUnixSocketAdd(q.qmpMonitorCh.ctx, vAttr.DevID, vAttr.SocketPath, false, false) err = q.qmpMonitorCh.qmp.ExecuteCharDevUnixSocketAdd(q.qmpMonitorCh.ctx, vAttr.DevID, vAttr.SocketPath, false, false)
if err != nil { if err != nil {
return err return err
@ -1340,7 +1340,7 @@ func (q *qemu) hotplugAddVhostUserBlkDevice(vAttr *config.VhostUserDeviceAttrs,
}() }()
driver := "vhost-user-blk-pci" driver := "vhost-user-blk-pci"
addr, bridge, err := q.arch.addDeviceToBridge(vAttr.DevID, types.PCI) addr, bridge, err := q.arch.addDeviceToBridge(ctx, vAttr.DevID, types.PCI)
if err != nil { if err != nil {
return err return err
} }
@ -1368,7 +1368,7 @@ func (q *qemu) hotplugAddVhostUserBlkDevice(vAttr *config.VhostUserDeviceAttrs,
return nil return nil
} }
func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error { func (q *qemu) hotplugBlockDevice(ctx context.Context, drive *config.BlockDrive, op operation) error {
if err := q.qmpSetup(); err != nil { if err := q.qmpSetup(); err != nil {
return err return err
} }
@ -1376,7 +1376,7 @@ func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error
devID := "virtio-" + drive.ID devID := "virtio-" + drive.ID
if op == addDevice { if op == addDevice {
return q.hotplugAddBlockDevice(drive, op, devID) return q.hotplugAddBlockDevice(ctx, drive, op, devID)
} else { } else {
if q.config.BlockDeviceDriver == config.VirtioBlock { if q.config.BlockDeviceDriver == config.VirtioBlock {
if err := q.arch.removeDeviceFromBridge(drive.ID); err != nil { if err := q.arch.removeDeviceFromBridge(drive.ID); err != nil {
@ -1392,7 +1392,7 @@ func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error
} }
} }
func (q *qemu) hotplugVhostUserDevice(vAttr *config.VhostUserDeviceAttrs, op operation) error { func (q *qemu) hotplugVhostUserDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation) error {
if err := q.qmpSetup(); err != nil { if err := q.qmpSetup(); err != nil {
return err return err
} }
@ -1402,7 +1402,7 @@ func (q *qemu) hotplugVhostUserDevice(vAttr *config.VhostUserDeviceAttrs, op ope
if op == addDevice { if op == addDevice {
switch vAttr.Type { switch vAttr.Type {
case config.VhostUserBlk: case config.VhostUserBlk:
return q.hotplugAddVhostUserBlkDevice(vAttr, op, devID) return q.hotplugAddVhostUserBlkDevice(ctx, vAttr, op, devID)
default: default:
return fmt.Errorf("Incorrect vhost-user device type found") return fmt.Errorf("Incorrect vhost-user device type found")
} }
@ -1419,7 +1419,7 @@ func (q *qemu) hotplugVhostUserDevice(vAttr *config.VhostUserDeviceAttrs, op ope
} }
} }
func (q *qemu) hotplugVFIODevice(device *config.VFIODev, op operation) (err error) { func (q *qemu) hotplugVFIODevice(ctx context.Context, device *config.VFIODev, op operation) (err error) {
if err = q.qmpSetup(); err != nil { if err = q.qmpSetup(); err != nil {
return err return err
} }
@ -1466,7 +1466,7 @@ func (q *qemu) hotplugVFIODevice(device *config.VFIODev, op operation) (err erro
} }
} }
addr, bridge, err := q.arch.addDeviceToBridge(devID, types.PCI) addr, bridge, err := q.arch.addDeviceToBridge(ctx, devID, types.PCI)
if err != nil { if err != nil {
return err return err
} }
@ -1524,7 +1524,7 @@ func (q *qemu) hotAddNetDevice(name, hardAddr string, VMFds, VhostFds []*os.File
return q.qmpMonitorCh.qmp.ExecuteNetdevAddByFds(q.qmpMonitorCh.ctx, "tap", name, VMFdNames, VhostFdNames) return q.qmpMonitorCh.qmp.ExecuteNetdevAddByFds(q.qmpMonitorCh.ctx, "tap", name, VMFdNames, VhostFdNames)
} }
func (q *qemu) hotplugNetDevice(endpoint Endpoint, op operation) (err error) { func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op operation) (err error) {
if err = q.qmpSetup(); err != nil { if err = q.qmpSetup(); err != nil {
return err return err
} }
@ -1553,7 +1553,7 @@ func (q *qemu) hotplugNetDevice(endpoint Endpoint, op operation) (err error) {
} }
}() }()
addr, bridge, err := q.arch.addDeviceToBridge(tap.ID, types.PCI) addr, bridge, err := q.arch.addDeviceToBridge(ctx, tap.ID, types.PCI)
if err != nil { if err != nil {
return err return err
} }
@ -1599,36 +1599,36 @@ func (q *qemu) hotplugNetDevice(endpoint Endpoint, op operation) (err error) {
return q.qmpMonitorCh.qmp.ExecuteNetdevDel(q.qmpMonitorCh.ctx, tap.Name) return q.qmpMonitorCh.qmp.ExecuteNetdevDel(q.qmpMonitorCh.ctx, tap.Name)
} }
func (q *qemu) hotplugDevice(devInfo interface{}, devType deviceType, op operation) (interface{}, error) { func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType deviceType, op operation) (interface{}, error) {
switch devType { switch devType {
case blockDev: case blockDev:
drive := devInfo.(*config.BlockDrive) drive := devInfo.(*config.BlockDrive)
return nil, q.hotplugBlockDevice(drive, op) return nil, q.hotplugBlockDevice(ctx, drive, op)
case cpuDev: case cpuDev:
vcpus := devInfo.(uint32) vcpus := devInfo.(uint32)
return q.hotplugCPUs(vcpus, op) return q.hotplugCPUs(vcpus, op)
case vfioDev: case vfioDev:
device := devInfo.(*config.VFIODev) device := devInfo.(*config.VFIODev)
return nil, q.hotplugVFIODevice(device, op) return nil, q.hotplugVFIODevice(ctx, device, op)
case memoryDev: case memoryDev:
memdev := devInfo.(*memoryDevice) memdev := devInfo.(*memoryDevice)
return q.hotplugMemory(memdev, op) return q.hotplugMemory(memdev, op)
case netDev: case netDev:
device := devInfo.(Endpoint) device := devInfo.(Endpoint)
return nil, q.hotplugNetDevice(device, op) return nil, q.hotplugNetDevice(ctx, device, op)
case vhostuserDev: case vhostuserDev:
vAttr := devInfo.(*config.VhostUserDeviceAttrs) vAttr := devInfo.(*config.VhostUserDeviceAttrs)
return nil, q.hotplugVhostUserDevice(vAttr, op) return nil, q.hotplugVhostUserDevice(ctx, vAttr, op)
default: default:
return nil, fmt.Errorf("cannot hotplug device: unsupported device type '%v'", devType) return nil, fmt.Errorf("cannot hotplug device: unsupported device type '%v'", devType)
} }
} }
func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) { func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := q.trace("hotplugAddDevice") span, ctx := q.trace(ctx, "hotplugAddDevice")
defer span.End() defer span.End()
data, err := q.hotplugDevice(devInfo, devType, addDevice) data, err := q.hotplugDevice(ctx, devInfo, devType, addDevice)
if err != nil { if err != nil {
return data, err return data, err
} }
@ -1636,11 +1636,11 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
return data, nil return data, nil
} }
func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) { func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, _ := q.trace("hotplugRemoveDevice") span, ctx := q.trace(ctx, "hotplugRemoveDevice")
defer span.End() defer span.End()
data, err := q.hotplugDevice(devInfo, devType, removeDevice) data, err := q.hotplugDevice(ctx, devInfo, devType, removeDevice)
if err != nil { if err != nil {
return data, err return data, err
} }
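
With the dispatch and both wrappers context-aware, a single caller context now flows through the whole hotplug chain. A hedged call-site sketch (the caller function is illustrative):

// Illustrative caller: hotplugging a block drive with ordered tracing.
// hotplugAddDevice starts a span from ctx, then threads the derived context
// through hotplugDevice into hotplugAddBlockDevice, so the QMP-level work
// shows up as descendant spans of this call.
func hotplugDriveExample(ctx context.Context, q *qemu, drive *config.BlockDrive) error {
    _, err := q.hotplugAddDevice(ctx, drive, blockDev)
    return err
}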
@ -1848,24 +1848,24 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
return memDev.sizeMB, nil return memDev.sizeMB, nil
} }
func (q *qemu) pauseSandbox() error { func (q *qemu) pauseSandbox(ctx context.Context) error {
span, _ := q.trace("pauseSandbox") span, ctx := q.trace(ctx, "pauseSandbox")
defer span.End() defer span.End()
return q.togglePauseSandbox(true) return q.togglePauseSandbox(ctx, true)
} }
func (q *qemu) resumeSandbox() error { func (q *qemu) resumeSandbox(ctx context.Context) error {
span, _ := q.trace("resumeSandbox") span, ctx := q.trace(ctx, "resumeSandbox")
defer span.End() defer span.End()
return q.togglePauseSandbox(false) return q.togglePauseSandbox(ctx, false)
} }
// addDevice will add extra devices to Qemu command line. // addDevice will add extra devices to Qemu command line.
func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error { func (q *qemu) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
var err error var err error
span, _ := q.trace("addDevice") span, _ := q.trace(ctx, "addDevice")
defer span.End() defer span.End()
switch v := devInfo.(type) { switch v := devInfo.(type) {
@ -1922,8 +1922,8 @@ func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error {
// getSandboxConsole builds the path of the console where we can read // getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox. // logs coming from the sandbox.
func (q *qemu) getSandboxConsole(id string) (string, string, error) { func (q *qemu) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
span, _ := q.trace("getSandboxConsole") span, _ := q.trace(ctx, "getSandboxConsole")
defer span.End() defer span.End()
consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket) consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket)
@ -1987,8 +1987,8 @@ func (q *qemu) waitMigration() error {
return nil return nil
} }
func (q *qemu) disconnect() { func (q *qemu) disconnect(ctx context.Context) {
span, _ := q.trace("disconnect") span, _ := q.trace(ctx, "disconnect")
defer span.End() defer span.End()
q.qmpShutdown() q.qmpShutdown()
@ -2005,7 +2005,7 @@ func (q *qemu) disconnect() {
// the memory to remove has to be at least the size of one slot. // the memory to remove has to be at least the size of one slot.
// To return memory back we are resizing the VM memory balloon. // To return memory back we are resizing the VM memory balloon.
// A longer term solution is to evaluate solutions like virtio-mem // A longer term solution is to evaluate solutions like virtio-mem
func (q *qemu) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) { func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
currentMemory := q.config.MemorySize + uint32(q.state.HotpluggedMemory) currentMemory := q.config.MemorySize + uint32(q.state.HotpluggedMemory)
if err := q.qmpSetup(); err != nil { if err := q.qmpSetup(); err != nil {
@ -2035,7 +2035,7 @@ func (q *qemu) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe boo
addMemDevice.sizeMB = int(memHotplugMB) addMemDevice.sizeMB = int(memHotplugMB)
addMemDevice.probe = probe addMemDevice.probe = probe
data, err := q.hotplugAddDevice(&addMemDevice, memoryDev) data, err := q.hotplugAddDevice(ctx, &addMemDevice, memoryDev)
if err != nil { if err != nil {
return currentMemory, addMemDevice, err return currentMemory, addMemDevice, err
} }
@ -2055,7 +2055,7 @@ func (q *qemu) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe boo
addMemDevice.sizeMB = int(memHotunplugMB) addMemDevice.sizeMB = int(memHotunplugMB)
addMemDevice.probe = probe addMemDevice.probe = probe
data, err := q.hotplugRemoveDevice(&addMemDevice, memoryDev) data, err := q.hotplugRemoveDevice(ctx, &addMemDevice, memoryDev)
if err != nil { if err != nil {
return currentMemory, addMemDevice, err return currentMemory, addMemDevice, err
} }
@ -2192,8 +2192,8 @@ func genericAppendPCIeRootPort(devices []govmmQemu.Device, number uint32, machin
return devices return devices
} }
func (q *qemu) getThreadIDs() (vcpuThreadIDs, error) { func (q *qemu) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
span, _ := q.trace("getThreadIDs") span, _ := q.trace(ctx, "getThreadIDs")
defer span.End() defer span.End()
tid := vcpuThreadIDs{} tid := vcpuThreadIDs{}
@ -2225,7 +2225,7 @@ func calcHotplugMemMiBSize(mem uint32, memorySectionSizeMB uint32) (uint32, erro
return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
} }
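
As a quick arithmetic check of the rounding above: requesting 100 MiB with a 128 MiB memory section size gives ceil(100/128) = 1 section, so 128 MiB is hotplugged; requesting 129 MiB gives ceil(129/128) = 2 sections, i.e. 256 MiB.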
func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) { func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
currentVCPUs = q.config.NumVCPUs + uint32(len(q.state.HotpluggedVCPUs)) currentVCPUs = q.config.NumVCPUs + uint32(len(q.state.HotpluggedVCPUs))
newVCPUs = currentVCPUs newVCPUs = currentVCPUs
@ -2233,7 +2233,7 @@ func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint3
case currentVCPUs < reqVCPUs: case currentVCPUs < reqVCPUs:
//hotplug //hotplug
addCPUs := reqVCPUs - currentVCPUs addCPUs := reqVCPUs - currentVCPUs
data, err := q.hotplugAddDevice(addCPUs, cpuDev) data, err := q.hotplugAddDevice(ctx, addCPUs, cpuDev)
if err != nil { if err != nil {
return currentVCPUs, newVCPUs, err return currentVCPUs, newVCPUs, err
} }
@ -2245,7 +2245,7 @@ func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint3
case currentVCPUs > reqVCPUs: case currentVCPUs > reqVCPUs:
//hotunplug //hotunplug
removeCPUs := currentVCPUs - reqVCPUs removeCPUs := currentVCPUs - reqVCPUs
data, err := q.hotplugRemoveDevice(removeCPUs, cpuDev) data, err := q.hotplugRemoveDevice(ctx, removeCPUs, cpuDev)
if err != nil { if err != nil {
return currentVCPUs, newVCPUs, err return currentVCPUs, newVCPUs, err
} }
@ -2258,8 +2258,8 @@ func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint3
return currentVCPUs, newVCPUs, nil return currentVCPUs, newVCPUs, nil
} }
func (q *qemu) cleanup() error { func (q *qemu) cleanup(ctx context.Context) error {
span, _ := q.trace("cleanup") span, _ := q.trace(ctx, "cleanup")
defer span.End() defer span.End()
for _, fd := range q.fds { for _, fd := range q.fds {
@ -2333,10 +2333,10 @@ func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
return nil return nil
} }
func (q *qemu) toGrpc() ([]byte, error) { func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
q.qmpShutdown() q.qmpShutdown()
q.cleanup() q.cleanup(ctx)
qp := qemuGrpc{ qp := qemuGrpc{
ID: q.id, ID: q.id,
QmpChannelpath: q.qmpMonitorCh.path, QmpChannelpath: q.qmpMonitorCh.path,

View File

@ -105,7 +105,7 @@ type qemuArch interface {
appendRNGDevice(devices []govmmQemu.Device, rngDevice config.RNGDev) ([]govmmQemu.Device, error) appendRNGDevice(devices []govmmQemu.Device, rngDevice config.RNGDev) ([]govmmQemu.Device, error)
// addDeviceToBridge adds devices to the bus // addDeviceToBridge adds devices to the bus
addDeviceToBridge(ID string, t types.Type) (string, types.Bridge, error) addDeviceToBridge(ctx context.Context, ID string, t types.Type) (string, types.Bridge, error)
// removeDeviceFromBridge removes devices from the bus // removeDeviceFromBridge removes devices from the bus
removeDeviceFromBridge(ID string) error removeDeviceFromBridge(ID string) error
@ -722,8 +722,8 @@ func (q *qemuArchBase) setIgnoreSharedMemoryMigrationCaps(ctx context.Context, q
return err return err
} }
func (q *qemuArchBase) addDeviceToBridge(ID string, t types.Type) (string, types.Bridge, error) { func (q *qemuArchBase) addDeviceToBridge(ctx context.Context, ID string, t types.Type) (string, types.Bridge, error) {
addr, b, err := genericAddDeviceToBridge(q.Bridges, ID, t) addr, b, err := genericAddDeviceToBridge(ctx, q.Bridges, ID, t)
if err != nil { if err != nil {
return "", b, err return "", b, err
} }
@ -731,7 +731,7 @@ func (q *qemuArchBase) addDeviceToBridge(ID string, t types.Type) (string, types
return fmt.Sprintf("%02x", addr), b, nil return fmt.Sprintf("%02x", addr), b, nil
} }
func genericAddDeviceToBridge(bridges []types.Bridge, ID string, t types.Type) (uint32, types.Bridge, error) { func genericAddDeviceToBridge(ctx context.Context, bridges []types.Bridge, ID string, t types.Type) (uint32, types.Bridge, error) {
var err error var err error
var addr uint32 var addr uint32
@ -744,7 +744,7 @@ func genericAddDeviceToBridge(bridges []types.Bridge, ID string, t types.Type) (
if t != b.Type { if t != b.Type {
continue continue
} }
addr, err = b.AddDevice(ID) addr, err = b.AddDevice(ctx, ID)
if err == nil { if err == nil {
return addr, b, nil return addr, b, nil
} }

View File

@ -212,7 +212,7 @@ func testQemuAddDevice(t *testing.T, devInfo interface{}, devType deviceType, ex
arch: &qemuArchBase{}, arch: &qemuArchBase{},
} }
err := q.addDevice(devInfo, devType) err := q.addDevice(context.Background(), devInfo, devType)
assert.NoError(err) assert.NoError(err)
assert.Exactly(q.qemuConfig.Devices, expected) assert.Exactly(q.qemuConfig.Devices, expected)
} }
@ -402,9 +402,9 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) {
config: qemuConfig, config: qemuConfig,
} }
_, err := q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev) _, err := q.hotplugAddDevice(context.Background(), &memoryDevice{0, 128, uint64(0), false}, fsDev)
assert.Error(err) assert.Error(err)
_, err = q.hotplugRemoveDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev) _, err = q.hotplugRemoveDevice(context.Background(), &memoryDevice{0, 128, uint64(0), false}, fsDev)
assert.Error(err) assert.Error(err)
} }

View File

@ -127,14 +127,14 @@ type SandboxConfig struct {
Cgroups *configs.Cgroup Cgroups *configs.Cgroup
} }
func (s *Sandbox) trace(name string) (otelTrace.Span, context.Context) { func (s *Sandbox) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
if s.ctx == nil { if parent == nil {
s.Logger().WithField("type", "bug").Error("trace called before context set") s.Logger().WithField("type", "bug").Error("trace called before context set")
s.ctx = context.Background() parent = context.Background()
} }
tracer := otel.Tracer("kata") tracer := otel.Tracer("kata")
ctx, span := tracer.Start(s.ctx, name) ctx, span := tracer.Start(parent, name)
span.SetAttributes(otelLabel.Key("subsystem").String("sandbox")) span.SetAttributes(otelLabel.Key("subsystem").String("sandbox"))
return span, ctx return span, ctx
@ -283,13 +283,13 @@ func (s *Sandbox) GetContainer(containerID string) VCContainer {
} }
// Release closes the agent connection and removes sandbox from internal list. // Release closes the agent connection and removes sandbox from internal list.
func (s *Sandbox) Release() error { func (s *Sandbox) Release(ctx context.Context) error {
s.Logger().Info("release sandbox") s.Logger().Info("release sandbox")
if s.monitor != nil { if s.monitor != nil {
s.monitor.stop() s.monitor.stop()
} }
s.hypervisor.disconnect() s.hypervisor.disconnect(ctx)
return s.agent.disconnect() return s.agent.disconnect(ctx)
} }
// Status gets the status of the sandbox // Status gets the status of the sandbox
@ -323,7 +323,7 @@ func (s *Sandbox) Status() SandboxStatus {
} }
// Monitor returns a error channel for watcher to watch at // Monitor returns a error channel for watcher to watch at
func (s *Sandbox) Monitor() (chan error, error) { func (s *Sandbox) Monitor(ctx context.Context) (chan error, error) {
if s.state.State != types.StateRunning { if s.state.State != types.StateRunning {
return nil, fmt.Errorf("Sandbox is not running") return nil, fmt.Errorf("Sandbox is not running")
} }
@ -334,11 +334,11 @@ func (s *Sandbox) Monitor() (chan error, error) {
} }
s.Unlock() s.Unlock()
return s.monitor.newWatcher() return s.monitor.newWatcher(ctx)
} }
// WaitProcess waits on a container process and return its exit code // WaitProcess waits on a container process and return its exit code
func (s *Sandbox) WaitProcess(containerID, processID string) (int32, error) { func (s *Sandbox) WaitProcess(ctx context.Context, containerID, processID string) (int32, error) {
if s.state.State != types.StateRunning { if s.state.State != types.StateRunning {
return 0, fmt.Errorf("Sandbox not running") return 0, fmt.Errorf("Sandbox not running")
} }
@ -348,12 +348,12 @@ func (s *Sandbox) WaitProcess(containerID, processID string) (int32, error) {
return 0, err return 0, err
} }
return c.wait(processID) return c.wait(ctx, processID)
} }
// SignalProcess sends a signal to a process of a container when all is false. // SignalProcess sends a signal to a process of a container when all is false.
// When all is true, it sends the signal to all processes of a container. // When all is true, it sends the signal to all processes of a container.
func (s *Sandbox) SignalProcess(containerID, processID string, signal syscall.Signal, all bool) error { func (s *Sandbox) SignalProcess(ctx context.Context, containerID, processID string, signal syscall.Signal, all bool) error {
if s.state.State != types.StateRunning { if s.state.State != types.StateRunning {
return fmt.Errorf("Sandbox not running") return fmt.Errorf("Sandbox not running")
} }
@ -363,11 +363,11 @@ func (s *Sandbox) SignalProcess(containerID, processID string, signal syscall.Si
return err return err
} }
return c.signalProcess(processID, signal, all) return c.signalProcess(ctx, processID, signal, all)
} }
// WinsizeProcess resizes the tty window of a process // WinsizeProcess resizes the tty window of a process
func (s *Sandbox) WinsizeProcess(containerID, processID string, height, width uint32) error { func (s *Sandbox) WinsizeProcess(ctx context.Context, containerID, processID string, height, width uint32) error {
if s.state.State != types.StateRunning { if s.state.State != types.StateRunning {
return fmt.Errorf("Sandbox not running") return fmt.Errorf("Sandbox not running")
} }
@ -377,7 +377,7 @@ func (s *Sandbox) WinsizeProcess(containerID, processID string, height, width ui
return err return err
} }
return c.winsizeProcess(processID, height, width) return c.winsizeProcess(ctx, processID, height, width)
} }
// IOStream returns stdin writer, stdout reader and stderr reader of a process // IOStream returns stdin writer, stdout reader and stderr reader of a process
@ -419,8 +419,8 @@ func createAssets(ctx context.Context, sandboxConfig *SandboxConfig) error {
return nil return nil
} }
func (s *Sandbox) getAndStoreGuestDetails() error { func (s *Sandbox) getAndStoreGuestDetails(ctx context.Context) error {
guestDetailRes, err := s.agent.getGuestDetails(&grpc.GuestDetailsRequest{ guestDetailRes, err := s.agent.getGuestDetails(ctx, &grpc.GuestDetailsRequest{
MemBlockSize: true, MemBlockSize: true,
MemHotplugProbe: true, MemHotplugProbe: true,
}) })
@ -470,7 +470,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
} }
// Below code path is called only during create, because of earlier check. // Below code path is called only during create, because of earlier check.
if err := s.agent.createSandbox(s); err != nil { if err := s.agent.createSandbox(ctx, s); err != nil {
return nil, err return nil, err
} }
@ -617,8 +617,8 @@ func (s *Sandbox) createCgroupManager() error {
} }
// storeSandbox stores a sandbox config. // storeSandbox stores a sandbox config.
func (s *Sandbox) storeSandbox() error { func (s *Sandbox) storeSandbox(ctx context.Context) error {
span, _ := s.trace("storeSandbox") span, ctx := s.trace(ctx, "storeSandbox")
defer span.End() defer span.End()
// flush data to storage // flush data to storage
@ -688,7 +688,7 @@ func (s *Sandbox) removeContainer(containerID string) error {
// Delete deletes an already created sandbox. // Delete deletes an already created sandbox.
// The VM in which the sandbox is running will be shut down. // The VM in which the sandbox is running will be shut down.
func (s *Sandbox) Delete() error { func (s *Sandbox) Delete(ctx context.Context) error {
if s.state.State != types.StateReady && if s.state.State != types.StateReady &&
s.state.State != types.StatePaused && s.state.State != types.StatePaused &&
s.state.State != types.StateStopped { s.state.State != types.StateStopped {
@ -696,7 +696,7 @@ func (s *Sandbox) Delete() error {
} }
for _, c := range s.containers { for _, c := range s.containers {
if err := c.delete(); err != nil { if err := c.delete(ctx); err != nil {
return err return err
} }
} }
@ -711,17 +711,18 @@ func (s *Sandbox) Delete() error {
s.monitor.stop() s.monitor.stop()
} }
if err := s.hypervisor.cleanup(); err != nil { if err := s.hypervisor.cleanup(ctx); err != nil {
s.Logger().WithError(err).Error("failed to cleanup hypervisor") s.Logger().WithError(err).Error("failed to cleanup hypervisor")
} }
s.agent.cleanup(s) s.agent.cleanup(ctx, s)
return s.newStore.Destroy(s.id) return s.newStore.Destroy(s.id)
} }
func (s *Sandbox) startNetworkMonitor() error { func (s *Sandbox) startNetworkMonitor(ctx context.Context) error {
span, _ := s.trace("startNetworkMonitor") var span otelTrace.Span
span, ctx = s.trace(ctx, "startNetworkMonitor")
defer span.End() defer span.End()
binPath, err := os.Executable() binPath, err := os.Executable()
@ -742,7 +743,7 @@ func (s *Sandbox) startNetworkMonitor() error {
sandboxID: s.id, sandboxID: s.id,
} }
return s.network.Run(s.networkNS.NetNsPath, func() error { return s.network.Run(ctx, s.networkNS.NetNsPath, func() error {
pid, err := startNetmon(params) pid, err := startNetmon(params)
if err != nil { if err != nil {
return err return err
@ -754,13 +755,13 @@ func (s *Sandbox) startNetworkMonitor() error {
}) })
} }
func (s *Sandbox) createNetwork() error { func (s *Sandbox) createNetwork(ctx context.Context) error {
if s.config.NetworkConfig.DisableNewNetNs || if s.config.NetworkConfig.DisableNewNetNs ||
s.config.NetworkConfig.NetNSPath == "" { s.config.NetworkConfig.NetNSPath == "" {
return nil return nil
} }
span, _ := s.trace("createNetwork") span, ctx := s.trace(ctx, "createNetwork")
defer span.End() defer span.End()
s.networkNS = NetworkNamespace{ s.networkNS = NetworkNamespace{
@ -772,7 +773,7 @@ func (s *Sandbox) createNetwork() error {
// after vm is started. // after vm is started.
if s.factory == nil { if s.factory == nil {
// Add the network // Add the network
endpoints, err := s.network.Add(s.ctx, &s.config.NetworkConfig, s, false) endpoints, err := s.network.Add(ctx, &s.config.NetworkConfig, s, false)
if err != nil { if err != nil {
return err return err
} }
@ -780,7 +781,7 @@ func (s *Sandbox) createNetwork() error {
s.networkNS.Endpoints = endpoints s.networkNS.Endpoints = endpoints
if s.config.NetworkConfig.NetmonConfig.Enable { if s.config.NetworkConfig.NetmonConfig.Enable {
if err := s.startNetworkMonitor(); err != nil { if err := s.startNetworkMonitor(ctx); err != nil {
return err return err
} }
} }
@ -788,13 +789,14 @@ func (s *Sandbox) createNetwork() error {
return nil return nil
} }
func (s *Sandbox) postCreatedNetwork() error { func (s *Sandbox) postCreatedNetwork(ctx context.Context) error {
return s.network.PostAdd(s.ctx, &s.networkNS, s.factory != nil) return s.network.PostAdd(ctx, &s.networkNS, s.factory != nil)
} }
func (s *Sandbox) removeNetwork() error { func (s *Sandbox) removeNetwork(ctx context.Context) error {
span, _ := s.trace("removeNetwork") var span otelTrace.Span
span, ctx = s.trace(ctx, "removeNetwork")
defer span.End() defer span.End()
if s.config.NetworkConfig.NetmonConfig.Enable { if s.config.NetworkConfig.NetmonConfig.Enable {
@ -803,7 +805,7 @@ func (s *Sandbox) removeNetwork() error {
} }
} }
return s.network.Remove(s.ctx, &s.networkNS, s.hypervisor) return s.network.Remove(ctx, &s.networkNS, s.hypervisor)
} }
func (s *Sandbox) generateNetInfo(inf *pbTypes.Interface) (NetworkInfo, error) { func (s *Sandbox) generateNetInfo(inf *pbTypes.Interface) (NetworkInfo, error) {
@ -837,7 +839,7 @@ func (s *Sandbox) generateNetInfo(inf *pbTypes.Interface) (NetworkInfo, error) {
} }
// AddInterface adds new nic to the sandbox. // AddInterface adds new nic to the sandbox.
func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) { func (s *Sandbox) AddInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
netInfo, err := s.generateNetInfo(inf) netInfo, err := s.generateNetInfo(inf)
if err != nil { if err != nil {
return nil, err return nil, err
@ -851,7 +853,7 @@ func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, erro
endpoint.SetProperties(netInfo) endpoint.SetProperties(netInfo)
if err := doNetNS(s.networkNS.NetNsPath, func(_ ns.NetNS) error { if err := doNetNS(s.networkNS.NetNsPath, func(_ ns.NetNS) error {
s.Logger().WithField("endpoint-type", endpoint.Type()).Info("Hot attaching endpoint") s.Logger().WithField("endpoint-type", endpoint.Type()).Info("Hot attaching endpoint")
return endpoint.HotAttach(s.hypervisor) return endpoint.HotAttach(ctx, s.hypervisor)
}); err != nil { }); err != nil {
return nil, err return nil, err
} }
@ -864,15 +866,15 @@ func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, erro
// Add network for vm // Add network for vm
inf.PciPath = endpoint.PciPath().String() inf.PciPath = endpoint.PciPath().String()
return s.agent.updateInterface(inf) return s.agent.updateInterface(ctx, inf)
} }
// RemoveInterface removes a nic of the sandbox. // RemoveInterface removes a nic of the sandbox.
func (s *Sandbox) RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) { func (s *Sandbox) RemoveInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
for i, endpoint := range s.networkNS.Endpoints { for i, endpoint := range s.networkNS.Endpoints {
if endpoint.HardwareAddr() == inf.HwAddr { if endpoint.HardwareAddr() == inf.HwAddr {
s.Logger().WithField("endpoint-type", endpoint.Type()).Info("Hot detaching endpoint") s.Logger().WithField("endpoint-type", endpoint.Type()).Info("Hot detaching endpoint")
if err := endpoint.HotDetach(s.hypervisor, s.networkNS.NetNsCreated, s.networkNS.NetNsPath); err != nil { if err := endpoint.HotDetach(ctx, s.hypervisor, s.networkNS.NetNsCreated, s.networkNS.NetNsPath); err != nil {
return inf, err return inf, err
} }
s.networkNS.Endpoints = append(s.networkNS.Endpoints[:i], s.networkNS.Endpoints[i+1:]...) s.networkNS.Endpoints = append(s.networkNS.Endpoints[:i], s.networkNS.Endpoints[i+1:]...)
@ -888,18 +890,18 @@ func (s *Sandbox) RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, e
} }
// ListInterfaces lists all nics and their configurations in the sandbox. // ListInterfaces lists all nics and their configurations in the sandbox.
func (s *Sandbox) ListInterfaces() ([]*pbTypes.Interface, error) { func (s *Sandbox) ListInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
return s.agent.listInterfaces() return s.agent.listInterfaces(ctx)
} }
// UpdateRoutes updates the sandbox route table (e.g. for portmapping support). // UpdateRoutes updates the sandbox route table (e.g. for portmapping support).
func (s *Sandbox) UpdateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) { func (s *Sandbox) UpdateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
return s.agent.updateRoutes(routes) return s.agent.updateRoutes(ctx, routes)
} }
// ListRoutes lists all routes and their configurations in the sandbox. // ListRoutes lists all routes and their configurations in the sandbox.
func (s *Sandbox) ListRoutes() ([]*pbTypes.Route, error) { func (s *Sandbox) ListRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
return s.agent.listRoutes() return s.agent.listRoutes(ctx)
} }
const ( const (
@ -918,13 +920,13 @@ type consoleWatcher struct {
ptyConsole *os.File ptyConsole *os.File
} }
func newConsoleWatcher(s *Sandbox) (*consoleWatcher, error) { func newConsoleWatcher(ctx context.Context, s *Sandbox) (*consoleWatcher, error) {
var ( var (
err error err error
cw consoleWatcher cw consoleWatcher
) )
cw.proto, cw.consoleURL, err = s.hypervisor.getSandboxConsole(s.id) cw.proto, cw.consoleURL, err = s.hypervisor.getSandboxConsole(ctx, s.id)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1000,22 +1002,22 @@ func (cw *consoleWatcher) stop() {
} }
// startVM starts the VM. // startVM starts the VM.
func (s *Sandbox) startVM() (err error) { func (s *Sandbox) startVM(ctx context.Context) (err error) {
span, ctx := s.trace("startVM") span, ctx := s.trace(ctx, "startVM")
defer span.End() defer span.End()
s.Logger().Info("Starting VM") s.Logger().Info("Starting VM")
if s.config.HypervisorConfig.Debug { if s.config.HypervisorConfig.Debug {
// create console watcher // create console watcher
consoleWatcher, err := newConsoleWatcher(s) consoleWatcher, err := newConsoleWatcher(ctx, s)
if err != nil { if err != nil {
return err return err
} }
s.cw = consoleWatcher s.cw = consoleWatcher
} }
if err := s.network.Run(s.networkNS.NetNsPath, func() error { if err := s.network.Run(ctx, s.networkNS.NetNsPath, func() error {
if s.factory != nil { if s.factory != nil {
vm, err := s.factory.GetVM(ctx, VMConfig{ vm, err := s.factory.GetVM(ctx, VMConfig{
HypervisorType: s.config.HypervisorType, HypervisorType: s.config.HypervisorType,
@ -1029,21 +1031,21 @@ func (s *Sandbox) startVM() (err error) {
return vm.assignSandbox(s) return vm.assignSandbox(s)
} }
return s.hypervisor.startSandbox(vmStartTimeout) return s.hypervisor.startSandbox(ctx, vmStartTimeout)
}); err != nil { }); err != nil {
return err return err
} }
defer func() { defer func() {
if err != nil { if err != nil {
s.hypervisor.stopSandbox() s.hypervisor.stopSandbox(ctx)
} }
}() }()
// In case of vm factory, network interfaces are hotplugged // In case of vm factory, network interfaces are hotplugged
// after vm is started. // after vm is started.
if s.factory != nil { if s.factory != nil {
endpoints, err := s.network.Add(s.ctx, &s.config.NetworkConfig, s, true) endpoints, err := s.network.Add(ctx, &s.config.NetworkConfig, s, true)
if err != nil { if err != nil {
return err return err
} }
@ -1051,7 +1053,7 @@ func (s *Sandbox) startVM() (err error) {
s.networkNS.Endpoints = endpoints s.networkNS.Endpoints = endpoints
if s.config.NetworkConfig.NetmonConfig.Enable { if s.config.NetworkConfig.NetmonConfig.Enable {
if err := s.startNetworkMonitor(); err != nil { if err := s.startNetworkMonitor(ctx); err != nil {
return err return err
} }
} }
@ -1071,7 +1073,7 @@ func (s *Sandbox) startVM() (err error) {
// we want to guarantee that it is manageable. // we want to guarantee that it is manageable.
// For that we need to ask the agent to start the // For that we need to ask the agent to start the
// sandbox inside the VM. // sandbox inside the VM.
if err := s.agent.startSandbox(s); err != nil { if err := s.agent.startSandbox(ctx, s); err != nil {
return err return err
} }
@ -1081,12 +1083,12 @@ func (s *Sandbox) startVM() (err error) {
} }
// stopVM: stop the sandbox's VM // stopVM: stop the sandbox's VM
func (s *Sandbox) stopVM() error { func (s *Sandbox) stopVM(ctx context.Context) error {
span, _ := s.trace("stopVM") span, ctx := s.trace(ctx, "stopVM")
defer span.End() defer span.End()
s.Logger().Info("Stopping sandbox in the VM") s.Logger().Info("Stopping sandbox in the VM")
if err := s.agent.stopSandbox(s); err != nil { if err := s.agent.stopSandbox(ctx, s); err != nil {
s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("Agent did not stop sandbox") s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("Agent did not stop sandbox")
} }
@ -1097,7 +1099,7 @@ func (s *Sandbox) stopVM() error {
} }
s.Logger().Info("Stopping VM") s.Logger().Info("Stopping VM")
return s.hypervisor.stopSandbox() return s.hypervisor.stopSandbox(ctx)
} }
func (s *Sandbox) addContainer(c *Container) error { func (s *Sandbox) addContainer(c *Container) error {
@ -1112,9 +1114,9 @@ func (s *Sandbox) addContainer(c *Container) error {
// CreateContainer creates a new container in the sandbox // CreateContainer creates a new container in the sandbox
// This should be called only when the sandbox is already created. // This should be called only when the sandbox is already created.
// It will add new container config to sandbox.config.Containers // It will add new container config to sandbox.config.Containers
func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, error) { func (s *Sandbox) CreateContainer(ctx context.Context, contConfig ContainerConfig) (VCContainer, error) {
// Create the container object, add devices to the sandbox's device-manager: // Create the container object, add devices to the sandbox's device-manager:
c, err := newContainer(s, &contConfig) c, err := newContainer(ctx, s, &contConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1132,7 +1134,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
}() }()
// create and start the container // create and start the container
err = c.create() err = c.create(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1148,7 +1150,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
logger := s.Logger().WithFields(logrus.Fields{"container-id": c.id, "sandbox-id": s.id, "rollback": true}) logger := s.Logger().WithFields(logrus.Fields{"container-id": c.id, "sandbox-id": s.id, "rollback": true})
logger.WithError(err).Error("Cleaning up partially created container") logger.WithError(err).Error("Cleaning up partially created container")
if err2 := c.stop(true); err2 != nil { if err2 := c.stop(ctx, true); err2 != nil {
logger.WithError(err2).Error("Could not delete container") logger.WithError(err2).Error("Could not delete container")
} }
@ -1160,16 +1162,16 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
// Sandbox is responsible for updating VM resources needed by Containers // Sandbox is responsible for updating VM resources needed by Containers
// Update resources after having added containers to the sandbox, since // Update resources after having added containers to the sandbox, since
// container status is required to know if more resources should be added. // container status is required to know if more resources should be added.
err = s.updateResources() err = s.updateResources(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err = s.cgroupsUpdate(); err != nil { if err = s.cgroupsUpdate(ctx); err != nil {
return nil, err return nil, err
} }
if err = s.storeSandbox(); err != nil { if err = s.storeSandbox(ctx); err != nil {
return nil, err return nil, err
} }
@ -1177,7 +1179,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
} }
// StartContainer starts a container in the sandbox // StartContainer starts a container in the sandbox
func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) { func (s *Sandbox) StartContainer(ctx context.Context, containerID string) (VCContainer, error) {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
@ -1185,12 +1187,12 @@ func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) {
} }
// Start it. // Start it.
err = c.start() err = c.start(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err = s.storeSandbox(); err != nil { if err = s.storeSandbox(ctx); err != nil {
return nil, err return nil, err
} }
@ -1198,7 +1200,7 @@ func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) {
// Update sandbox resources in case a stopped container // Update sandbox resources in case a stopped container
// is started // is started
err = s.updateResources() err = s.updateResources(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1207,7 +1209,7 @@ func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) {
} }
// StopContainer stops a container in the sandbox // StopContainer stops a container in the sandbox
func (s *Sandbox) StopContainer(containerID string, force bool) (VCContainer, error) { func (s *Sandbox) StopContainer(ctx context.Context, containerID string, force bool) (VCContainer, error) {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
@ -1215,18 +1217,18 @@ func (s *Sandbox) StopContainer(containerID string, force bool) (VCContainer, er
} }
// Stop it. // Stop it.
if err := c.stop(force); err != nil { if err := c.stop(ctx, force); err != nil {
return nil, err return nil, err
} }
if err = s.storeSandbox(); err != nil { if err = s.storeSandbox(ctx); err != nil {
return nil, err return nil, err
} }
return c, nil return c, nil
} }
// KillContainer signals a container in the sandbox // KillContainer signals a container in the sandbox
func (s *Sandbox) KillContainer(containerID string, signal syscall.Signal, all bool) error { func (s *Sandbox) KillContainer(ctx context.Context, containerID string, signal syscall.Signal, all bool) error {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
@ -1234,7 +1236,7 @@ func (s *Sandbox) KillContainer(containerID string, signal syscall.Signal, all b
} }
// Send a signal to the process. // Send a signal to the process.
err = c.kill(signal, all) err = c.kill(ctx, signal, all)
// SIGKILL should never fail otherwise it is // SIGKILL should never fail otherwise it is
// impossible to clean things up. // impossible to clean things up.
@ -1246,7 +1248,7 @@ func (s *Sandbox) KillContainer(containerID string, signal syscall.Signal, all b
} }
// DeleteContainer deletes a container from the sandbox // DeleteContainer deletes a container from the sandbox
func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) { func (s *Sandbox) DeleteContainer(ctx context.Context, containerID string) (VCContainer, error) {
if containerID == "" { if containerID == "" {
return nil, vcTypes.ErrNeedContainerID return nil, vcTypes.ErrNeedContainerID
} }
@ -1258,7 +1260,7 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {
} }
// Delete it. // Delete it.
err = c.delete() err = c.delete(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1272,11 +1274,11 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {
} }
// update the sandbox cgroup // update the sandbox cgroup
if err = s.cgroupsUpdate(); err != nil { if err = s.cgroupsUpdate(ctx); err != nil {
return nil, err return nil, err
} }
if err = s.storeSandbox(); err != nil { if err = s.storeSandbox(ctx); err != nil {
return nil, err return nil, err
} }
return c, nil return c, nil
@ -1284,7 +1286,7 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {
// ProcessListContainer lists every process running inside a specific // ProcessListContainer lists every process running inside a specific
// container in the sandbox. // container in the sandbox.
func (s *Sandbox) ProcessListContainer(containerID string, options ProcessListOptions) (ProcessList, error) { func (s *Sandbox) ProcessListContainer(ctx context.Context, containerID string, options ProcessListOptions) (ProcessList, error) {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
@ -1292,7 +1294,7 @@ func (s *Sandbox) ProcessListContainer(containerID string, options ProcessListOp
} }
// Get the process list related to the container. // Get the process list related to the container.
return c.processList(options) return c.processList(ctx, options)
} }
// StatusContainer gets the status of a container // StatusContainer gets the status of a container
@ -1323,7 +1325,7 @@ func (s *Sandbox) StatusContainer(containerID string) (ContainerStatus, error) {
// EnterContainer is the virtcontainers container command execution entry point. // EnterContainer is the virtcontainers container command execution entry point.
// EnterContainer enters an already running container and runs a given command. // EnterContainer enters an already running container and runs a given command.
func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (VCContainer, *Process, error) { func (s *Sandbox) EnterContainer(ctx context.Context, containerID string, cmd types.Cmd) (VCContainer, *Process, error) {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
@ -1331,7 +1333,7 @@ func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (VCContainer
} }
// Enter it. // Enter it.
process, err := c.enter(cmd) process, err := c.enter(ctx, cmd)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -1340,37 +1342,37 @@ func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (VCContainer
} }
// UpdateContainer update a running container. // UpdateContainer update a running container.
func (s *Sandbox) UpdateContainer(containerID string, resources specs.LinuxResources) error { func (s *Sandbox) UpdateContainer(ctx context.Context, containerID string, resources specs.LinuxResources) error {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
return err return err
} }
err = c.update(resources) err = c.update(ctx, resources)
if err != nil { if err != nil {
return err return err
} }
if err := s.cgroupsUpdate(); err != nil { if err := s.cgroupsUpdate(ctx); err != nil {
return err return err
} }
if err = s.storeSandbox(); err != nil { if err = s.storeSandbox(ctx); err != nil {
return err return err
} }
return nil return nil
} }
// StatsContainer return the stats of a running container // StatsContainer return the stats of a running container
func (s *Sandbox) StatsContainer(containerID string) (ContainerStats, error) { func (s *Sandbox) StatsContainer(ctx context.Context, containerID string) (ContainerStats, error) {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
return ContainerStats{}, err return ContainerStats{}, err
} }
stats, err := c.stats() stats, err := c.stats(ctx)
if err != nil { if err != nil {
return ContainerStats{}, err return ContainerStats{}, err
} }
@ -1378,7 +1380,7 @@ func (s *Sandbox) StatsContainer(containerID string) (ContainerStats, error) {
} }
// Stats returns the stats of a running sandbox // Stats returns the stats of a running sandbox
func (s *Sandbox) Stats() (SandboxStats, error) { func (s *Sandbox) Stats(ctx context.Context) (SandboxStats, error) {
if s.state.CgroupPath == "" { if s.state.CgroupPath == "" {
return SandboxStats{}, fmt.Errorf("sandbox cgroup path is empty") return SandboxStats{}, fmt.Errorf("sandbox cgroup path is empty")
} }
@ -1408,7 +1410,7 @@ func (s *Sandbox) Stats() (SandboxStats, error) {
stats.CgroupStats.CPUStats.CPUUsage.TotalUsage = metrics.CPU.Usage.Total stats.CgroupStats.CPUStats.CPUUsage.TotalUsage = metrics.CPU.Usage.Total
stats.CgroupStats.MemoryStats.Usage.Usage = metrics.Memory.Usage.Usage stats.CgroupStats.MemoryStats.Usage.Usage = metrics.Memory.Usage.Usage
tids, err := s.hypervisor.getThreadIDs() tids, err := s.hypervisor.getThreadIDs(ctx)
if err != nil { if err != nil {
return stats, err return stats, err
} }
@ -1418,7 +1420,7 @@ func (s *Sandbox) Stats() (SandboxStats, error) {
} }
// PauseContainer pauses a running container. // PauseContainer pauses a running container.
func (s *Sandbox) PauseContainer(containerID string) error { func (s *Sandbox) PauseContainer(ctx context.Context, containerID string) error {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
@ -1426,18 +1428,18 @@ func (s *Sandbox) PauseContainer(containerID string) error {
} }
// Pause the container. // Pause the container.
if err := c.pause(); err != nil { if err := c.pause(ctx); err != nil {
return err return err
} }
if err = s.storeSandbox(); err != nil { if err = s.storeSandbox(ctx); err != nil {
return err return err
} }
return nil return nil
} }
// ResumeContainer resumes a paused container. // ResumeContainer resumes a paused container.
func (s *Sandbox) ResumeContainer(containerID string) error { func (s *Sandbox) ResumeContainer(ctx context.Context, containerID string) error {
// Fetch the container. // Fetch the container.
c, err := s.findContainer(containerID) c, err := s.findContainer(containerID)
if err != nil { if err != nil {
@ -1445,11 +1447,11 @@ func (s *Sandbox) ResumeContainer(containerID string) error {
} }
// Resume the container. // Resume the container.
if err := c.resume(); err != nil { if err := c.resume(ctx); err != nil {
return err return err
} }
if err = s.storeSandbox(); err != nil { if err = s.storeSandbox(ctx); err != nil {
return err return err
} }
return nil return nil
@ -1457,17 +1459,17 @@ func (s *Sandbox) ResumeContainer(containerID string) error {
// createContainers registers all containers, creates the // createContainers registers all containers, creates the
// containers in the guest and starts one shim per container. // containers in the guest and starts one shim per container.
func (s *Sandbox) createContainers() error { func (s *Sandbox) createContainers(ctx context.Context) error {
span, _ := s.trace("createContainers") span, ctx := s.trace(ctx, "createContainers")
defer span.End() defer span.End()
for _, contConfig := range s.config.Containers { for _, contConfig := range s.config.Containers {
c, err := newContainer(s, &contConfig) c, err := newContainer(ctx, s, &contConfig)
if err != nil { if err != nil {
return err return err
} }
if err := c.create(); err != nil { if err := c.create(ctx); err != nil {
return err return err
} }
@ -1478,14 +1480,14 @@ func (s *Sandbox) createContainers() error {
// Update resources after having added containers to the sandbox, since // Update resources after having added containers to the sandbox, since
// container status is required to know if more resources should be added. // container status is required to know if more resources should be added.
if err := s.updateResources(); err != nil { if err := s.updateResources(ctx); err != nil {
return err return err
} }
if err := s.cgroupsUpdate(); err != nil { if err := s.cgroupsUpdate(ctx); err != nil {
return err return err
} }
if err := s.storeSandbox(); err != nil { if err := s.storeSandbox(ctx); err != nil {
return err return err
} }
@ -1494,7 +1496,7 @@ func (s *Sandbox) createContainers() error {
// Start starts a sandbox. The containers that make up the sandbox // Start starts a sandbox. The containers that make up the sandbox
// will be started. // will be started.
func (s *Sandbox) Start() error { func (s *Sandbox) Start(ctx context.Context) error {
if err := s.state.ValidTransition(s.state.State, types.StateRunning); err != nil { if err := s.state.ValidTransition(s.state.State, types.StateRunning); err != nil {
return err return err
} }
@ -1512,12 +1514,12 @@ func (s *Sandbox) Start() error {
} }
}() }()
for _, c := range s.containers { for _, c := range s.containers {
if startErr = c.start(); startErr != nil { if startErr = c.start(ctx); startErr != nil {
return startErr return startErr
} }
} }
if err := s.storeSandbox(); err != nil { if err := s.storeSandbox(ctx); err != nil {
return err return err
} }
@ -1529,8 +1531,8 @@ func (s *Sandbox) Start() error {
// Stop stops a sandbox. The containers that make up the sandbox // Stop stops a sandbox. The containers that make up the sandbox
// will be destroyed. // will be destroyed.
// When force is true, ignore guest related stop failures. // When force is true, ignore guest related stop failures.
func (s *Sandbox) Stop(force bool) error { func (s *Sandbox) Stop(ctx context.Context, force bool) error {
span, _ := s.trace("stop") span, ctx := s.trace(ctx, "Stop")
defer span.End() defer span.End()
if s.state.State == types.StateStopped { if s.state.State == types.StateStopped {
@ -1543,12 +1545,12 @@ func (s *Sandbox) Stop(force bool) error {
} }
for _, c := range s.containers { for _, c := range s.containers {
if err := c.stop(force); err != nil { if err := c.stop(ctx, force); err != nil {
return err return err
} }
} }
if err := s.stopVM(); err != nil && !force { if err := s.stopVM(ctx); err != nil && !force {
return err return err
} }
@ -1563,16 +1565,16 @@ func (s *Sandbox) Stop(force bool) error {
} }
// Remove the network. // Remove the network.
if err := s.removeNetwork(); err != nil && !force { if err := s.removeNetwork(ctx); err != nil && !force {
return err return err
} }
if err := s.storeSandbox(); err != nil { if err := s.storeSandbox(ctx); err != nil {
return err return err
} }
// Stop communicating with the agent. // Stop communicating with the agent.
if err := s.agent.disconnect(); err != nil && !force { if err := s.agent.disconnect(ctx); err != nil && !force {
return err return err
} }
@ -1640,8 +1642,8 @@ func (s *Sandbox) unsetSandboxBlockIndex(index int) error {
// HotplugAddDevice is used to add a device to the sandbox // HotplugAddDevice is used to add a device to the sandbox
// Sandbox implements the DeviceReceiver interface from device/api/interface.go // Sandbox implements the DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType) error { func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
span, _ := s.trace("HotplugAddDevice") span, ctx := s.trace(ctx, "HotplugAddDevice")
defer span.End() defer span.End()
if s.config.SandboxCgroupOnly { if s.config.SandboxCgroupOnly {
@ -1649,7 +1651,7 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)
// the device cgroup MUST be updated since the hypervisor // the device cgroup MUST be updated since the hypervisor
// will need access to such device // will need access to such device
hdev := device.GetHostPath() hdev := device.GetHostPath()
if err := s.cgroupMgr.AddDevice(hdev); err != nil { if err := s.cgroupMgr.AddDevice(ctx, hdev); err != nil {
s.Logger().WithError(err).WithField("device", hdev). s.Logger().WithError(err).WithField("device", hdev).
Warn("Could not add device to cgroup") Warn("Could not add device to cgroup")
} }
@ -1664,7 +1666,7 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)
// adding a group of VFIO devices // adding a group of VFIO devices
for _, dev := range vfioDevices { for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugAddDevice(dev, vfioDev); err != nil { if _, err := s.hypervisor.hotplugAddDevice(ctx, dev, vfioDev); err != nil {
s.Logger(). s.Logger().
WithFields(logrus.Fields{ WithFields(logrus.Fields{
"sandbox": s.id, "sandbox": s.id,
@ -1680,14 +1682,14 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)
if !ok { if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType) return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
} }
_, err := s.hypervisor.hotplugAddDevice(blockDevice.BlockDrive, blockDev) _, err := s.hypervisor.hotplugAddDevice(ctx, blockDevice.BlockDrive, blockDev)
return err return err
case config.VhostUserBlk: case config.VhostUserBlk:
vhostUserBlkDevice, ok := device.(*drivers.VhostUserBlkDevice) vhostUserBlkDevice, ok := device.(*drivers.VhostUserBlkDevice)
if !ok { if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType) return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
} }
_, err := s.hypervisor.hotplugAddDevice(vhostUserBlkDevice.VhostUserDeviceAttrs, vhostuserDev) _, err := s.hypervisor.hotplugAddDevice(ctx, vhostUserBlkDevice.VhostUserDeviceAttrs, vhostuserDev)
return err return err
case config.DeviceGeneric: case config.DeviceGeneric:
// TODO: what? // TODO: what?
@ -1698,7 +1700,7 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)
// HotplugRemoveDevice is used to remove a device from the sandbox // HotplugRemoveDevice is used to remove a device from the sandbox
// Sandbox implements the DeviceReceiver interface from device/api/interface.go // Sandbox implements the DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) HotplugRemoveDevice(device api.Device, devType config.DeviceType) error { func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
defer func() { defer func() {
if s.config.SandboxCgroupOnly { if s.config.SandboxCgroupOnly {
// Remove device from cgroup, the hypervisor // Remove device from cgroup, the hypervisor
@ -1720,7 +1722,7 @@ func (s *Sandbox) HotplugRemoveDevice(device api.Device, devType config.DeviceTy
// remove a group of VFIO devices // remove a group of VFIO devices
for _, dev := range vfioDevices { for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugRemoveDevice(dev, vfioDev); err != nil { if _, err := s.hypervisor.hotplugRemoveDevice(ctx, dev, vfioDev); err != nil {
s.Logger().WithError(err). s.Logger().WithError(err).
WithFields(logrus.Fields{ WithFields(logrus.Fields{
"sandbox": s.id, "sandbox": s.id,
@ -1736,14 +1738,14 @@ func (s *Sandbox) HotplugRemoveDevice(device api.Device, devType config.DeviceTy
if !ok { if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType) return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
} }
_, err := s.hypervisor.hotplugRemoveDevice(blockDrive, blockDev) _, err := s.hypervisor.hotplugRemoveDevice(ctx, blockDrive, blockDev)
return err return err
case config.VhostUserBlk: case config.VhostUserBlk:
vhostUserDeviceAttrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs) vhostUserDeviceAttrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
if !ok { if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType) return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
} }
_, err := s.hypervisor.hotplugRemoveDevice(vhostUserDeviceAttrs, vhostuserDev) _, err := s.hypervisor.hotplugRemoveDevice(ctx, vhostUserDeviceAttrs, vhostuserDev)
return err return err
case config.DeviceGeneric: case config.DeviceGeneric:
// TODO: what? // TODO: what?
@ -1767,14 +1769,14 @@ func (s *Sandbox) UnsetSandboxBlockIndex(index int) error {
// AppendDevice can only handle vhost user devices currently; it adds a // AppendDevice can only handle vhost user devices currently; it adds a
// vhost user device to the sandbox. // vhost user device to the sandbox.
// Sandbox implements the DeviceReceiver interface from device/api/interface.go // Sandbox implements the DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) AppendDevice(device api.Device) error { func (s *Sandbox) AppendDevice(ctx context.Context, device api.Device) error {
switch device.DeviceType() { switch device.DeviceType() {
case config.VhostUserSCSI, config.VhostUserNet, config.VhostUserBlk, config.VhostUserFS: case config.VhostUserSCSI, config.VhostUserNet, config.VhostUserBlk, config.VhostUserFS:
return s.hypervisor.addDevice(device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), vhostuserDev) return s.hypervisor.addDevice(ctx, device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), vhostuserDev)
case config.DeviceVFIO: case config.DeviceVFIO:
vfioDevs := device.GetDeviceInfo().([]*config.VFIODev) vfioDevs := device.GetDeviceInfo().([]*config.VFIODev)
for _, d := range vfioDevs { for _, d := range vfioDevs {
return s.hypervisor.addDevice(*d, vfioDev) return s.hypervisor.addDevice(ctx, *d, vfioDev)
} }
default: default:
s.Logger().WithField("device-type", device.DeviceType()). s.Logger().WithField("device-type", device.DeviceType()).
@ -1785,7 +1787,7 @@ func (s *Sandbox) AppendDevice(device api.Device) error {
} }
// AddDevice will add a device to sandbox // AddDevice will add a device to sandbox
func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) { func (s *Sandbox) AddDevice(ctx context.Context, info config.DeviceInfo) (api.Device, error) {
if s.devManager == nil { if s.devManager == nil {
return nil, fmt.Errorf("device manager isn't initialized") return nil, fmt.Errorf("device manager isn't initialized")
} }
@ -1801,12 +1803,12 @@ func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
} }
}() }()
if err = s.devManager.AttachDevice(b.DeviceID(), s); err != nil { if err = s.devManager.AttachDevice(ctx, b.DeviceID(), s); err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
s.devManager.DetachDevice(b.DeviceID(), s) s.devManager.DetachDevice(ctx, b.DeviceID(), s)
} }
}() }()
@ -1819,7 +1821,7 @@ func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
// on the sum of container requests, plus default CPUs for the VM. The same is done for memory. // on the sum of container requests, plus default CPUs for the VM. The same is done for memory.
// If changes in memory or CPU are made, the VM will be updated and the agent will online the // If changes in memory or CPU are made, the VM will be updated and the agent will online the
// applicable CPU and memory. // applicable CPU and memory.
func (s *Sandbox) updateResources() error { func (s *Sandbox) updateResources(ctx context.Context) error {
if s == nil { if s == nil {
return errors.New("sandbox is nil") return errors.New("sandbox is nil")
} }
@ -1841,7 +1843,7 @@ func (s *Sandbox) updateResources() error {
// Update VCPUs // Update VCPUs
s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs") s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs")
oldCPUs, newCPUs, err := s.hypervisor.resizeVCPUs(sandboxVCPUs) oldCPUs, newCPUs, err := s.hypervisor.resizeVCPUs(ctx, sandboxVCPUs)
if err != nil { if err != nil {
return err return err
} }
@ -1851,7 +1853,7 @@ func (s *Sandbox) updateResources() error {
if oldCPUs < newCPUs { if oldCPUs < newCPUs {
vcpusAdded := newCPUs - oldCPUs vcpusAdded := newCPUs - oldCPUs
s.Logger().Debugf("Request to onlineCPUMem with %d CPUs", vcpusAdded) s.Logger().Debugf("Request to onlineCPUMem with %d CPUs", vcpusAdded)
if err := s.agent.onlineCPUMem(vcpusAdded, true); err != nil { if err := s.agent.onlineCPUMem(ctx, vcpusAdded, true); err != nil {
return err return err
} }
} }
@ -1859,7 +1861,7 @@ func (s *Sandbox) updateResources() error {
// Update Memory // Update Memory
s.Logger().WithField("memory-sandbox-size-byte", sandboxMemoryByte).Debugf("Request to hypervisor to update memory") s.Logger().WithField("memory-sandbox-size-byte", sandboxMemoryByte).Debugf("Request to hypervisor to update memory")
newMemory, updatedMemoryDevice, err := s.hypervisor.resizeMemory(uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe) newMemory, updatedMemoryDevice, err := s.hypervisor.resizeMemory(ctx, uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
if err != nil { if err != nil {
return err return err
} }
@ -1867,11 +1869,11 @@ func (s *Sandbox) updateResources() error {
if s.state.GuestMemoryHotplugProbe && updatedMemoryDevice.addr != 0 { if s.state.GuestMemoryHotplugProbe && updatedMemoryDevice.addr != 0 {
// notify the guest kernel about memory hot-add event, before onlining them // notify the guest kernel about memory hot-add event, before onlining them
s.Logger().Debugf("notify guest kernel memory hot-add event via probe interface, memory device located at 0x%x", updatedMemoryDevice.addr) s.Logger().Debugf("notify guest kernel memory hot-add event via probe interface, memory device located at 0x%x", updatedMemoryDevice.addr)
if err := s.agent.memHotplugByProbe(updatedMemoryDevice.addr, uint32(updatedMemoryDevice.sizeMB), s.state.GuestMemoryBlockSizeMB); err != nil { if err := s.agent.memHotplugByProbe(ctx, updatedMemoryDevice.addr, uint32(updatedMemoryDevice.sizeMB), s.state.GuestMemoryBlockSizeMB); err != nil {
return err return err
} }
} }
if err := s.agent.onlineCPUMem(0, false); err != nil { if err := s.agent.onlineCPUMem(ctx, 0, false); err != nil {
return err return err
} }
return nil return nil
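
Condensed, the resize-then-online ordering these hunks enforce looks roughly like the sketch below. The hypervisor and agent method signatures are taken from this diff; the function name and the sizing parameters (sandboxVCPUs, sandboxMemoryByte) are placeholders for logic elided from the hunks, so treat this as an illustration, not the source.

// updateResourcesSketch illustrates the ctx-threaded hot-add flow:
// resize through the hypervisor first, then have the agent online
// the added resources inside the guest.
func updateResourcesSketch(ctx context.Context, s *Sandbox, sandboxVCPUs uint32, sandboxMemoryByte int64) error {
	// CPU: hotplug vCPUs, then online only the ones actually added.
	oldCPUs, newCPUs, err := s.hypervisor.resizeVCPUs(ctx, sandboxVCPUs)
	if err != nil {
		return err
	}
	if oldCPUs < newCPUs {
		if err := s.agent.onlineCPUMem(ctx, newCPUs-oldCPUs, true); err != nil {
			return err
		}
	}
	// Memory: hotplug, optionally announce the new device address via
	// the probe interface, and only then online it.
	_, dev, err := s.hypervisor.resizeMemory(ctx, uint32(sandboxMemoryByte>>utils.MibToBytesShift),
		s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
	if err != nil {
		return err
	}
	if s.state.GuestMemoryHotplugProbe && dev.addr != 0 {
		if err := s.agent.memHotplugByProbe(ctx, dev.addr, uint32(dev.sizeMB), s.state.GuestMemoryBlockSizeMB); err != nil {
			return err
		}
	}
	// 0 vCPUs with cpuOnly=false asks the agent to online memory only.
	return s.agent.onlineCPUMem(ctx, 0, false)
}
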
@ -1937,7 +1939,7 @@ func (s *Sandbox) GetHypervisorType() string {
// 1) get the v1constraints cgroup associated with the stored cgroup path // 1) get the v1constraints cgroup associated with the stored cgroup path
// 2) (re-)add hypervisor vCPU threads to the appropriate cgroup // 2) (re-)add hypervisor vCPU threads to the appropriate cgroup
// 3) If we are managing sandbox cgroup, update the v1constraints cgroup size // 3) If we are managing sandbox cgroup, update the v1constraints cgroup size
func (s *Sandbox) cgroupsUpdate() error { func (s *Sandbox) cgroupsUpdate(ctx context.Context) error {
// If Kata is configured for SandboxCgroupOnly, the VMM and its processes are already // If Kata is configured for SandboxCgroupOnly, the VMM and its processes are already
// in the Kata sandbox cgroup (inherited). Check to see if sandbox cpuset needs to be // in the Kata sandbox cgroup (inherited). Check to see if sandbox cpuset needs to be
@ -1965,7 +1967,7 @@ func (s *Sandbox) cgroupsUpdate() error {
return fmt.Errorf("Could not load cgroup %v: %v", s.state.CgroupPath, err) return fmt.Errorf("Could not load cgroup %v: %v", s.state.CgroupPath, err)
} }
if err := s.constrainHypervisor(cgroup); err != nil { if err := s.constrainHypervisor(ctx, cgroup); err != nil {
return err return err
} }
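
Tied back to the three numbered steps above, cgroupsUpdate's flow is roughly the sketch below; the cgroup-loading helper name is an assumption, while the error message and the constrainHypervisor call come from this hunk.

func cgroupsUpdateSketch(ctx context.Context, s *Sandbox) error {
	// 1) load the v1constraints cgroup stored for the sandbox
	cgroup, err := loadV1ConstraintsCgroup(s.state.CgroupPath) // hypothetical helper name
	if err != nil {
		return fmt.Errorf("Could not load cgroup %v: %v", s.state.CgroupPath, err)
	}
	// 2) (re-)add hypervisor vCPU threads; passing ctx nests their spans here
	if err := s.constrainHypervisor(ctx, cgroup); err != nil {
		return err
	}
	// 3) resizing the cgroup when Kata manages the sandbox cgroup is elided
	return nil
}
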
@ -2035,7 +2037,7 @@ func (s *Sandbox) cgroupsDelete() error {
} }
// constrainHypervisor will place the VMM and vCPU threads into cgroups. // constrainHypervisor will place the VMM and vCPU threads into cgroups.
func (s *Sandbox) constrainHypervisor(cgroup cgroups.Cgroup) error { func (s *Sandbox) constrainHypervisor(ctx context.Context, cgroup cgroups.Cgroup) error {
// VMM threads are only placed into the constrained cgroup if SandboxCgroupOnly is being set. // VMM threads are only placed into the constrained cgroup if SandboxCgroupOnly is being set.
// This is the "correct" behavior, but if the parent cgroup isn't set up correctly to take // This is the "correct" behavior, but if the parent cgroup isn't set up correctly to take
// Kata/VMM into account, Kata may fail to boot due to being overconstrained. // Kata/VMM into account, Kata may fail to boot due to being overconstrained.
@ -2078,7 +2080,7 @@ func (s *Sandbox) constrainHypervisor(cgroup cgroups.Cgroup) error {
// when a new container joins, new CPUs could be hotplugged, so we // when a new container joins, new CPUs could be hotplugged, so we
// have to query fresh vCPU info from the hypervisor every time. // have to query fresh vCPU info from the hypervisor every time.
tids, err := s.hypervisor.getThreadIDs() tids, err := s.hypervisor.getThreadIDs(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to get thread ids from hypervisor: %v", err) return fmt.Errorf("failed to get thread ids from hypervisor: %v", err)
} }
@ -2242,8 +2244,8 @@ func (s *Sandbox) GetPatchedOCISpec() *specs.Spec {
return nil return nil
} }
func (s *Sandbox) GetOOMEvent() (string, error) { func (s *Sandbox) GetOOMEvent(ctx context.Context) (string, error) {
return s.agent.getOOMEvent() return s.agent.getOOMEvent(ctx)
} }
func (s *Sandbox) GetAgentURL() (string, error) { func (s *Sandbox) GetAgentURL() (string, error) {
@ -2310,7 +2312,7 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
// This sandbox already exists, we don't need to recreate the containers in the guest. // This sandbox already exists, we don't need to recreate the containers in the guest.
// We only need to fetch the containers from storage and create the container structs. // We only need to fetch the containers from storage and create the container structs.
if err := sandbox.fetchContainers(); err != nil { if err := sandbox.fetchContainers(ctx); err != nil {
return nil, err return nil, err
} }
@ -2321,7 +2323,7 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
// adds them to the sandbox. It does not create the containers // adds them to the sandbox. It does not create the containers
// in the guest. This should only be used when fetching a // in the guest. This should only be used when fetching a
// sandbox that already exists. // sandbox that already exists.
func (s *Sandbox) fetchContainers() error { func (s *Sandbox) fetchContainers(ctx context.Context) error {
for i, contConfig := range s.config.Containers { for i, contConfig := range s.config.Containers {
// Add spec from bundle path // Add spec from bundle path
spec, err := compatoci.GetContainerSpec(contConfig.Annotations) spec, err := compatoci.GetContainerSpec(contConfig.Annotations)
@ -2331,7 +2333,7 @@ func (s *Sandbox) fetchContainers() error {
contConfig.CustomSpec = &spec contConfig.CustomSpec = &spec
s.config.Containers[i] = contConfig s.config.Containers[i] = contConfig
c, err := newContainer(s, &s.config.Containers[i]) c, err := newContainer(ctx, s, &s.config.Containers[i])
if err != nil { if err != nil {
return err return err
} }
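
Taken together, the sandbox.go changes make the caller the owner of the trace lineage: whatever context it passes down becomes the parent of every container-level span. A minimal illustrative caller, with a hypothetical function and span name:

// restartContainer sketches the ctx-threaded call style; the span
// started here becomes the parent of the spans created inside
// StopContainer and StartContainer.
func restartContainer(ctx context.Context, s *Sandbox, id string) error {
	span, ctx := s.trace(ctx, "restartContainer")
	defer span.End()

	if _, err := s.StopContainer(ctx, id, false); err != nil {
		return err
	}
	_, err := s.StartContainer(ctx, id)
	return err
}
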

View File

@ -6,6 +6,8 @@
package virtcontainers package virtcontainers
import ( import (
"context"
mutils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils" mutils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@ -126,8 +128,8 @@ func (s *Sandbox) UpdateRuntimeMetrics() error {
return nil return nil
} }
func (s *Sandbox) GetAgentMetrics() (string, error) { func (s *Sandbox) GetAgentMetrics(ctx context.Context) (string, error) {
r, err := s.agent.getAgentMetrics(&grpc.GetMetricsRequest{}) r, err := s.agent.getAgentMetrics(ctx, &grpc.GetMetricsRequest{})
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -326,7 +326,7 @@ func TestSandboxSetSandboxAndContainerState(t *testing.T) {
} }
// persist to disk // persist to disk
err = p.storeSandbox() err = p.storeSandbox(p.ctx)
assert.NoError(err) assert.NoError(err)
newSandboxState := types.SandboxState{ newSandboxState := types.SandboxState{
@ -876,12 +876,12 @@ func TestCreateContainer(t *testing.T) {
contID := "999" contID := "999"
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1") assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1")
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.NotNil(t, err, "Should failed to create a duplicated container") assert.NotNil(t, err, "Should failed to create a duplicated container")
assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1") assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1")
} }
@ -896,7 +896,7 @@ func TestDeleteContainer(t *testing.T) {
assert.NotNil(t, err, "Deletng non-existing container should fail") assert.NotNil(t, err, "Deletng non-existing container should fail")
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
_, err = s.DeleteContainer(contID) _, err = s.DeleteContainer(context.Background(), contID)
@ -909,17 +909,17 @@ func TestStartContainer(t *testing.T) {
defer cleanUp() defer cleanUp()
contID := "999" contID := "999"
_, err = s.StartContainer(contID) _, err = s.StartContainer(context.Background(), contID)
assert.NotNil(t, err, "Starting non-existing container should fail") assert.NotNil(t, err, "Starting non-existing container should fail")
err = s.Start() err = s.Start(context.Background())
assert.Nil(t, err, "Failed to start sandbox: %v", err) assert.Nil(t, err, "Failed to start sandbox: %v", err)
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
_, err = s.StartContainer(contID) _, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err) assert.Nil(t, err, "Start container failed: %v", err)
} }
@ -933,7 +933,7 @@ func TestStatusContainer(t *testing.T) {
assert.NotNil(t, err, "Status non-existing container should fail") assert.NotNil(t, err, "Status non-existing container should fail")
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
_, err = s.StatusContainer(contID) _, err = s.StatusContainer(contID)
@ -962,7 +962,7 @@ func TestEnterContainer(t *testing.T) {
assert.NotNil(t, err, "Entering non-existing container should fail") assert.NotNil(t, err, "Entering non-existing container should fail")
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
_, _, err = s.EnterContainer(contID, cmd) _, _, err = s.EnterContainer(context.Background(), contID, cmd)
@ -987,7 +987,7 @@ func TestDeleteStoreWhenCreateContainerFail(t *testing.T) {
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
contConfig.RootFs = RootFs{Target: "", Mounted: true} contConfig.RootFs = RootFs{Target: "", Mounted: true}
s.state.CgroupPath = filepath.Join(testDir, "bad-cgroup") s.state.CgroupPath = filepath.Join(testDir, "bad-cgroup")
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.NotNil(t, err, "Should fail to create container due to wrong cgroup") assert.NotNil(t, err, "Should fail to create container due to wrong cgroup")
} }
@ -1051,13 +1051,13 @@ func TestWaitProcess(t *testing.T) {
assert.NotNil(t, err, "Wait process in non-existing container should fail") assert.NotNil(t, err, "Wait process in non-existing container should fail")
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
_, err = s.WaitProcess(contID, execID) _, err = s.WaitProcess(contID, execID)
assert.Nil(t, err, "Wait process in ready container failed: %v", err) assert.Nil(t, err, "Wait process in ready container failed: %v", err)
_, err = s.StartContainer(contID) _, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err) assert.Nil(t, err, "Start container failed: %v", err)
_, err = s.WaitProcess(contID, execID) _, err = s.WaitProcess(contID, execID)
@ -1081,13 +1081,13 @@ func TestSignalProcess(t *testing.T) {
assert.NotNil(t, err, "Wait process in non-existing container should fail") assert.NotNil(t, err, "Wait process in non-existing container should fail")
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
err = s.SignalProcess(contID, execID, syscall.SIGKILL, true) err = s.SignalProcess(contID, execID, syscall.SIGKILL, true)
assert.Nil(t, err, "Wait process in ready container failed: %v", err) assert.Nil(t, err, "Wait process in ready container failed: %v", err)
_, err = s.StartContainer(contID) _, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err) assert.Nil(t, err, "Start container failed: %v", err)
err = s.SignalProcess(contID, execID, syscall.SIGKILL, false) err = s.SignalProcess(contID, execID, syscall.SIGKILL, false)
@ -1111,13 +1111,13 @@ func TestWinsizeProcess(t *testing.T) {
assert.NotNil(t, err, "Winsize process in non-existing container should fail") assert.NotNil(t, err, "Winsize process in non-existing container should fail")
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
err = s.WinsizeProcess(contID, execID, 100, 200) err = s.WinsizeProcess(contID, execID, 100, 200)
assert.Nil(t, err, "Winsize process in ready container failed: %v", err) assert.Nil(t, err, "Winsize process in ready container failed: %v", err)
_, err = s.StartContainer(contID) _, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err) assert.Nil(t, err, "Start container failed: %v", err)
err = s.WinsizeProcess(contID, execID, 100, 200) err = s.WinsizeProcess(contID, execID, 100, 200)
@ -1141,13 +1141,13 @@ func TestContainerProcessIOStream(t *testing.T) {
assert.NotNil(t, err, "Winsize process in non-existing container should fail") assert.NotNil(t, err, "Winsize process in non-existing container should fail")
contConfig := newTestContainerConfigNoop(contID) contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig) _, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err) assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
_, _, _, err = s.IOStream(contID, execID) _, _, _, err = s.IOStream(contID, execID)
assert.Nil(t, err, "Winsize process in ready container failed: %v", err) assert.Nil(t, err, "Winsize process in ready container failed: %v", err)
_, err = s.StartContainer(contID) _, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err) assert.Nil(t, err, "Start container failed: %v", err)
_, _, _, err = s.IOStream(contID, execID) _, _, _, err = s.IOStream(contID, execID)
@ -1204,37 +1204,37 @@ func TestAttachBlockDevice(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, index, 0) assert.Equal(t, index, 0)
err = device.Attach(sandbox) err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
index, err = sandbox.getAndSetSandboxBlockIndex() index, err = sandbox.getAndSetSandboxBlockIndex()
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, index, 2) assert.Equal(t, index, 2)
err = device.Detach(sandbox) err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
index, err = sandbox.getAndSetSandboxBlockIndex() index, err = sandbox.getAndSetSandboxBlockIndex()
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, index, 1) assert.Equal(t, index, 1)
container.state.State = types.StateReady container.state.State = types.StateReady
err = device.Attach(sandbox) err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
err = device.Detach(sandbox) err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
container.sandbox.config.HypervisorConfig.BlockDeviceDriver = config.VirtioSCSI container.sandbox.config.HypervisorConfig.BlockDeviceDriver = config.VirtioSCSI
err = device.Attach(sandbox) err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
err = device.Detach(sandbox) err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
container.state.State = types.StateReady container.state.State = types.StateReady
err = device.Attach(sandbox) err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
err = device.Detach(sandbox) err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err) assert.Nil(t, err)
} }
@ -1283,7 +1283,7 @@ func TestPreAddDevice(t *testing.T) {
} }
// Add a mount device for a mountpoint before container's creation // Add a mount device for a mountpoint before container's creation
dev, err := sandbox.AddDevice(deviceInfo) dev, err := sandbox.AddDevice(context.Background(), deviceInfo)
assert.Nil(t, err) assert.Nil(t, err)
// in the Frakti use case, here we will create and start the container // in the Frakti use case, here we will create and start the container
@ -1419,7 +1419,7 @@ func TestSandboxUpdateResources(t *testing.T) {
nil) nil)
assert.NoError(t, err) assert.NoError(t, err)
err = s.updateResources() err = s.updateResources(context.Background())
assert.NoError(t, err) assert.NoError(t, err)
containerMemLimit := int64(1000) containerMemLimit := int64(1000)
@ -1437,7 +1437,7 @@ func TestSandboxUpdateResources(t *testing.T) {
c.Resources.CPU.Period = &containerCPUPeriod c.Resources.CPU.Period = &containerCPUPeriod
c.Resources.CPU.Quota = &containerCPUQouta c.Resources.CPU.Quota = &containerCPUQouta
} }
err = s.updateResources() err = s.updateResources(context.Background())
assert.NoError(t, err) assert.NoError(t, err)
} }

View File

@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/ns"
@ -67,12 +68,12 @@ func (endpoint *TapEndpoint) SetProperties(properties NetworkInfo) {
} }
// Attach for tap endpoint adds the tap interface to the hypervisor. // Attach for tap endpoint adds the tap interface to the hypervisor.
func (endpoint *TapEndpoint) Attach(s *Sandbox) error { func (endpoint *TapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return fmt.Errorf("TapEndpoint does not support Attach, if you're using docker please use --net none") return fmt.Errorf("TapEndpoint does not support Attach, if you're using docker please use --net none")
} }
// Detach for the tap endpoint tears down the tap // Detach for the tap endpoint tears down the tap
func (endpoint *TapEndpoint) Detach(netNsCreated bool, netNsPath string) error { func (endpoint *TapEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
if !netNsCreated && netNsPath != "" { if !netNsCreated && netNsPath != "" {
return nil return nil
} }
@ -84,14 +85,14 @@ func (endpoint *TapEndpoint) Detach(netNsCreated bool, netNsPath string) error {
} }
// HotAttach for the tap endpoint uses hot plug device // HotAttach for the tap endpoint uses hot plug device
func (endpoint *TapEndpoint) HotAttach(h hypervisor) error { func (endpoint *TapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
networkLogger().Info("Hot attaching tap endpoint") networkLogger().Info("Hot attaching tap endpoint")
if err := tapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil { if err := tapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tap ep") networkLogger().WithError(err).Error("Error bridging tap ep")
return err return err
} }
if _, err := h.hotplugAddDevice(endpoint, netDev); err != nil { if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error attach tap ep") networkLogger().WithError(err).Error("Error attach tap ep")
return err return err
} }
@ -99,7 +100,7 @@ func (endpoint *TapEndpoint) HotAttach(h hypervisor) error {
} }
// HotDetach for the tap endpoint uses hot pull device // HotDetach for the tap endpoint uses hot pull device
func (endpoint *TapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error { func (endpoint *TapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
networkLogger().Info("Hot detaching tap endpoint") networkLogger().Info("Hot detaching tap endpoint")
if err := doNetNS(netNsPath, func(_ ns.NetNS) error { if err := doNetNS(netNsPath, func(_ ns.NetNS) error {
return unTapNetwork(endpoint.TapInterface.TAPIface.Name) return unTapNetwork(endpoint.TapInterface.TAPIface.Name)
@ -107,7 +108,7 @@ func (endpoint *TapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPat
networkLogger().WithError(err).Warn("Error un-bridging tap ep") networkLogger().WithError(err).Warn("Error un-bridging tap ep")
} }
if _, err := h.hotplugRemoveDevice(endpoint, netDev); err != nil { if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error detach tap ep") networkLogger().WithError(err).Error("Error detach tap ep")
return err return err
} }
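
The same four-method reshaping repeats for the tuntap, veth, and vhost-user endpoints below, which implies the endpoint interface now carries a context on every attach/detach path. Inferred from these implementations (not quoted from network.go), the relevant method set is roughly:

// Partial, inferred endpoint method set after ctx-threading; the
// real interface also has property and type accessors.
type endpointSketch interface {
	Attach(ctx context.Context, s *Sandbox) error
	Detach(ctx context.Context, netNsCreated bool, netNsPath string) error
	HotAttach(ctx context.Context, h hypervisor) error
	HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error
}
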

View File

@ -7,6 +7,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"net" "net"
@ -69,17 +70,17 @@ func (endpoint *TuntapEndpoint) SetProperties(properties NetworkInfo) {
} }
// Attach for tap endpoint adds the tap interface to the hypervisor. // Attach for tap endpoint adds the tap interface to the hypervisor.
func (endpoint *TuntapEndpoint) Attach(s *Sandbox) error { func (endpoint *TuntapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor h := s.hypervisor
if err := xConnectVMNetwork(endpoint, h); err != nil { if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual endpoint") networkLogger().WithError(err).Error("Error bridging virtual endpoint")
return err return err
} }
return h.addDevice(endpoint, netDev) return h.addDevice(ctx, endpoint, netDev)
} }
// Detach for the tap endpoint tears down the tap // Detach for the tap endpoint tears down the tap
func (endpoint *TuntapEndpoint) Detach(netNsCreated bool, netNsPath string) error { func (endpoint *TuntapEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
if !netNsCreated && netNsPath != "" { if !netNsCreated && netNsPath != "" {
return nil return nil
} }
@ -91,14 +92,14 @@ func (endpoint *TuntapEndpoint) Detach(netNsCreated bool, netNsPath string) erro
} }
// HotAttach for the tap endpoint uses hot plug device // HotAttach for the tap endpoint uses hot plug device
func (endpoint *TuntapEndpoint) HotAttach(h hypervisor) error { func (endpoint *TuntapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
networkLogger().Info("Hot attaching tap endpoint") networkLogger().Info("Hot attaching tap endpoint")
if err := tuntapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil { if err := tuntapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tap ep") networkLogger().WithError(err).Error("Error bridging tap ep")
return err return err
} }
if _, err := h.hotplugAddDevice(endpoint, netDev); err != nil { if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error attach tap ep") networkLogger().WithError(err).Error("Error attach tap ep")
return err return err
} }
@ -106,7 +107,7 @@ func (endpoint *TuntapEndpoint) HotAttach(h hypervisor) error {
} }
// HotDetach for the tap endpoint uses hot pull device // HotDetach for the tap endpoint uses hot pull device
func (endpoint *TuntapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error { func (endpoint *TuntapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
networkLogger().Info("Hot detaching tap endpoint") networkLogger().Info("Hot detaching tap endpoint")
if err := doNetNS(netNsPath, func(_ ns.NetNS) error { if err := doNetNS(netNsPath, func(_ ns.NetNS) error {
return unTuntapNetwork(endpoint.TuntapInterface.TAPIface.Name) return unTuntapNetwork(endpoint.TuntapInterface.TAPIface.Name)
@ -114,7 +115,7 @@ func (endpoint *TuntapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNs
networkLogger().WithError(err).Warn("Error un-bridging tap ep") networkLogger().WithError(err).Warn("Error un-bridging tap ep")
} }
if _, err := h.hotplugRemoveDevice(endpoint, netDev); err != nil { if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error detach tap ep") networkLogger().WithError(err).Error("Error detach tap ep")
return err return err
} }

View File

@ -5,7 +5,10 @@
package types package types
import "fmt" import (
"context"
"fmt"
)
// Type represents a type of bus and bridge. // Type represents a type of bus and bridge.
type Type string type Type string
@ -64,7 +67,7 @@ func NewBridge(bt Type, id string, devices map[uint32]string, addr int) Bridge {
} }
} }
func (b *Bridge) AddDevice(ID string) (uint32, error) { func (b *Bridge) AddDevice(ctx context.Context, ID string) (uint32, error) {
var addr uint32 var addr uint32
// looking for the first available address // looking for the first available address
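
The comment above describes a first-fit scan over the bridge's address map. A sketch of that allocation, reconstructed from the NewBridge signature and the test below (the error text is an assumption, and the real AddDevice also takes the new ctx parameter):

func (b *Bridge) addDeviceSketch(ID string) (uint32, error) {
	// scan addresses 1..MaxCapacity for the first free slot
	for addr := uint32(1); addr <= b.MaxCapacity; addr++ {
		if _, taken := b.Devices[addr]; !taken {
			b.Devices[addr] = ID
			return addr, nil
		}
	}
	// full bridge: callers expect a zero address plus an error
	return 0, fmt.Errorf("bridge is full, cannot add device %s", ID)
}
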

View File

@ -6,6 +6,7 @@
package types package types
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
@ -18,7 +19,7 @@ func testAddRemoveDevice(t *testing.T, b *Bridge) {
// add device // add device
devID := "abc123" devID := "abc123"
addr, err := b.AddDevice(devID) addr, err := b.AddDevice(context.Background(), devID)
assert.NoError(err) assert.NoError(err)
if addr < 1 { if addr < 1 {
assert.Fail("address cannot be less than 1") assert.Fail("address cannot be less than 1")
@ -36,7 +37,7 @@ func testAddRemoveDevice(t *testing.T, b *Bridge) {
for i := uint32(1); i <= b.MaxCapacity; i++ { for i := uint32(1); i <= b.MaxCapacity; i++ {
b.Devices[i] = fmt.Sprintf("%d", i) b.Devices[i] = fmt.Sprintf("%d", i)
} }
addr, err = b.AddDevice(devID) addr, err = b.AddDevice(context.Background(), devID)
assert.Error(err) assert.Error(err)
if addr != 0 { if addr != 0 {
assert.Fail("address should be 0") assert.Fail("address should be 0")

View File

@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"github.com/containernetworking/plugins/pkg/ns" "github.com/containernetworking/plugins/pkg/ns"
@ -90,19 +91,19 @@ func (endpoint *VethEndpoint) SetProperties(properties NetworkInfo) {
// Attach for veth endpoint bridges the network pair and adds the // Attach for veth endpoint bridges the network pair and adds the
// tap interface of the network pair to the hypervisor. // tap interface of the network pair to the hypervisor.
func (endpoint *VethEndpoint) Attach(s *Sandbox) error { func (endpoint *VethEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor h := s.hypervisor
if err := xConnectVMNetwork(endpoint, h); err != nil { if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual endpoint") networkLogger().WithError(err).Error("Error bridging virtual endpoint")
return err return err
} }
return h.addDevice(endpoint, netDev) return h.addDevice(ctx, endpoint, netDev)
} }
// Detach for the veth endpoint tears down the tap and bridge // Detach for the veth endpoint tears down the tap and bridge
// created for the veth interface. // created for the veth interface.
func (endpoint *VethEndpoint) Detach(netNsCreated bool, netNsPath string) error { func (endpoint *VethEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
// The network namespace would have been deleted at this point // The network namespace would have been deleted at this point
// if it has not been created by virtcontainers. // if it has not been created by virtcontainers.
if !netNsCreated { if !netNsCreated {
@ -115,13 +116,13 @@ func (endpoint *VethEndpoint) Detach(netNsCreated bool, netNsPath string) error
} }
// HotAttach for the veth endpoint uses hot plug device // HotAttach for the veth endpoint uses hot plug device
func (endpoint *VethEndpoint) HotAttach(h hypervisor) error { func (endpoint *VethEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
if err := xConnectVMNetwork(endpoint, h); err != nil { if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual ep") networkLogger().WithError(err).Error("Error bridging virtual ep")
return err return err
} }
if _, err := h.hotplugAddDevice(endpoint, netDev); err != nil { if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error attach virtual ep") networkLogger().WithError(err).Error("Error attach virtual ep")
return err return err
} }
@ -129,7 +130,7 @@ func (endpoint *VethEndpoint) HotAttach(h hypervisor) error {
} }
// HotDetach for the veth endpoint uses hot pull device // HotDetach for the veth endpoint uses hot pull device
func (endpoint *VethEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error { func (endpoint *VethEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
if !netNsCreated { if !netNsCreated {
return nil return nil
} }
@ -140,7 +141,7 @@ func (endpoint *VethEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPa
networkLogger().WithError(err).Warn("Error un-bridging virtual ep") networkLogger().WithError(err).Warn("Error un-bridging virtual ep")
} }
if _, err := h.hotplugRemoveDevice(endpoint, netDev); err != nil { if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error detach virtual ep") networkLogger().WithError(err).Error("Error detach virtual ep")
return err return err
} }

View File

@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"os" "os"
@ -75,7 +76,7 @@ func (endpoint *VhostUserEndpoint) NetworkPair() *NetworkInterfacePair {
} }
// Attach for vhostuser endpoint // Attach for vhostuser endpoint
func (endpoint *VhostUserEndpoint) Attach(s *Sandbox) error { func (endpoint *VhostUserEndpoint) Attach(ctx context.Context, s *Sandbox) error {
// Generate a unique ID to be used for hypervisor commandline fields // Generate a unique ID to be used for hypervisor commandline fields
randBytes, err := utils.GenerateRandomBytes(8) randBytes, err := utils.GenerateRandomBytes(8)
if err != nil { if err != nil {
@ -90,21 +91,21 @@ func (endpoint *VhostUserEndpoint) Attach(s *Sandbox) error {
Type: config.VhostUserNet, Type: config.VhostUserNet,
} }
return s.hypervisor.addDevice(d, vhostuserDev) return s.hypervisor.addDevice(ctx, d, vhostuserDev)
} }
// Detach for vhostuser endpoint // Detach for vhostuser endpoint
func (endpoint *VhostUserEndpoint) Detach(netNsCreated bool, netNsPath string) error { func (endpoint *VhostUserEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
return nil return nil
} }
// HotAttach for vhostuser endpoint not supported yet // HotAttach for vhostuser endpoint not supported yet
func (endpoint *VhostUserEndpoint) HotAttach(h hypervisor) error { func (endpoint *VhostUserEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
return fmt.Errorf("VhostUserEndpoint does not support Hot attach") return fmt.Errorf("VhostUserEndpoint does not support Hot attach")
} }
// HotDetach for vhostuser endpoint not supported yet // HotDetach for vhostuser endpoint not supported yet
func (endpoint *VhostUserEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error { func (endpoint *VhostUserEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("VhostUserEndpoint does not support Hot detach") return fmt.Errorf("VhostUserEndpoint does not support Hot detach")
} }
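Note that even the operations vhost-user rejects outright now take a ctx; keeping the signature uniform lets generic network code thread one context through any endpoint implementation. A trimmed-down, hypothetical rendering of that shape (plugger and vhostUserLike are illustrative names, not kata types):

package main

import (
	"context"
	"fmt"
)

// plugger is a hypothetical reduction of the endpoint interface: ctx is
// always the first parameter, whether or not the operation is supported.
type plugger interface {
	HotAttach(ctx context.Context) error
	HotDetach(ctx context.Context) error
}

// vhostUserLike rejects hot plug the way VhostUserEndpoint does above.
type vhostUserLike struct{}

func (vhostUserLike) HotAttach(ctx context.Context) error {
	return fmt.Errorf("vhost-user endpoint does not support hot attach")
}

func (vhostUserLike) HotDetach(ctx context.Context) error {
	return fmt.Errorf("vhost-user endpoint does not support hot detach")
}

func main() {
	var p plugger = vhostUserLike{}
	fmt.Println(p.HotAttach(context.Background()))
}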

View File

@ -6,6 +6,7 @@
package virtcontainers package virtcontainers
import ( import (
"context"
"fmt" "fmt"
"net" "net"
"os" "os"
@ -82,7 +83,7 @@ func TestVhostUserEndpointAttach(t *testing.T) {
hypervisor: &mockHypervisor{}, hypervisor: &mockHypervisor{},
} }
err := v.Attach(s) err := v.Attach(context.Background(), s)
assert.NoError(err) assert.NoError(err)
} }
@ -96,7 +97,7 @@ func TestVhostUserEndpoint_HotAttach(t *testing.T) {
h := &mockHypervisor{} h := &mockHypervisor{}
err := v.HotAttach(h) err := v.HotAttach(context.Background(), h)
assert.Error(err) assert.Error(err)
} }
@ -110,7 +111,7 @@ func TestVhostUserEndpoint_HotDetach(t *testing.T) {
h := &mockHypervisor{} h := &mockHypervisor{}
err := v.HotDetach(h, true, "") err := v.HotDetach(context.Background(), h, true, "")
assert.Error(err) assert.Error(err)
} }
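On the test side the convention is context.Background(): a unit test has no caller span, so the background context is the natural trace root. A self-contained sketch of the same assertion against a hypothetical stub endpoint (not the kata test code):

package endpoint_test

import (
	"context"
	"errors"
	"testing"
)

// stubEndpoint is a hypothetical stand-in for an endpoint that rejects
// hot attach, mirroring the vhost-user behaviour exercised above.
type stubEndpoint struct{}

func (stubEndpoint) HotAttach(ctx context.Context) error {
	return errors.New("hot attach not supported")
}

func TestHotAttachUnsupported(t *testing.T) {
	// A test has no parent span, so context.Background() is the trace root.
	if err := (stubEndpoint{}).HotAttach(context.Background()); err == nil {
		t.Fatal("expected hot attach to be rejected")
	}
}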

View File

@ -35,7 +35,7 @@ type Virtiofsd interface {
// Start virtiofsd, return pid of virtiofsd process // Start virtiofsd, return pid of virtiofsd process
Start(context.Context) (pid int, err error) Start(context.Context) (pid int, err error)
// Stop virtiofsd process // Stop virtiofsd process
Stop() error Stop(context.Context) error
} }
// Helper function to check virtiofsd is serving // Helper function to check virtiofsd is serving
@ -84,7 +84,7 @@ func (v *virtiofsd) getSocketFD() (*os.File, error) {
// Start the virtiofsd daemon // Start the virtiofsd daemon
func (v *virtiofsd) Start(ctx context.Context) (int, error) { func (v *virtiofsd) Start(ctx context.Context) (int, error) {
span, _ := v.trace("Start") span, _ := v.trace(ctx, "Start")
defer span.End() defer span.End()
pid := 0 pid := 0
@ -131,8 +131,8 @@ func (v *virtiofsd) Start(ctx context.Context) (int, error) {
return pid, socketFD.Close() return pid, socketFD.Close()
} }
func (v *virtiofsd) Stop() error { func (v *virtiofsd) Stop(ctx context.Context) error {
if err := v.kill(); err != nil { if err := v.kill(ctx); err != nil {
return nil return nil
} }
@ -204,13 +204,13 @@ func (v *virtiofsd) Logger() *log.Entry {
return virtLog.WithField("subsystem", "virtiofsd") return virtLog.WithField("subsystem", "virtiofsd")
} }
func (v *virtiofsd) trace(name string) (otelTrace.Span, context.Context) { func (v *virtiofsd) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
if v.ctx == nil { if parent == nil {
v.ctx = context.Background() parent = context.Background()
} }
tracer := otel.Tracer("kata") tracer := otel.Tracer("kata")
ctx, span := tracer.Start(v.ctx, name) ctx, span := tracer.Start(parent, name)
span.SetAttributes(label.Key("subsystem").String("virtiofsd")) span.SetAttributes(label.Key("subsystem").String("virtiofsd"))
return span, ctx return span, ctx
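This helper rework is the heart of the commit: the span's parent now comes from the caller rather than from a context cached on the struct, with context.Background() only as a nil-parent fallback. A condensed restatement of the pattern, using the same otel calls as the diff (import paths assume the otel version vendored in this tree, where the attribute package is still named label):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/label"
	otelTrace "go.opentelemetry.io/otel/trace"
)

// trace starts a span under the supplied parent so nested calls produce
// ordered child spans; a nil parent degrades to a fresh root rather than
// panicking.
func trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
	if parent == nil {
		parent = context.Background()
	}
	ctx, span := otel.Tracer("kata").Start(parent, name)
	span.SetAttributes(label.Key("subsystem").String("virtiofsd"))
	return span, ctx
}

func main() {
	span, _ := trace(context.Background(), "demo")
	span.End()
}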
@ -259,8 +259,8 @@ func waitVirtiofsReady(cmd *exec.Cmd, stderr io.ReadCloser, debug bool) error {
return err return err
} }
func (v *virtiofsd) kill() (err error) { func (v *virtiofsd) kill(ctx context.Context) (err error) {
span, _ := v.trace("kill") span, _ := v.trace(ctx, "kill")
defer span.End() defer span.End()
if v.PID == 0 { if v.PID == 0 {
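With Stop and kill accepting a context, a shutdown path can pass along the context it already holds, so the teardown spans attach to the caller's trace instead of starting a detached root. A hypothetical caller (shutdownSharedFS and the fake are not kata code; stopper narrows the Virtiofsd interface to the one method this sketch needs):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

// stopper is the slice of the Virtiofsd interface this sketch needs.
type stopper interface {
	Stop(context.Context) error
}

type fakeVirtiofsd struct{}

func (fakeVirtiofsd) Stop(ctx context.Context) error { return nil }

// shutdownSharedFS hands its own ctx to Stop, so the spans that Stop and
// kill start become children of this span rather than detached roots.
func shutdownSharedFS(ctx context.Context, v stopper) error {
	ctx, span := otel.Tracer("kata").Start(ctx, "shutdownSharedFS")
	defer span.End()
	return v.Stop(ctx)
}

func main() {
	_ = shutdownSharedFS(context.Background(), fakeVirtiofsd{})
}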

View File

@ -120,7 +120,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
agent := newAagentFunc() agent := newAagentFunc()
vmSharePath := buildVMSharePath(id, store.RunVMStoragePath()) vmSharePath := buildVMSharePath(id, store.RunVMStoragePath())
err = agent.configure(hypervisor, id, vmSharePath, config.AgentConfig) err = agent.configure(ctx, hypervisor, id, vmSharePath, config.AgentConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -130,14 +130,14 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
} }
// 3. boot up guest vm // 3. boot up guest vm
if err = hypervisor.startSandbox(vmStartTimeout); err != nil { if err = hypervisor.startSandbox(ctx, vmStartTimeout); err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
virtLog.WithField("vm", id).WithError(err).Info("clean up vm") virtLog.WithField("vm", id).WithError(err).Info("clean up vm")
hypervisor.stopSandbox() hypervisor.stopSandbox(ctx)
} }
}() }()
@ -145,7 +145,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
// VMs booted from template are paused, do not check // VMs booted from template are paused, do not check
if !config.HypervisorConfig.BootFromTemplate { if !config.HypervisorConfig.BootFromTemplate {
virtLog.WithField("vm", id).Info("check agent status") virtLog.WithField("vm", id).Info("check agent status")
err = agent.check() err = agent.check(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
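The same rule governs cleanup paths: the deferred stopSandbox now reuses the ctx that NewVM was called with, so a failed boot still produces one coherent trace. Roughly, under hypothetical interface and function names:

package main

import (
	"context"
	"errors"
)

// sandboxRunner is a hypothetical slice of the hypervisor interface used
// by NewVM above; the timeout argument stands in for vmStartTimeout.
type sandboxRunner interface {
	startSandbox(ctx context.Context, timeoutSecs int) error
	stopSandbox(ctx context.Context) error
}

type fakeHypervisor struct{}

func (fakeHypervisor) startSandbox(ctx context.Context, timeoutSecs int) error { return nil }
func (fakeHypervisor) stopSandbox(ctx context.Context) error                   { return nil }

// bootVM mirrors the NewVM flow: when a later step fails, the deferred
// teardown reuses the live ctx, so the stop span stays inside the trace.
func bootVM(ctx context.Context, h sandboxRunner, check func(context.Context) error) (err error) {
	if err = h.startSandbox(ctx, 10); err != nil {
		return err
	}
	defer func() {
		if err != nil {
			h.stopSandbox(ctx) // child of the caller's span, not a new root
		}
	}()
	return check(ctx)
}

func main() {
	_ = bootVM(context.Background(), fakeHypervisor{}, func(context.Context) error {
		return errors.New("agent check failed") // exercises the cleanup path
	})
}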
@ -213,9 +213,9 @@ func (v *VM) logger() logrus.FieldLogger {
} }
// Pause pauses a VM. // Pause pauses a VM.
func (v *VM) Pause() error { func (v *VM) Pause(ctx context.Context) error {
v.logger().Info("pause vm") v.logger().Info("pause vm")
return v.hypervisor.pauseSandbox() return v.hypervisor.pauseSandbox(ctx)
} }
// Save saves a VM to persistent disk. // Save saves a VM to persistent disk.
@ -225,22 +225,22 @@ func (v *VM) Save() error {
} }
// Resume resumes a paused VM. // Resume resumes a paused VM.
func (v *VM) Resume() error { func (v *VM) Resume(ctx context.Context) error {
v.logger().Info("resume vm") v.logger().Info("resume vm")
return v.hypervisor.resumeSandbox() return v.hypervisor.resumeSandbox(ctx)
} }
// Start kicks off a configured VM. // Start kicks off a configured VM.
func (v *VM) Start() error { func (v *VM) Start(ctx context.Context) error {
v.logger().Info("start vm") v.logger().Info("start vm")
return v.hypervisor.startSandbox(vmStartTimeout) return v.hypervisor.startSandbox(ctx, vmStartTimeout)
} }
// Disconnect agent connections to a VM // Disconnect agent connections to a VM
func (v *VM) Disconnect() error { func (v *VM) Disconnect(ctx context.Context) error {
v.logger().Info("kill vm") v.logger().Info("kill vm")
if err := v.agent.disconnect(); err != nil { if err := v.agent.disconnect(ctx); err != nil {
v.logger().WithError(err).Error("failed to disconnect agent") v.logger().WithError(err).Error("failed to disconnect agent")
} }
@ -248,10 +248,10 @@ func (v *VM) Disconnect() error {
} }
// Stop stops a VM process. // Stop stops a VM process.
func (v *VM) Stop() error { func (v *VM) Stop(ctx context.Context) error {
v.logger().Info("stop vm") v.logger().Info("stop vm")
if err := v.hypervisor.stopSandbox(); err != nil { if err := v.hypervisor.stopSandbox(ctx); err != nil {
return err return err
} }
@ -259,10 +259,10 @@ func (v *VM) Stop() error {
} }
// AddCPUs adds num of CPUs to the VM. // AddCPUs adds num of CPUs to the VM.
func (v *VM) AddCPUs(num uint32) error { func (v *VM) AddCPUs(ctx context.Context, num uint32) error {
if num > 0 { if num > 0 {
v.logger().Infof("hot adding %d vCPUs", num) v.logger().Infof("hot adding %d vCPUs", num)
if _, err := v.hypervisor.hotplugAddDevice(num, cpuDev); err != nil { if _, err := v.hypervisor.hotplugAddDevice(ctx, num, cpuDev); err != nil {
return err return err
} }
v.cpuDelta += num v.cpuDelta += num
@ -273,11 +273,11 @@ func (v *VM) AddCPUs(num uint32) error {
} }
// AddMemory adds numMB of memory to the VM. // AddMemory adds numMB of memory to the VM.
func (v *VM) AddMemory(numMB uint32) error { func (v *VM) AddMemory(ctx context.Context, numMB uint32) error {
if numMB > 0 { if numMB > 0 {
v.logger().Infof("hot adding %d MB memory", numMB) v.logger().Infof("hot adding %d MB memory", numMB)
dev := &memoryDevice{1, int(numMB), 0, false} dev := &memoryDevice{1, int(numMB), 0, false}
if _, err := v.hypervisor.hotplugAddDevice(dev, memoryDev); err != nil { if _, err := v.hypervisor.hotplugAddDevice(ctx, dev, memoryDev); err != nil {
return err return err
} }
} }
@ -286,9 +286,9 @@ func (v *VM) AddMemory(numMB uint32) error {
} }
// OnlineCPUMemory puts the hotplugged CPU and memory online. // OnlineCPUMemory puts the hotplugged CPU and memory online.
func (v *VM) OnlineCPUMemory() error { func (v *VM) OnlineCPUMemory(ctx context.Context) error {
v.logger().Infof("online CPU %d and memory", v.cpuDelta) v.logger().Infof("online CPU %d and memory", v.cpuDelta)
err := v.agent.onlineCPUMem(v.cpuDelta, false) err := v.agent.onlineCPUMem(ctx, v.cpuDelta, false)
if err == nil { if err == nil {
v.cpuDelta = 0 v.cpuDelta = 0
} }
@ -298,7 +298,7 @@ func (v *VM) OnlineCPUMemory() error {
// ReseedRNG adds random entropy to guest random number generator // ReseedRNG adds random entropy to guest random number generator
// and reseeds it. // and reseeds it.
func (v *VM) ReseedRNG() error { func (v *VM) ReseedRNG(ctx context.Context) error {
v.logger().Infof("reseed guest random number generator") v.logger().Infof("reseed guest random number generator")
urandomDev := "/dev/urandom" urandomDev := "/dev/urandom"
data := make([]byte, 512) data := make([]byte, 512)
@ -313,14 +313,14 @@ func (v *VM) ReseedRNG() error {
return err return err
} }
return v.agent.reseedRNG(data) return v.agent.reseedRNG(ctx, data)
} }
// SyncTime syncs guest time with host time. // SyncTime syncs guest time with host time.
func (v *VM) SyncTime() error { func (v *VM) SyncTime(ctx context.Context) error {
now := time.Now() now := time.Now()
v.logger().WithField("time", now).Infof("sync guest time") v.logger().WithField("time", now).Infof("sync guest time")
return v.agent.setGuestDateTime(now) return v.agent.setGuestDateTime(ctx, now)
} }
func (v *VM) assignSandbox(s *Sandbox) error { func (v *VM) assignSandbox(s *Sandbox) error {
@ -364,8 +364,8 @@ func (v *VM) assignSandbox(s *Sandbox) error {
} }
// ToGrpc convert VM struct to Grpc format pb.GrpcVM. // ToGrpc convert VM struct to Grpc format pb.GrpcVM.
func (v *VM) ToGrpc(config VMConfig) (*pb.GrpcVM, error) { func (v *VM) ToGrpc(ctx context.Context, config VMConfig) (*pb.GrpcVM, error) {
hJSON, err := v.hypervisor.toGrpc() hJSON, err := v.hypervisor.toGrpc(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
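Taken together, every VM lifecycle method now accepts a context, so a caller can drive resize, online, and teardown under a single traced root. An illustrative caller against the signatures above (resizeAndOnline, vmLifecycle, and fakeVM are not kata code):

package main

import "context"

// vmLifecycle is a hypothetical narrowing of the VM methods changed above.
type vmLifecycle interface {
	AddCPUs(ctx context.Context, num uint32) error
	AddMemory(ctx context.Context, numMB uint32) error
	OnlineCPUMemory(ctx context.Context) error
}

type fakeVM struct{}

func (fakeVM) AddCPUs(ctx context.Context, num uint32) error     { return nil }
func (fakeVM) AddMemory(ctx context.Context, numMB uint32) error { return nil }
func (fakeVM) OnlineCPUMemory(ctx context.Context) error         { return nil }

// resizeAndOnline threads one ctx through every step, so all three
// operations show up as ordered children of the caller's span.
func resizeAndOnline(ctx context.Context, v vmLifecycle, cpus, memMB uint32) error {
	if err := v.AddCPUs(ctx, cpus); err != nil {
		return err
	}
	if err := v.AddMemory(ctx, memMB); err != nil {
		return err
	}
	// The guest must online hotplugged CPUs and memory before they are usable.
	return v.OnlineCPUMemory(ctx)
}

func main() {
	_ = resizeAndOnline(context.Background(), fakeVM{}, 1, 128)
}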