Mirror of https://github.com/kata-containers/kata-containers.git
runtime: Fix ordering of trace spans
A significant number of trace calls did not use a parent context that would
create proper span ordering in trace output. Add a local context to functions
for use in trace calls to facilitate proper span ordering. Additionally, change
whether the trace function returns a context in some virtcontainers functions,
and use the existing context rather than the background context in bindMount()
so that its span exists as a child of a parent span.

Fixes #1355

Signed-off-by: Chelsea Mafrica <chelsea.e.mafrica@intel.com>
parent 50f317dcff
commit 6b0dc60dda
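The pattern the commit applies is the usual OpenTelemetry one: every traced call receives a parent context, starts its span from that parent, and hands the span's derived context to whatever it calls next, so child spans nest under their parents instead of starting new root spans. A minimal, self-contained sketch of that pattern (illustrative only, not the Kata runtime code; the tracer name and the outer/inner function names are made up):

package tracing

import (
	"context"

	"go.opentelemetry.io/otel"
	otelTrace "go.opentelemetry.io/otel/trace"
)

// trace starts a span as a child of parent and returns the derived context;
// callers must pass that context on so nested spans attach to this one.
func trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
	if parent == nil {
		parent = context.Background()
	}
	ctx, span := otel.Tracer("example").Start(parent, name)
	return span, ctx
}

func outer(ctx context.Context) {
	// Reuse the returned context instead of a stored s.ctx or context.Background().
	span, ctx := trace(ctx, "outer")
	defer span.End()

	inner(ctx) // "inner" is recorded as a child of "outer"
}

func inner(ctx context.Context) {
	span, _ := trace(ctx, "inner")
	defer span.End()
}

Passing a stored service context or context.Background() at a call site, as many of the replaced lines below did, breaks this chain: the new span is not attached to the caller's span, so spans appear unordered in the trace output.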
@@ -73,7 +73,7 @@ func (s *cacheServer) GetBaseVM(ctx context.Context, empty *types.Empty) (*pb.Gr
return nil, errors.Wrapf(err, "failed to GetBaseVM")
}

-return vm.ToGrpc(config)
+return vm.ToGrpc(ctx, config)
}

func (s *cacheServer) quit() {

@@ -75,7 +75,7 @@ func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest) (*con
// create span
var span otelTrace.Span
-span, s.ctx = trace(s.ctx, "create")
+span, s.ctx = trace(ctx, "create")
defer span.End()

if rootFs.Mounted, err = checkAndMount(s, r); err != nil {

@@ -111,7 +111,7 @@ func create(ctx context.Context, s *service, r *taskAPI.CreateTaskRequest) (*con
case vc.PodContainer:
var span otelTrace.Span
-span, s.ctx = trace(s.ctx, "create")
+span, ctx = trace(s.ctx, "create")
defer span.End()

if s.sandbox == nil {
@@ -17,12 +17,12 @@ import (
func deleteContainer(ctx context.Context, s *service, c *container) error {
if !c.cType.IsSandbox() {
if c.status != task.StatusStopped {
-if _, err := s.sandbox.StopContainer(c.id, false); err != nil && !isNotFound(err) {
+if _, err := s.sandbox.StopContainer(ctx, c.id, false); err != nil && !isNotFound(err) {
return err
}
}

-if _, err := s.sandbox.DeleteContainer(c.id); err != nil && !isNotFound(err) {
+if _, err := s.sandbox.DeleteContainer(ctx, c.id); err != nil && !isNotFound(err) {
return err
}
}
@@ -6,6 +6,8 @@
package containerdshim

import (
+"context"
+
"github.com/containerd/cgroups"
"github.com/containerd/typeurl"

@@ -13,8 +15,8 @@ import (
vc "github.com/kata-containers/kata-containers/src/runtime/virtcontainers"
)

-func marshalMetrics(s *service, containerID string) (*google_protobuf.Any, error) {
-stats, err := s.sandbox.StatsContainer(containerID)
+func marshalMetrics(ctx context.Context, s *service, containerID string) (*google_protobuf.Any, error) {
+stats, err := s.sandbox.StatsContainer(ctx, containerID)
if err != nil {
return nil, err
}
@@ -98,7 +98,7 @@ func New(ctx context.Context, id string, publisher events.Publisher) (cdshim.Shi
go s.processExits()

-go s.forward(publisher)
+go s.forward(ctx, publisher)

return s, nil
}

@@ -233,9 +233,9 @@ func (s *service) StartShim(ctx context.Context, id, containerdBinary, container
return address, nil
}

-func (s *service) forward(publisher events.Publisher) {
+func (s *service) forward(ctx context.Context, publisher events.Publisher) {
for e := range s.events {
-ctx, cancel := context.WithTimeout(s.ctx, timeOut)
+ctx, cancel := context.WithTimeout(ctx, timeOut)
err := publisher.Publish(ctx, getTopic(e), e)
cancel()
if err != nil {

@@ -300,7 +300,7 @@ func trace(ctx context.Context, name string) (otelTrace.Span, context.Context) {
}

func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err error) {
-span, _ := trace(s.ctx, "Cleanup")
+span, ctx := trace(ctx, "Cleanup")
defer span.End()

//Since the binary cleanup will return the DeleteResponse from stdout to
@@ -411,7 +411,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *

// Start a process
func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAPI.StartResponse, err error) {
-span, _ := trace(s.ctx, "Start")
+span, ctx := trace(ctx, "Start")
defer span.End()

start := time.Now()

@@ -462,7 +462,7 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP

// Delete the initial process and container
func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *taskAPI.DeleteResponse, err error) {
-span, _ := trace(s.ctx, "Delete")
+span, ctx := trace(ctx, "Delete")
defer span.End()

start := time.Now()

@@ -514,7 +514,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task

// Exec an additional process inside the container
func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "Exec")
+span, ctx := trace(ctx, "Exec")
defer span.End()

start := time.Now()

@@ -552,7 +552,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *p

// ResizePty of a process
func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "ResizePty")
+span, ctx := trace(ctx, "ResizePty")
defer span.End()

start := time.Now()

@@ -581,7 +581,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_
processID = execs.id

}
-err = s.sandbox.WinsizeProcess(c.id, processID, r.Height, r.Width)
+err = s.sandbox.WinsizeProcess(ctx, c.id, processID, r.Height, r.Width)
if err != nil {
return nil, err
}

@@ -591,7 +591,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_

// State returns runtime state information for a process
func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAPI.StateResponse, err error) {
-span, _ := trace(s.ctx, "State")
+span, ctx := trace(ctx, "State")
defer span.End()

start := time.Now()

@@ -643,7 +643,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP

// Pause the container
func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "Pause")
+span, ctx := trace(ctx, "Pause")
defer span.End()

start := time.Now()
@@ -662,7 +662,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes

c.status = task.StatusPausing

-err = s.sandbox.PauseContainer(r.ID)
+err = s.sandbox.PauseContainer(ctx, r.ID)
if err == nil {
c.status = task.StatusPaused
s.send(&eventstypes.TaskPaused{

@@ -682,7 +682,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes

// Resume the container
func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "Resume")
+span, ctx := trace(ctx, "Resume")
defer span.End()

start := time.Now()

@@ -699,7 +699,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp
return nil, err
}

-err = s.sandbox.ResumeContainer(c.id)
+err = s.sandbox.ResumeContainer(ctx, c.id)
if err == nil {
c.status = task.StatusRunning
s.send(&eventstypes.TaskResumed{

@@ -719,7 +719,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp

// Kill a process with the provided signal
func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "Kill")
+span, ctx := trace(ctx, "Kill")
defer span.End()

start := time.Now()

@@ -773,14 +773,14 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E
return empty, nil
}

-return empty, s.sandbox.SignalProcess(c.id, processID, signum, r.All)
+return empty, s.sandbox.SignalProcess(ctx, c.id, processID, signum, r.All)
}

// Pids returns all pids inside the container
// Since for kata, it cannot get the process's pid from VM,
// thus only return the Shim's pid directly.
func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.PidsResponse, err error) {
-span, _ := trace(s.ctx, "Pids")
+span, ctx := trace(ctx, "Pids")
defer span.End()

var processes []*task.ProcessInfo
@@ -803,7 +803,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.

// CloseIO of a process
func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "CloseIO")
+span, ctx := trace(ctx, "CloseIO")
defer span.End()

start := time.Now()

@@ -844,7 +844,7 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *pt

// Checkpoint the container
func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "Checkpoint")
+span, ctx := trace(ctx, "Checkpoint")
defer span.End()

start := time.Now()

@@ -858,7 +858,7 @@ func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskReque

// Connect returns shim information such as the shim's pid
func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *taskAPI.ConnectResponse, err error) {
-span, _ := trace(s.ctx, "Connect")
+span, ctx := trace(ctx, "Connect")
defer span.End()

start := time.Now()

@@ -878,7 +878,7 @@ func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *ta
}

func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "Shutdown")
+span, ctx := trace(ctx, "Shutdown")

start := time.Now()
defer func() {

@@ -906,7 +906,7 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *
}

func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAPI.StatsResponse, err error) {
-span, _ := trace(s.ctx, "Stats")
+span, ctx := trace(ctx, "Stats")
defer span.End()

start := time.Now()

@@ -923,7 +923,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP
return nil, err
}

-data, err := marshalMetrics(s, c.id)
+data, err := marshalMetrics(ctx, s, c.id)
if err != nil {
return nil, err
}

@@ -935,7 +935,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP

// Update a running container
func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *ptypes.Empty, err error) {
-span, _ := trace(s.ctx, "Update")
+span, ctx := trace(ctx, "Update")
defer span.End()

start := time.Now()

@@ -957,7 +957,7 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *
return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "Invalid resources type for %s", s.id)
}

-err = s.sandbox.UpdateContainer(r.ID, *resources)
+err = s.sandbox.UpdateContainer(ctx, r.ID, *resources)
if err != nil {
return nil, errdefs.ToGRPC(err)
}

@@ -967,7 +967,7 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *

// Wait for a process to exit
func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (_ *taskAPI.WaitResponse, err error) {
-span, _ := trace(s.ctx, "Wait")
+span, ctx := trace(ctx, "Wait")
defer span.End()

var ret uint32
@@ -75,7 +75,8 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) {
}

// get metrics from agent
-agentMetrics, err := s.sandbox.GetAgentMetrics()
+// can not pass context to serveMetrics, so use background context
+agentMetrics, err := s.sandbox.GetAgentMetrics(context.Background())
if err != nil {
shimMgtLog.WithError(err).Error("failed GetAgentMetrics")
if isGRPCErrorCode(codes.NotFound, err) {

@@ -96,7 +97,7 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) {
// collect pod overhead metrics need sleep to get the changes of cpu/memory resources usage
// so here only trigger the collect operation, and the data will be gathered
// next time collection request from Prometheus server
-go s.setPodOverheadMetrics()
+go s.setPodOverheadMetrics(context.Background())
}

func decodeAgentMetrics(body string) []*dto.MetricFamily {

@@ -6,6 +6,7 @@
package containerdshim

import (
+"context"
"time"

mutils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
@@ -135,15 +136,15 @@ func updateShimMetrics() error {
}

// statsSandbox returns a detailed sandbox stats.
-func (s *service) statsSandbox() (vc.SandboxStats, []vc.ContainerStats, error) {
-sandboxStats, err := s.sandbox.Stats()
+func (s *service) statsSandbox(ctx context.Context) (vc.SandboxStats, []vc.ContainerStats, error) {
+sandboxStats, err := s.sandbox.Stats(ctx)
if err != nil {
return vc.SandboxStats{}, []vc.ContainerStats{}, err
}

containerStats := []vc.ContainerStats{}
for _, c := range s.sandbox.GetAllContainers() {
-cstats, err := s.sandbox.StatsContainer(c.ID())
+cstats, err := s.sandbox.StatsContainer(ctx, c.ID())
if err != nil {
return vc.SandboxStats{}, []vc.ContainerStats{}, err
}

@@ -179,9 +180,9 @@ func calcOverhead(initialSandboxStats, finishSandboxStats vc.SandboxStats, initi
return float64(hostMemoryUsage - guestMemoryUsage), float64(cpuUsageHost - cpuUsageGuest)
}

-func (s *service) getPodOverhead() (float64, float64, error) {
+func (s *service) getPodOverhead(ctx context.Context) (float64, float64, error) {
initTime := time.Now().UnixNano()
-initialSandboxStats, initialContainerStats, err := s.statsSandbox()
+initialSandboxStats, initialContainerStats, err := s.statsSandbox(ctx)
if err != nil {
return 0, 0, err
}

@@ -191,7 +192,7 @@ func (s *service) getPodOverhead() (float64, float64, error) {
finishtTime := time.Now().UnixNano()
deltaTime := float64(finishtTime - initTime)

-finishSandboxStats, finishContainersStats, err := s.statsSandbox()
+finishSandboxStats, finishContainersStats, err := s.statsSandbox(ctx)
if err != nil {
return 0, 0, err
}

@@ -199,8 +200,8 @@ func (s *service) getPodOverhead() (float64, float64, error) {
return mem, cpu, nil
}

-func (s *service) setPodOverheadMetrics() error {
-mem, cpu, err := s.getPodOverhead()
+func (s *service) setPodOverheadMetrics(ctx context.Context) error {
+mem, cpu, err := s.getPodOverhead(ctx)
if err != nil {
return err
}
@@ -26,22 +26,22 @@ func startContainer(ctx context.Context, s *service, c *container) error {
}

if c.cType.IsSandbox() {
-err := s.sandbox.Start()
+err := s.sandbox.Start(ctx)
if err != nil {
return err
}
// Start monitor after starting sandbox
-s.monitor, err = s.sandbox.Monitor()
+s.monitor, err = s.sandbox.Monitor(ctx)
if err != nil {
return err
}
-go watchSandbox(s)
+go watchSandbox(ctx, s)

// We don't rely on the context passed to startContainer as it can be cancelled after
// this rpc call.
-go watchOOMEvents(s.ctx, s)
+go watchOOMEvents(ctx, s)
} else {
-_, err := s.sandbox.StartContainer(c.id)
+_, err := s.sandbox.StartContainer(ctx, c.id)
if err != nil {
return err
}

@@ -82,7 +82,7 @@ func startContainer(ctx context.Context, s *service, c *container) error {
close(c.stdinCloser)
}

-go wait(s, c, "")
+go wait(ctx, s, c, "")

return nil
}

@@ -99,7 +99,7 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (*ex
return nil, err
}

-_, proc, err := s.sandbox.EnterContainer(containerID, *execs.cmds)
+_, proc, err := s.sandbox.EnterContainer(ctx, containerID, *execs.cmds)
if err != nil {
err := fmt.Errorf("cannot enter container %s, with err %s", containerID, err)
return nil, err

@@ -108,7 +108,7 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (*ex

execs.status = task.StatusRunning
if execs.tty.height != 0 && execs.tty.width != 0 {
-err = s.sandbox.WinsizeProcess(c.id, execs.id, execs.tty.height, execs.tty.width)
+err = s.sandbox.WinsizeProcess(ctx, c.id, execs.id, execs.tty.height, execs.tty.width)
if err != nil {
return nil, err
}

@@ -129,7 +129,7 @@ func startExec(ctx context.Context, s *service, containerID, execID string) (*ex

go ioCopy(execs.exitIOch, execs.stdinCloser, tty, stdin, stdout, stderr)

-go wait(s, c, execID)
+go wait(ctx, s, c, execID)

return execs, nil
}
@@ -22,7 +22,7 @@ import (

const defaultCheckInterval = 1 * time.Second

-func wait(s *service, c *container, execID string) (int32, error) {
+func wait(ctx context.Context, s *service, c *container, execID string) (int32, error) {
var execs *exec
var err error

@@ -43,7 +43,7 @@ func wait(s *service, c *container, execID string) (int32, error) {
processID = execs.id
}

-ret, err := s.sandbox.WaitProcess(c.id, processID)
+ret, err := s.sandbox.WaitProcess(ctx, c.id, processID)
if err != nil {
shimLog.WithError(err).WithFields(logrus.Fields{
"container": c.id,

@@ -65,15 +65,15 @@ func wait(s *service, c *container, execID string) (int32, error) {
if s.monitor != nil {
s.monitor <- nil
}
-if err = s.sandbox.Stop(true); err != nil {
+if err = s.sandbox.Stop(ctx, true); err != nil {
shimLog.WithField("sandbox", s.sandbox.ID()).Error("failed to stop sandbox")
}

-if err = s.sandbox.Delete(); err != nil {
+if err = s.sandbox.Delete(ctx); err != nil {
shimLog.WithField("sandbox", s.sandbox.ID()).Error("failed to delete sandbox")
}
} else {
-if _, err = s.sandbox.StopContainer(c.id, false); err != nil {
+if _, err = s.sandbox.StopContainer(ctx, c.id, false); err != nil {
shimLog.WithError(err).WithField("container", c.id).Warn("stop container failed")
}
}

@@ -97,7 +97,7 @@ func wait(s *service, c *container, execID string) (int32, error) {
return ret, nil
}

-func watchSandbox(s *service) {
+func watchSandbox(ctx context.Context, s *service) {
if s.monitor == nil {
return
}

@@ -111,11 +111,11 @@ func watchSandbox(s *service) {
defer s.mu.Unlock()
// sandbox malfunctioning, cleanup as much as we can
shimLog.WithError(err).Warn("sandbox stopped unexpectedly")
-err = s.sandbox.Stop(true)
+err = s.sandbox.Stop(ctx, true)
if err != nil {
shimLog.WithError(err).Warn("stop sandbox failed")
}
-err = s.sandbox.Delete()
+err = s.sandbox.Delete(ctx)
if err != nil {
shimLog.WithError(err).Warn("delete sandbox failed")
}

@@ -145,7 +145,7 @@ func watchOOMEvents(ctx context.Context, s *service) {
case <-ctx.Done():
return
default:
-containerID, err := s.sandbox.GetOOMEvent()
+containerID, err := s.sandbox.GetOOMEvent(ctx)
if err != nil {
shimLog.WithError(err).Warn("failed to get OOM event from sandbox")
// If the GetOOMEvent call is not implemented, then the agent is most likely an older version,

@@ -230,7 +230,7 @@ func CreateContainer(ctx context.Context, sandbox vc.VCSandbox, ociSpec specs.Sp

span.SetAttributes(label.Key("sandbox").String(sandboxID))

-c, err = sandbox.CreateContainer(contConfig)
+c, err = sandbox.CreateContainer(ctx, contConfig)
if err != nil {
return vc.Process{}, err
}
@@ -149,8 +149,8 @@ func (a *Acrn) kernelParameters() string {
}

// Adds all capabilities supported by Acrn implementation of hypervisor interface
-func (a *Acrn) capabilities() types.Capabilities {
-span, _ := a.trace("capabilities")
+func (a *Acrn) capabilities(ctx context.Context) types.Capabilities {
+span, _ := a.trace(ctx, "capabilities")
defer span.End()

return a.arch.capabilities()

@@ -207,14 +207,14 @@ func (a *Acrn) Logger() *logrus.Entry {
return virtLog.WithField("subsystem", "acrn")
}

-func (a *Acrn) trace(name string) (otelTrace.Span, context.Context) {
-if a.ctx == nil {
+func (a *Acrn) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
+if parent == nil {
a.Logger().WithField("type", "bug").Error("trace called before context set")
-a.ctx = context.Background()
+parent = context.Background()
}

tracer := otel.Tracer("kata")
-ctx, span := tracer.Start(a.ctx, name)
+ctx, span := tracer.Start(parent, name)
span.SetAttributes([]label.KeyValue{label.Key("subsystem").String("hypervisor"), label.Key("type").String("acrn")}...)

return span, ctx

@@ -248,14 +248,14 @@ func (a *Acrn) appendImage(devices []Device, imagePath string) ([]Device, error)
return devices, nil
}

-func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
+func (a *Acrn) buildDevices(ctx context.Context, imagePath string) ([]Device, error) {
var devices []Device

if imagePath == "" {
return nil, fmt.Errorf("Image Path should not be empty: %s", imagePath)
}

-_, console, err := a.getSandboxConsole(a.id)
+_, console, err := a.getSandboxConsole(ctx, a.id)
if err != nil {
return nil, err
}

@@ -278,7 +278,7 @@ func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
// holder for container rootfs (as acrn doesn't support hot-plug).
// Once the container rootfs is known, replace the dummy backend
// with actual path (using block rescan feature in acrn)
-devices, err = a.createDummyVirtioBlkDev(devices)
+devices, err = a.createDummyVirtioBlkDev(ctx, devices)
if err != nil {
return nil, err
}

@@ -287,8 +287,8 @@ func (a *Acrn) buildDevices(imagePath string) ([]Device, error) {
}

// setup sets the Acrn structure up.
-func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
-span, _ := a.trace("setup")
+func (a *Acrn) setup(ctx context.Context, id string, hypervisorConfig *HypervisorConfig) error {
+span, _ := a.trace(ctx, "setup")
defer span.End()

err := hypervisorConfig.valid()
@@ -330,8 +330,8 @@ func (a *Acrn) setup(id string, hypervisorConfig *HypervisorConfig) error {
return nil
}

-func (a *Acrn) createDummyVirtioBlkDev(devices []Device) ([]Device, error) {
-span, _ := a.trace("createDummyVirtioBlkDev")
+func (a *Acrn) createDummyVirtioBlkDev(ctx context.Context, devices []Device) ([]Device, error) {
+span, _ := a.trace(ctx, "createDummyVirtioBlkDev")
defer span.End()

// Since acrn doesn't support hot-plug, dummy virtio-blk

@@ -354,10 +354,11 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
// Save the tracing context
a.ctx = ctx

-span, _ := a.trace("createSandbox")
+var span otelTrace.Span
+span, ctx = a.trace(ctx, "createSandbox")
defer span.End()

-if err := a.setup(id, hypervisorConfig); err != nil {
+if err := a.setup(ctx, id, hypervisorConfig); err != nil {
return err
}

@@ -386,7 +387,7 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
return fmt.Errorf("ACRN UUID should not be empty")
}

-devices, err := a.buildDevices(imagePath)
+devices, err := a.buildDevices(ctx, imagePath)
if err != nil {
return err
}

@@ -418,8 +419,8 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
}

// startSandbox will start the Sandbox's VM.
-func (a *Acrn) startSandbox(timeoutSecs int) error {
-span, _ := a.trace("startSandbox")
+func (a *Acrn) startSandbox(ctx context.Context, timeoutSecs int) error {
+span, ctx := a.trace(ctx, "startSandbox")
defer span.End()

if a.config.Debug {

@@ -455,7 +456,7 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
}
a.state.PID = PID

-if err = a.waitSandbox(timeoutSecs); err != nil {
+if err = a.waitSandbox(ctx, timeoutSecs); err != nil {
a.Logger().WithField("acrn wait failed:", err).Debug()
return err
}

@@ -464,8 +465,8 @@ func (a *Acrn) startSandbox(timeoutSecs int) error {
}

// waitSandbox will wait for the Sandbox's VM to be up and running.
-func (a *Acrn) waitSandbox(timeoutSecs int) error {
-span, _ := a.trace("waitSandbox")
+func (a *Acrn) waitSandbox(ctx context.Context, timeoutSecs int) error {
+span, _ := a.trace(ctx, "waitSandbox")
defer span.End()

if timeoutSecs < 0 {

@@ -478,8 +479,8 @@ func (a *Acrn) waitSandbox(timeoutSecs int) error {
}

// stopSandbox will stop the Sandbox's VM.
-func (a *Acrn) stopSandbox() (err error) {
-span, _ := a.trace("stopSandbox")
+func (a *Acrn) stopSandbox(ctx context.Context) (err error) {
+span, _ := a.trace(ctx, "stopSandbox")
defer span.End()

a.Logger().Info("Stopping acrn VM")
@@ -568,8 +569,8 @@ func (a *Acrn) updateBlockDevice(drive *config.BlockDrive) error {
return err
}

-func (a *Acrn) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-span, _ := a.trace("hotplugAddDevice")
+func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+span, ctx := a.trace(ctx, "hotplugAddDevice")
defer span.End()

switch devType {

@@ -582,8 +583,8 @@ func (a *Acrn) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
}
}

-func (a *Acrn) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-span, _ := a.trace("hotplugRemoveDevice")
+func (a *Acrn) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+span, ctx := a.trace(ctx, "hotplugRemoveDevice")
defer span.End()

// Not supported. return success

@@ -591,8 +592,8 @@ func (a *Acrn) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (int
return nil, nil
}

-func (a *Acrn) pauseSandbox() error {
-span, _ := a.trace("pauseSandbox")
+func (a *Acrn) pauseSandbox(ctx context.Context) error {
+span, _ := a.trace(ctx, "pauseSandbox")
defer span.End()

// Not supported. return success

@@ -600,8 +601,8 @@ func (a *Acrn) pauseSandbox() error {
return nil
}

-func (a *Acrn) resumeSandbox() error {
-span, _ := a.trace("resumeSandbox")
+func (a *Acrn) resumeSandbox(ctx context.Context) error {
+span, _ := a.trace(ctx, "resumeSandbox")
defer span.End()

// Not supported. return success

@@ -610,9 +611,9 @@ func (a *Acrn) resumeSandbox() error {
}

// addDevice will add extra devices to acrn command line.
-func (a *Acrn) addDevice(devInfo interface{}, devType deviceType) error {
+func (a *Acrn) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
var err error
-span, _ := a.trace("addDevice")
+span, _ := a.trace(ctx, "addDevice")
defer span.End()

switch v := devInfo.(type) {

@@ -644,8 +645,8 @@ func (a *Acrn) addDevice(devInfo interface{}, devType deviceType) error {

// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
-func (a *Acrn) getSandboxConsole(id string) (string, string, error) {
-span, _ := a.trace("getSandboxConsole")
+func (a *Acrn) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
+span, _ := a.trace(ctx, "getSandboxConsole")
defer span.End()

consoleURL, err := utils.BuildSocketPath(a.store.RunVMStoragePath(), id, acrnConsoleSocket)

@@ -664,15 +665,15 @@ func (a *Acrn) saveSandbox() error {
return nil
}

-func (a *Acrn) disconnect() {
-span, _ := a.trace("disconnect")
+func (a *Acrn) disconnect(ctx context.Context) {
+span, _ := a.trace(ctx, "disconnect")
defer span.End()

// Not supported.
}

-func (a *Acrn) getThreadIDs() (vcpuThreadIDs, error) {
-span, _ := a.trace("getThreadIDs")
+func (a *Acrn) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
+span, _ := a.trace(ctx, "getThreadIDs")
defer span.End()

// Not supported. return success
@@ -681,16 +682,16 @@ func (a *Acrn) getThreadIDs() (vcpuThreadIDs, error) {
return vcpuThreadIDs{}, nil
}

-func (a *Acrn) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
+func (a *Acrn) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
return 0, memoryDevice{}, nil
}

-func (a *Acrn) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (a *Acrn) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
return 0, 0, nil
}

-func (a *Acrn) cleanup() error {
-span, _ := a.trace("cleanup")
+func (a *Acrn) cleanup(ctx context.Context) error {
+span, _ := a.trace(ctx, "cleanup")
defer span.End()

return nil

@@ -704,7 +705,7 @@ func (a *Acrn) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
return errors.New("acrn is not supported by VM cache")
}

-func (a *Acrn) toGrpc() ([]byte, error) {
+func (a *Acrn) toGrpc(ctx context.Context) ([]byte, error) {
return nil, errors.New("acrn is not supported by VM cache")
}

@@ -89,7 +89,7 @@ func testAcrnAddDevice(t *testing.T, devInfo interface{}, devType deviceType, ex
arch: &acrnArchBase{},
}

-err := a.addDevice(devInfo, devType)
+err := a.addDevice(context.Background(), devInfo, devType)
assert.NoError(err)
assert.Exactly(a.acrnConfig.Devices, expected)
}

@@ -74,13 +74,13 @@ type agent interface {
capabilities() types.Capabilities

// check will check the agent liveness
-check() error
+check(ctx context.Context) error

// tell whether the agent is long live connected or not
longLiveConn() bool

// disconnect will disconnect the connection to the agent
-disconnect() error
+disconnect(ctx context.Context) error

// get agent url
getAgentURL() (string, error)
@@ -92,111 +92,111 @@ type agent interface {
reuseAgent(agent agent) error

// createSandbox will tell the agent to perform necessary setup for a Sandbox.
-createSandbox(sandbox *Sandbox) error
+createSandbox(ctx context.Context, sandbox *Sandbox) error

// exec will tell the agent to run a command in an already running container.
-exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error)
+exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error)

// startSandbox will tell the agent to start all containers related to the Sandbox.
-startSandbox(sandbox *Sandbox) error
+startSandbox(ctx context.Context, sandbox *Sandbox) error

// stopSandbox will tell the agent to stop all containers related to the Sandbox.
-stopSandbox(sandbox *Sandbox) error
+stopSandbox(ctx context.Context, sandbox *Sandbox) error

// createContainer will tell the agent to create a container related to a Sandbox.
-createContainer(sandbox *Sandbox, c *Container) (*Process, error)
+createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (*Process, error)

// startContainer will tell the agent to start a container related to a Sandbox.
-startContainer(sandbox *Sandbox, c *Container) error
+startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error

// stopContainer will tell the agent to stop a container related to a Sandbox.
-stopContainer(sandbox *Sandbox, c Container) error
+stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error

// signalProcess will tell the agent to send a signal to a
// container or a process related to a Sandbox. If all is true, all processes in
// the container will be sent the signal.
-signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error
+signalProcess(ctx context.Context, c *Container, processID string, signal syscall.Signal, all bool) error

// winsizeProcess will tell the agent to set a process' tty size
-winsizeProcess(c *Container, processID string, height, width uint32) error
+winsizeProcess(ctx context.Context, c *Container, processID string, height, width uint32) error

// writeProcessStdin will tell the agent to write a process stdin
-writeProcessStdin(c *Container, ProcessID string, data []byte) (int, error)
+writeProcessStdin(ctx context.Context, c *Container, ProcessID string, data []byte) (int, error)

// closeProcessStdin will tell the agent to close a process stdin
-closeProcessStdin(c *Container, ProcessID string) error
+closeProcessStdin(ctx context.Context, c *Container, ProcessID string) error

// readProcessStdout will tell the agent to read a process stdout
-readProcessStdout(c *Container, processID string, data []byte) (int, error)
+readProcessStdout(ctx context.Context, c *Container, processID string, data []byte) (int, error)

// readProcessStderr will tell the agent to read a process stderr
-readProcessStderr(c *Container, processID string, data []byte) (int, error)
+readProcessStderr(ctx context.Context, c *Container, processID string, data []byte) (int, error)

// processListContainer will list the processes running inside the container
-processListContainer(sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error)
+processListContainer(ctx context.Context, sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error)

// updateContainer will update the resources of a running container
-updateContainer(sandbox *Sandbox, c Container, resources specs.LinuxResources) error
+updateContainer(ctx context.Context, sandbox *Sandbox, c Container, resources specs.LinuxResources) error

// waitProcess will wait for the exit code of a process
-waitProcess(c *Container, processID string) (int32, error)
+waitProcess(ctx context.Context, c *Container, processID string) (int32, error)

// onlineCPUMem will online CPUs and Memory inside the Sandbox.
// This function should be called after hot adding vCPUs or Memory.
// cpus specifies the number of CPUs that were added and the agent should online
// cpuOnly specifies that we should online cpu or online memory or both
-onlineCPUMem(cpus uint32, cpuOnly bool) error
+onlineCPUMem(ctx context.Context, cpus uint32, cpuOnly bool) error

// memHotplugByProbe will notify the guest kernel about memory hotplug event through
// probe interface.
// This function should be called after hot adding Memory and before online memory.
// addr specifies the address of the recently hotplugged or unhotplugged memory device.
-memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error
+memHotplugByProbe(ctx context.Context, addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error

// statsContainer will tell the agent to get stats from a container related to a Sandbox
-statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error)
+statsContainer(ctx context.Context, sandbox *Sandbox, c Container) (*ContainerStats, error)

// pauseContainer will pause a container
-pauseContainer(sandbox *Sandbox, c Container) error
+pauseContainer(ctx context.Context, sandbox *Sandbox, c Container) error

// resumeContainer will resume a paused container
-resumeContainer(sandbox *Sandbox, c Container) error
+resumeContainer(ctx context.Context, sandbox *Sandbox, c Container) error

// configure will update agent settings based on provided arguments
-configure(h hypervisor, id, sharePath string, config interface{}) error
+configure(ctx context.Context, h hypervisor, id, sharePath string, config interface{}) error

// configureFromGrpc will update agent settings based on provided arguments which from Grpc
configureFromGrpc(h hypervisor, id string, config interface{}) error

// reseedRNG will reseed the guest random number generator
-reseedRNG(data []byte) error
+reseedRNG(ctx context.Context, data []byte) error

// updateInterface will tell the agent to update a nic for an existed Sandbox.
-updateInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error)
+updateInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error)

// listInterfaces will tell the agent to list interfaces of an existed Sandbox
-listInterfaces() ([]*pbTypes.Interface, error)
+listInterfaces(ctx context.Context) ([]*pbTypes.Interface, error)

// updateRoutes will tell the agent to update route table for an existed Sandbox.
-updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error)
+updateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error)

// listRoutes will tell the agent to list routes of an existed Sandbox
-listRoutes() ([]*pbTypes.Route, error)
+listRoutes(ctx context.Context) ([]*pbTypes.Route, error)

// getGuestDetails will tell the agent to get some information of guest
-getGuestDetails(*grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error)
+getGuestDetails(context.Context, *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error)

// setGuestDateTime asks the agent to set guest time to the provided one
-setGuestDateTime(time.Time) error
+setGuestDateTime(context.Context, time.Time) error

// copyFile copies file from host to container's rootfs
-copyFile(src, dst string) error
+copyFile(ctx context.Context, src, dst string) error

// markDead tell agent that the guest is dead
-markDead()
+markDead(ctx context.Context)

// cleanup removes all on disk information generated by the agent
-cleanup(s *Sandbox)
+cleanup(ctx context.Context, s *Sandbox)

// return data for saving
save() persistapi.AgentState

@@ -206,8 +206,8 @@ type agent interface {

// getOOMEvent will wait on OOM events that occur in the sandbox.
// Will return the ID of the container where the event occurred.
-getOOMEvent() (string, error)
+getOOMEvent(ctx context.Context) (string, error)

// getAgentMetrics get metrics of agent and guest through agent
-getAgentMetrics(*grpc.GetMetricsRequest) (*grpc.Metrics, error)
+getAgentMetrics(context.Context, *grpc.GetMetricsRequest) (*grpc.Metrics, error)
}
@@ -74,19 +74,19 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
// cleanup sandbox resources in case of any failure
defer func() {
if err != nil {
-s.Delete()
+s.Delete(ctx)
}
}()

// Create the sandbox network
-if err = s.createNetwork(); err != nil {
+if err = s.createNetwork(ctx); err != nil {
return nil, err
}

// network rollback
defer func() {
if err != nil {
-s.removeNetwork()
+s.removeNetwork(ctx)
}
}()

@@ -102,30 +102,30 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
}

// Start the VM
-if err = s.startVM(); err != nil {
+if err = s.startVM(ctx); err != nil {
return nil, err
}

// rollback to stop VM if error occurs
defer func() {
if err != nil {
-s.stopVM()
+s.stopVM(ctx)
}
}()

-s.postCreatedNetwork()
+s.postCreatedNetwork(ctx)

-if err = s.getAndStoreGuestDetails(); err != nil {
+if err = s.getAndStoreGuestDetails(ctx); err != nil {
return nil, err
}

// Create Containers
-if err = s.createContainers(); err != nil {
+if err = s.createContainers(ctx); err != nil {
return nil, err
}

// The sandbox is completely created now, we can store it.
-if err = s.storeSandbox(); err != nil {
+if err = s.storeSandbox(ctx); err != nil {
return nil, err
}

@@ -157,14 +157,14 @@ func CleanupContainer(ctx context.Context, sandboxID, containerID string, force
if err != nil {
return err
}
-defer s.Release()
+defer s.Release(ctx)

-_, err = s.StopContainer(containerID, force)
+_, err = s.StopContainer(ctx, containerID, force)
if err != nil && !force {
return err
}

-_, err = s.DeleteContainer(containerID)
+_, err = s.DeleteContainer(ctx, containerID)
if err != nil && !force {
return err
}

@@ -173,11 +173,11 @@ func CleanupContainer(ctx context.Context, sandboxID, containerID string, force
return nil
}

-if err = s.Stop(force); err != nil && !force {
+if err = s.Stop(ctx, force); err != nil && !force {
return err
}

-if err = s.Delete(); err != nil {
+if err = s.Delete(ctx); err != nil {
return err
}

@@ -295,12 +295,12 @@ func TestCleanupContainer(t *testing.T) {
for _, contID := range contIDs {
contConfig := newTestContainerConfigNoop(contID)

-c, err := p.CreateContainer(contConfig)
+c, err := s.CreateContainer(context.Background(), contConfig)
if c == nil || err != nil {
t.Fatal(err)
}

-c, err = p.StartContainer(c.ID())
+c, err = p.StartContainer(context.Background(), c.ID())
if c == nil || err != nil {
t.Fatal(err)
}
@@ -6,6 +6,7 @@
package virtcontainers

import (
+"context"
"fmt"

"github.com/containernetworking/plugins/pkg/ns"

@@ -87,19 +88,19 @@ func (endpoint *BridgedMacvlanEndpoint) NetworkPair() *NetworkInterfacePair {

// Attach for virtual endpoint bridges the network pair and adds the
// tap interface of the network pair to the hypervisor.
-func (endpoint *BridgedMacvlanEndpoint) Attach(s *Sandbox) error {
+func (endpoint *BridgedMacvlanEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor
-if err := xConnectVMNetwork(endpoint, h); err != nil {
+if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual ep")
return err
}

-return h.addDevice(endpoint, netDev)
+return h.addDevice(ctx, endpoint, netDev)
}

// Detach for the virtual endpoint tears down the tap and bridge
// created for the veth interface.
-func (endpoint *BridgedMacvlanEndpoint) Detach(netNsCreated bool, netNsPath string) error {
+func (endpoint *BridgedMacvlanEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
// The network namespace would have been deleted at this point
// if it has not been created by virtcontainers.
if !netNsCreated {

@@ -112,12 +113,12 @@ func (endpoint *BridgedMacvlanEndpoint) Detach(netNsCreated bool, netNsPath stri
}

// HotAttach for physical endpoint not supported yet
-func (endpoint *BridgedMacvlanEndpoint) HotAttach(h hypervisor) error {
+func (endpoint *BridgedMacvlanEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
return fmt.Errorf("BridgedMacvlanEndpoint does not support Hot attach")
}

// HotDetach for physical endpoint not supported yet
-func (endpoint *BridgedMacvlanEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
+func (endpoint *BridgedMacvlanEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("BridgedMacvlanEndpoint does not support Hot detach")
}
@@ -169,7 +169,8 @@ func (clh *cloudHypervisor) checkVersion() error {
func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
clh.ctx = ctx

-span, _ := clh.trace("createSandbox")
+var span otelTrace.Span
+span, clh.ctx = clh.trace(clh.ctx, "createSandbox")
defer span.End()

err := hypervisorConfig.valid()

@@ -337,8 +338,8 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
}

// startSandbox will start the VMM and boot the virtual machine for the given sandbox.
-func (clh *cloudHypervisor) startSandbox(timeout int) error {
-span, _ := clh.trace("startSandbox")
+func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error {
+span, ctx := clh.trace(ctx, "startSandbox")
defer span.End()

ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)

@@ -378,7 +379,7 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {

pid, err := clh.LaunchClh()
if err != nil {
-if shutdownErr := clh.virtiofsd.Stop(); shutdownErr != nil {
+if shutdownErr := clh.virtiofsd.Stop(ctx); shutdownErr != nil {
clh.Logger().WithField("error", shutdownErr).Warn("error shutting down Virtiofsd")
}
return fmt.Errorf("failed to launch cloud-hypervisor: %q", err)

@@ -395,7 +396,7 @@ func (clh *cloudHypervisor) startSandbox(timeout int) error {

// getSandboxConsole builds the path of the console where we can read
// logs coming from the sandbox.
-func (clh *cloudHypervisor) getSandboxConsole(id string) (string, string, error) {
+func (clh *cloudHypervisor) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
clh.Logger().WithField("function", "getSandboxConsole").WithField("id", id).Info("Get Sandbox Console")
master, slave, err := console.NewPty()
if err != nil {

@@ -407,11 +408,11 @@ func (clh *cloudHypervisor) getSandboxConsole(id string) (string, string, error)
return consoleProtoPty, slave, nil
}

-func (clh *cloudHypervisor) disconnect() {
+func (clh *cloudHypervisor) disconnect(ctx context.Context) {
clh.Logger().WithField("function", "disconnect").Info("Disconnecting Sandbox Console")
}

-func (clh *cloudHypervisor) getThreadIDs() (vcpuThreadIDs, error) {
+func (clh *cloudHypervisor) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {

clh.Logger().WithField("function", "getThreadIDs").Info("get thread ID's")

@@ -473,8 +474,8 @@ func (clh *cloudHypervisor) hotPlugVFIODevice(device config.VFIODev) error {
return err
}

-func (clh *cloudHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-span, _ := clh.trace("hotplugAddDevice")
+func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+span, _ := clh.trace(ctx, "hotplugAddDevice")
defer span.End()

switch devType {

@@ -490,8 +491,8 @@ func (clh *cloudHypervisor) hotplugAddDevice(devInfo interface{}, devType device

}

-func (clh *cloudHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
-span, _ := clh.trace("hotplugRemoveDevice")
+func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
+span, ctx := clh.trace(ctx, "hotplugRemoveDevice")
defer span.End()

var deviceID string

@@ -525,7 +526,7 @@ func (clh *cloudHypervisor) hypervisorConfig() HypervisorConfig {
return clh.config
}

-func (clh *cloudHypervisor) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
+func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {

// TODO: Add support for virtio-mem

@@ -590,7 +591,7 @@ func (clh *cloudHypervisor) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint
return uint32(newMem.ToMiB()), memoryDevice{sizeMB: int(hotplugSize.ToMiB())}, nil
}

-func (clh *cloudHypervisor) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
+func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
cl := clh.client()

// Retrieve the number of current vCPUs via HTTP API

@@ -630,12 +631,12 @@ func (clh *cloudHypervisor) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, n
return currentVCPUs, newVCPUs, nil
}

-func (clh *cloudHypervisor) cleanup() error {
+func (clh *cloudHypervisor) cleanup(ctx context.Context) error {
clh.Logger().WithField("function", "cleanup").Info("cleanup")
return nil
}

-func (clh *cloudHypervisor) pauseSandbox() error {
+func (clh *cloudHypervisor) pauseSandbox(ctx context.Context) error {
clh.Logger().WithField("function", "pauseSandbox").Info("Pause Sandbox")
return nil
}
@ -645,24 +646,24 @@ func (clh *cloudHypervisor) saveSandbox() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) resumeSandbox() error {
|
||||
func (clh *cloudHypervisor) resumeSandbox(ctx context.Context) error {
|
||||
clh.Logger().WithField("function", "resumeSandbox").Info("Resume Sandbox")
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopSandbox will stop the Sandbox's VM.
|
||||
func (clh *cloudHypervisor) stopSandbox() (err error) {
|
||||
span, _ := clh.trace("stopSandbox")
|
||||
func (clh *cloudHypervisor) stopSandbox(ctx context.Context) (err error) {
|
||||
span, ctx := clh.trace(ctx, "stopSandbox")
|
||||
defer span.End()
|
||||
clh.Logger().WithField("function", "stopSandbox").Info("Stop Sandbox")
|
||||
return clh.terminate()
|
||||
return clh.terminate(ctx)
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error {
|
||||
return errors.New("cloudHypervisor is not supported by VM cache")
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) toGrpc() ([]byte, error) {
|
||||
func (clh *cloudHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
|
||||
return nil, errors.New("cloudHypervisor is not supported by VM cache")
|
||||
}
|
||||
|
||||
@ -697,8 +698,8 @@ func (clh *cloudHypervisor) getPids() []int {
|
||||
return pids
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) addDevice(devInfo interface{}, devType deviceType) error {
|
||||
span, _ := clh.trace("addDevice")
|
||||
func (clh *cloudHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
|
||||
span, _ := clh.trace(ctx, "addDevice")
|
||||
defer span.End()
|
||||
|
||||
var err error
|
||||
@ -731,8 +732,8 @@ func (clh *cloudHypervisor) Logger() *log.Entry {
|
||||
}
|
||||
|
||||
// Adds all capabilities supported by cloudHypervisor implementation of hypervisor interface
|
||||
func (clh *cloudHypervisor) capabilities() types.Capabilities {
|
||||
span, _ := clh.trace("capabilities")
|
||||
func (clh *cloudHypervisor) capabilities(ctx context.Context) types.Capabilities {
|
||||
span, _ := clh.trace(ctx, "capabilities")
|
||||
defer span.End()
|
||||
|
||||
clh.Logger().WithField("function", "capabilities").Info("get Capabilities")
|
||||
@ -742,21 +743,21 @@ func (clh *cloudHypervisor) capabilities() types.Capabilities {
|
||||
return caps
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) trace(name string) (otelTrace.Span, context.Context) {
|
||||
if clh.ctx == nil {
|
||||
func (clh *cloudHypervisor) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
|
||||
if parent == nil {
|
||||
clh.Logger().WithField("type", "bug").Error("trace called before context set")
|
||||
clh.ctx = context.Background()
|
||||
parent = context.Background()
|
||||
}
|
||||
|
||||
tracer := otel.Tracer("kata")
|
||||
ctx, span := tracer.Start(clh.ctx, name)
|
||||
ctx, span := tracer.Start(parent, name)
|
||||
span.SetAttributes([]otelLabel.KeyValue{otelLabel.Key("subsystem").String("hypervisor"), otelLabel.Key("type").String("clh")}...)
|
||||
|
||||
return span, ctx
|
||||
}
|
||||
|
||||
func (clh *cloudHypervisor) terminate() (err error) {
|
||||
span, _ := clh.trace("terminate")
|
||||
func (clh *cloudHypervisor) terminate(ctx context.Context) (err error) {
|
||||
span, ctx := clh.trace(ctx, "terminate")
|
||||
defer span.End()
|
||||
|
||||
pid := clh.state.PID
|
||||
@ -817,7 +818,7 @@ func (clh *cloudHypervisor) terminate() (err error) {
|
||||
}
|
||||
|
||||
clh.Logger().Debug("stop virtiofsd")
|
||||
if err = clh.virtiofsd.Stop(); err != nil {
|
||||
if err = clh.virtiofsd.Stop(ctx); err != nil {
|
||||
clh.Logger().Error("failed to stop virtiofsd")
|
||||
}
|
||||
|
||||
|
@ -266,7 +266,7 @@ func TestClooudHypervisorStartSandbox(t *testing.T) {
store: store,
}

err = clh.startSandbox(10)
err = clh.startSandbox(context.Background(), 10)
assert.NoError(err)
}

@ -300,7 +300,7 @@ func TestCloudHypervisorResizeMemory(t *testing.T) {
clh.APIClient = mockClient
clh.config = clhConfig

newMem, memDev, err := clh.resizeMemory(tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)
newMem, memDev, err := clh.resizeMemory(context.Background(), tt.args.reqMemMB, tt.args.memoryBlockSizeMB, false)

if (err != nil) != tt.wantErr {
t.Errorf("cloudHypervisor.resizeMemory() error = %v, expected to fail = %v", err, tt.wantErr)
@ -400,12 +400,12 @@ func TestCloudHypervisorHotplugRemoveDevice(t *testing.T) {
clh.config = clhConfig
clh.APIClient = &clhClientMock{}

_, err = clh.hotplugRemoveDevice(&config.BlockDrive{}, blockDev)
_, err = clh.hotplugRemoveDevice(context.Background(), &config.BlockDrive{}, blockDev)
assert.NoError(err, "Hotplug remove block device expected no error")

_, err = clh.hotplugRemoveDevice(&config.VFIODev{}, vfioDev)
_, err = clh.hotplugRemoveDevice(context.Background(), &config.VFIODev{}, vfioDev)
assert.NoError(err, "Hotplug remove vfio block device expected no error")

_, err = clh.hotplugRemoveDevice(nil, netDev)
_, err = clh.hotplugRemoveDevice(context.Background(), nil, netDev)
assert.Error(err, "Hotplug remove pmem block device expected error")
}

@ -353,14 +353,14 @@ func (c *Container) Logger() *logrus.Entry {
})
}

func (c *Container) trace(name string) (otelTrace.Span, context.Context) {
if c.ctx == nil {
func (c *Container) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
if parent == nil {
c.Logger().WithField("type", "bug").Error("trace called before context set")
c.ctx = context.Background()
parent = context.Background()
}

tracer := otel.Tracer("kata")
ctx, span := tracer.Start(c.ctx, name)
ctx, span := tracer.Start(parent, name)
span.SetAttributes(otelLabel.Key("subsystem").String("container"))

return span, ctx
@ -437,7 +437,7 @@ func (c *Container) setContainerState(state types.StateString) error {
return nil
}

func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, guestSharedDir string) (string, bool, error) {
func (c *Container) shareFiles(ctx context.Context, m Mount, idx int, hostSharedDir, hostMountDir, guestSharedDir string) (string, bool, error) {
randBytes, err := utils.GenerateRandomBytes(8)
if err != nil {
return "", false, err
@ -448,7 +448,7 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, gu

// copy file to contaier's rootfs if filesystem sharing is not supported, otherwise
// bind mount it in the shared directory.
caps := c.sandbox.hypervisor.capabilities()
caps := c.sandbox.hypervisor.capabilities(ctx)
if !caps.IsFsSharingSupported() {
c.Logger().Debug("filesystem sharing is not supported, files will be copied")

@ -466,13 +466,13 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, gu
return "", true, nil
}

if err := c.sandbox.agent.copyFile(m.Source, guestDest); err != nil {
if err := c.sandbox.agent.copyFile(ctx, m.Source, guestDest); err != nil {
return "", false, err
}
} else {
// These mounts are created in the shared dir
mountDest := filepath.Join(hostMountDir, filename)
if err := bindMount(c.ctx, m.Source, mountDest, m.ReadOnly, "private"); err != nil {
if err := bindMount(ctx, m.Source, mountDest, m.ReadOnly, "private"); err != nil {
return "", false, err
}
// Save HostPath mount value into the mount list of the container.
@ -494,14 +494,14 @@ func (c *Container) shareFiles(m Mount, idx int, hostSharedDir, hostMountDir, gu
// It also updates the container mount list with the HostPath info, and store
// container mounts to the storage. This way, we will have the HostPath info
// available when we will need to unmount those mounts.
func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestSharedDir string) (sharedDirMounts map[string]Mount, ignoredMounts map[string]Mount, err error) {
func (c *Container) mountSharedDirMounts(ctx context.Context, hostSharedDir, hostMountDir, guestSharedDir string) (sharedDirMounts map[string]Mount, ignoredMounts map[string]Mount, err error) {
sharedDirMounts = make(map[string]Mount)
ignoredMounts = make(map[string]Mount)
var devicesToDetach []string
defer func() {
if err != nil {
for _, id := range devicesToDetach {
c.sandbox.devManager.DetachDevice(id, c.sandbox)
c.sandbox.devManager.DetachDevice(ctx, id, c.sandbox)
}
}
}()
@ -517,7 +517,7 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestShare
// instead of passing this as a shared mount:
if len(m.BlockDeviceID) > 0 {
// Attach this block device, all other devices passed in the config have been attached at this point
if err = c.sandbox.devManager.AttachDevice(m.BlockDeviceID, c.sandbox); err != nil {
if err = c.sandbox.devManager.AttachDevice(ctx, m.BlockDeviceID, c.sandbox); err != nil {
return nil, nil, err
}
devicesToDetach = append(devicesToDetach, m.BlockDeviceID)
@ -545,7 +545,7 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestShare

var ignore bool
var guestDest string
guestDest, ignore, err = c.shareFiles(m, idx, hostSharedDir, hostMountDir, guestSharedDir)
guestDest, ignore, err = c.shareFiles(ctx, m, idx, hostSharedDir, hostMountDir, guestSharedDir)
if err != nil {
return nil, nil, err
}
@ -570,14 +570,14 @@ func (c *Container) mountSharedDirMounts(hostSharedDir, hostMountDir, guestShare
return sharedDirMounts, ignoredMounts, nil
}

func (c *Container) unmountHostMounts() error {
func (c *Container) unmountHostMounts(ctx context.Context) error {
var span otelTrace.Span
span, c.ctx = c.trace("unmountHostMounts")
span, ctx = c.trace(ctx, "unmountHostMounts")
defer span.End()

for _, m := range c.mounts {
if m.HostPath != "" {
span, _ := c.trace("unmount")
span, _ := c.trace(ctx, "unmount")
span.SetAttributes(otelLabel.Key("host-path").String(m.HostPath))

if err := syscall.Unmount(m.HostPath, syscall.MNT_DETACH|UmountNoFollow); err != nil {
@ -634,8 +634,8 @@ func filterDevices(c *Container, devices []ContainerDevice) (ret []ContainerDevi
// Add any mount based block devices to the device manager and save the
// device ID for the particular mount. This'll occur when the mountpoint source
// is a block device.
func (c *Container) createBlockDevices() error {
if !c.checkBlockDeviceSupport() {
func (c *Container) createBlockDevices(ctx context.Context) error {
if !c.checkBlockDeviceSupport(ctx) {
c.Logger().Warn("Block device not supported")
return nil
}
@ -699,8 +699,8 @@ func (c *Container) createBlockDevices() error {
}

// newContainer creates a Container structure from a sandbox and a container configuration.
func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, error) {
span, _ := sandbox.trace("newContainer")
func newContainer(ctx context.Context, sandbox *Sandbox, contConfig *ContainerConfig) (*Container, error) {
span, ctx := sandbox.trace(ctx, "newContainer")
defer span.End()

if !contConfig.valid() {
@ -734,7 +734,7 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
}

// If mounts are block devices, add to devmanager
if err := c.createMounts(); err != nil {
if err := c.createMounts(ctx); err != nil {
return nil, err
}

@ -746,9 +746,9 @@ func newContainer(sandbox *Sandbox, contConfig *ContainerConfig) (*Container, er
return c, nil
}

func (c *Container) createMounts() error {
func (c *Container) createMounts(ctx context.Context) error {
// Create block devices for newly created container
return c.createBlockDevices()
return c.createBlockDevices(ctx)
}

func (c *Container) createDevices(contConfig *ContainerConfig) error {
@ -777,25 +777,25 @@ func (c *Container) createDevices(contConfig *ContainerConfig) error {
// been performed before the container creation failed.
// - Unplug CPU and memory resources from the VM.
// - Unplug devices from the VM.
func (c *Container) rollbackFailingContainerCreation() {
if err := c.detachDevices(); err != nil {
func (c *Container) rollbackFailingContainerCreation(ctx context.Context) {
if err := c.detachDevices(ctx); err != nil {
c.Logger().WithError(err).Error("rollback failed detachDevices()")
}
if err := c.removeDrive(); err != nil {
if err := c.removeDrive(ctx); err != nil {
c.Logger().WithError(err).Error("rollback failed removeDrive()")
}
if err := c.unmountHostMounts(); err != nil {
if err := c.unmountHostMounts(ctx); err != nil {
c.Logger().WithError(err).Error("rollback failed unmountHostMounts()")
}
if err := bindUnmountContainerRootfs(c.ctx, getMountPath(c.sandbox.id), c.id); err != nil {
if err := bindUnmountContainerRootfs(ctx, getMountPath(c.sandbox.id), c.id); err != nil {
c.Logger().WithError(err).Error("rollback failed bindUnmountContainerRootfs()")
}
}

func (c *Container) checkBlockDeviceSupport() bool {
func (c *Container) checkBlockDeviceSupport(ctx context.Context) bool {
if !c.sandbox.config.HypervisorConfig.DisableBlockDeviceUse {
agentCaps := c.sandbox.agent.capabilities()
hypervisorCaps := c.sandbox.hypervisor.capabilities()
hypervisorCaps := c.sandbox.hypervisor.capabilities(ctx)

if agentCaps.IsBlockDeviceSupported() && hypervisorCaps.IsBlockDeviceHotplugSupported() {
return true
@ -807,19 +807,19 @@ func (c *Container) checkBlockDeviceSupport() bool {

// createContainer creates and start a container inside a Sandbox. It has to be
// called only when a new container, not known by the sandbox, has to be created.
func (c *Container) create() (err error) {
func (c *Container) create(ctx context.Context) (err error) {
// In case the container creation fails, the following takes care
// of rolling back all the actions previously performed.
defer func() {
if err != nil {
c.Logger().WithError(err).Error("container create failed")
c.rollbackFailingContainerCreation()
c.rollbackFailingContainerCreation(ctx)
}
}()

if c.checkBlockDeviceSupport() {
if c.checkBlockDeviceSupport(ctx) {
// If the rootfs is backed by a block device, go ahead and hotplug it to the guest
if err = c.hotplugDrive(); err != nil {
if err = c.hotplugDrive(ctx); err != nil {
return
}
}
@ -853,7 +853,7 @@ func (c *Container) create() (err error) {
"devices": normalAttachedDevs,
}).Info("normal attach devices")
if len(normalAttachedDevs) > 0 {
if err = c.attachDevices(normalAttachedDevs); err != nil {
if err = c.attachDevices(ctx, normalAttachedDevs); err != nil {
return
}
}
@ -862,7 +862,7 @@ func (c *Container) create() (err error) {
// inside the VM
c.getSystemMountInfo()

process, err := c.sandbox.agent.createContainer(c.sandbox, c)
process, err := c.sandbox.agent.createContainer(ctx, c.sandbox, c)
if err != nil {
return err
}
@ -874,7 +874,7 @@ func (c *Container) create() (err error) {
"machine_type": machineType,
"devices": delayAttachedDevs,
}).Info("lazy attach devices")
if err = c.attachDevices(delayAttachedDevs); err != nil {
if err = c.attachDevices(ctx, delayAttachedDevs); err != nil {
return
}
}
@ -892,7 +892,7 @@ func (c *Container) create() (err error) {
return nil
}

func (c *Container) delete() error {
func (c *Container) delete(ctx context.Context) error {
if c.state.State != types.StateReady &&
c.state.State != types.StateStopped {
return fmt.Errorf("Container not ready or stopped, impossible to delete")
@ -910,7 +910,7 @@ func (c *Container) delete() error {
}
}

return c.sandbox.storeSandbox()
return c.sandbox.storeSandbox(ctx)
}

// checkSandboxRunning validates the container state.
@ -943,7 +943,7 @@ func (c *Container) getSystemMountInfo() {
// TODO Deduce /dev/shm size. See https://github.com/clearcontainers/runtime/issues/138
}

func (c *Container) start() error {
func (c *Container) start(ctx context.Context) error {
if err := c.checkSandboxRunning("start"); err != nil {
return err
}
@ -957,10 +957,10 @@ func (c *Container) start() error {
return err
}

if err := c.sandbox.agent.startContainer(c.sandbox, c); err != nil {
if err := c.sandbox.agent.startContainer(ctx, c.sandbox, c); err != nil {
c.Logger().WithError(err).Error("Failed to start container")

if err := c.stop(true); err != nil {
if err := c.stop(ctx, true); err != nil {
c.Logger().WithError(err).Warn("Failed to stop container")
}
return err
@ -969,8 +969,9 @@ func (c *Container) start() error {
return c.setContainerState(types.StateRunning)
}

func (c *Container) stop(force bool) error {
span, _ := c.trace("stop")
func (c *Container) stop(ctx context.Context, force bool) error {
var span otelTrace.Span
span, ctx = c.trace(ctx, "stop")
defer span.End()

// In case the container status has been updated implicitly because
@ -992,13 +993,13 @@ func (c *Container) stop(force bool) error {
// Force the container to be killed. For most of the cases, this
// should not matter and it should return an error that will be
// ignored.
c.kill(syscall.SIGKILL, true)
c.kill(ctx, syscall.SIGKILL, true)

// Since the agent has supported the MultiWaitProcess, it's better to
// wait the process here to make sure the process has exited before to
// issue stopContainer, otherwise the RemoveContainerRequest in it will
// get failed if the process hasn't exited.
c.sandbox.agent.waitProcess(c, c.id)
c.sandbox.agent.waitProcess(ctx, c, c.id)

defer func() {
// Save device and drive data.
@ -1008,23 +1009,23 @@ func (c *Container) stop(force bool) error {
}
}()

if err := c.sandbox.agent.stopContainer(c.sandbox, *c); err != nil && !force {
if err := c.sandbox.agent.stopContainer(ctx, c.sandbox, *c); err != nil && !force {
return err
}

if err := c.unmountHostMounts(); err != nil && !force {
if err := c.unmountHostMounts(ctx); err != nil && !force {
return err
}

if err := bindUnmountContainerRootfs(c.ctx, getMountPath(c.sandbox.id), c.id); err != nil && !force {
if err := bindUnmountContainerRootfs(ctx, getMountPath(c.sandbox.id), c.id); err != nil && !force {
return err
}

if err := c.detachDevices(); err != nil && !force {
if err := c.detachDevices(ctx); err != nil && !force {
return err
}

if err := c.removeDrive(); err != nil && !force {
if err := c.removeDrive(ctx); err != nil && !force {
return err
}

@ -1043,7 +1044,7 @@ func (c *Container) stop(force bool) error {
return nil
}

func (c *Container) enter(cmd types.Cmd) (*Process, error) {
func (c *Container) enter(ctx context.Context, cmd types.Cmd) (*Process, error) {
if err := c.checkSandboxRunning("enter"); err != nil {
return nil, err
}
@ -1054,7 +1055,7 @@ func (c *Container) enter(cmd types.Cmd) (*Process, error) {
"impossible to enter")
}

process, err := c.sandbox.agent.exec(c.sandbox, *c, cmd)
process, err := c.sandbox.agent.exec(ctx, c.sandbox, *c, cmd)
if err != nil {
return nil, err
}
@ -1062,21 +1063,21 @@ func (c *Container) enter(cmd types.Cmd) (*Process, error) {
return process, nil
}

func (c *Container) wait(processID string) (int32, error) {
func (c *Container) wait(ctx context.Context, processID string) (int32, error) {
if c.state.State != types.StateReady &&
c.state.State != types.StateRunning {
return 0, fmt.Errorf("Container not ready or running, " +
"impossible to wait")
}

return c.sandbox.agent.waitProcess(c, processID)
return c.sandbox.agent.waitProcess(ctx, c, processID)
}

func (c *Container) kill(signal syscall.Signal, all bool) error {
return c.signalProcess(c.process.Token, signal, all)
func (c *Container) kill(ctx context.Context, signal syscall.Signal, all bool) error {
return c.signalProcess(ctx, c.process.Token, signal, all)
}

func (c *Container) signalProcess(processID string, signal syscall.Signal, all bool) error {
func (c *Container) signalProcess(ctx context.Context, processID string, signal syscall.Signal, all bool) error {
if c.sandbox.state.State != types.StateReady && c.sandbox.state.State != types.StateRunning {
return fmt.Errorf("Sandbox not ready or running, impossible to signal the container")
}
@ -1085,15 +1086,15 @@ func (c *Container) signalProcess(processID string, signal syscall.Signal, all b
return fmt.Errorf("Container not ready, running or paused, impossible to signal the container")
}

return c.sandbox.agent.signalProcess(c, processID, signal, all)
return c.sandbox.agent.signalProcess(ctx, c, processID, signal, all)
}

func (c *Container) winsizeProcess(processID string, height, width uint32) error {
func (c *Container) winsizeProcess(ctx context.Context, processID string, height, width uint32) error {
if c.state.State != types.StateReady && c.state.State != types.StateRunning {
return fmt.Errorf("Container not ready or running, impossible to signal the container")
}

return c.sandbox.agent.winsizeProcess(c, processID, height, width)
return c.sandbox.agent.winsizeProcess(ctx, c, processID, height, width)
}

func (c *Container) ioStream(processID string) (io.WriteCloser, io.Reader, io.Reader, error) {
@ -1106,7 +1107,7 @@ func (c *Container) ioStream(processID string) (io.WriteCloser, io.Reader, io.Re
return stream.stdin(), stream.stdout(), stream.stderr(), nil
}

func (c *Container) processList(options ProcessListOptions) (ProcessList, error) {
func (c *Container) processList(ctx context.Context, options ProcessListOptions) (ProcessList, error) {
if err := c.checkSandboxRunning("ps"); err != nil {
return nil, err
}
@ -1115,17 +1116,17 @@ func (c *Container) processList(options ProcessListOptions) (ProcessList, error)
return nil, fmt.Errorf("Container not running, impossible to list processes")
}

return c.sandbox.agent.processListContainer(c.sandbox, *c, options)
return c.sandbox.agent.processListContainer(ctx, c.sandbox, *c, options)
}

func (c *Container) stats() (*ContainerStats, error) {
func (c *Container) stats(ctx context.Context) (*ContainerStats, error) {
if err := c.checkSandboxRunning("stats"); err != nil {
return nil, err
}
return c.sandbox.agent.statsContainer(c.sandbox, *c)
return c.sandbox.agent.statsContainer(ctx, c.sandbox, *c)
}

func (c *Container) update(resources specs.LinuxResources) error {
func (c *Container) update(ctx context.Context, resources specs.LinuxResources) error {
if err := c.checkSandboxRunning("update"); err != nil {
return err
}
@ -1161,7 +1162,7 @@ func (c *Container) update(resources specs.LinuxResources) error {
c.config.Resources.Memory.Limit = mem.Limit
}

if err := c.sandbox.updateResources(); err != nil {
if err := c.sandbox.updateResources(ctx); err != nil {
return err
}

@ -1179,10 +1180,10 @@ func (c *Container) update(resources specs.LinuxResources) error {
resources.CPU.Cpus = ""
}

return c.sandbox.agent.updateContainer(c.sandbox, *c, resources)
return c.sandbox.agent.updateContainer(ctx, c.sandbox, *c, resources)
}

func (c *Container) pause() error {
func (c *Container) pause(ctx context.Context) error {
if err := c.checkSandboxRunning("pause"); err != nil {
return err
}
@ -1191,14 +1192,14 @@ func (c *Container) pause() error {
return fmt.Errorf("Container not running, impossible to pause")
}

if err := c.sandbox.agent.pauseContainer(c.sandbox, *c); err != nil {
if err := c.sandbox.agent.pauseContainer(ctx, c.sandbox, *c); err != nil {
return err
}

return c.setContainerState(types.StatePaused)
}

func (c *Container) resume() error {
func (c *Container) resume(ctx context.Context) error {
if err := c.checkSandboxRunning("resume"); err != nil {
return err
}
@ -1207,7 +1208,7 @@ func (c *Container) resume() error {
return fmt.Errorf("Container not paused, impossible to resume")
}

if err := c.sandbox.agent.resumeContainer(c.sandbox, *c); err != nil {
if err := c.sandbox.agent.resumeContainer(ctx, c.sandbox, *c); err != nil {
return err
}

@ -1216,7 +1217,7 @@ func (c *Container) resume() error {

// hotplugDrive will attempt to hotplug the container rootfs if it is backed by a
// block device
func (c *Container) hotplugDrive() error {
func (c *Container) hotplugDrive(ctx context.Context) error {
var dev device
var err error

@ -1276,7 +1277,7 @@ func (c *Container) hotplugDrive() error {
"fs-type": fsType,
}).Info("Block device detected")

if err = c.plugDevice(devicePath); err != nil {
if err = c.plugDevice(ctx, devicePath); err != nil {
return err
}

@ -1284,13 +1285,13 @@ func (c *Container) hotplugDrive() error {
}

// plugDevice will attach the rootfs if blockdevice is supported (this is rootfs specific)
func (c *Container) plugDevice(devicePath string) error {
func (c *Container) plugDevice(ctx context.Context, devicePath string) error {
var stat unix.Stat_t
if err := unix.Stat(devicePath, &stat); err != nil {
return fmt.Errorf("stat %q failed: %v", devicePath, err)
}

if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
if c.checkBlockDeviceSupport(ctx) && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
b, err := c.sandbox.devManager.NewDevice(config.DeviceInfo{
HostPath: devicePath,
ContainerPath: filepath.Join(kataGuestSharedDir(), c.id),
@ -1305,7 +1306,7 @@ func (c *Container) plugDevice(devicePath string) error {
c.state.BlockDeviceID = b.DeviceID()

// attach rootfs device
if err := c.sandbox.devManager.AttachDevice(b.DeviceID(), c.sandbox); err != nil {
if err := c.sandbox.devManager.AttachDevice(ctx, b.DeviceID(), c.sandbox); err != nil {
return err
}
}
@ -1317,12 +1318,12 @@ func (c *Container) isDriveUsed() bool {
return !(c.state.Fstype == "")
}

func (c *Container) removeDrive() (err error) {
func (c *Container) removeDrive(ctx context.Context) (err error) {
if c.isDriveUsed() {
c.Logger().Info("unplugging block device")

devID := c.state.BlockDeviceID
err := c.sandbox.devManager.DetachDevice(devID, c.sandbox)
err := c.sandbox.devManager.DetachDevice(ctx, devID, c.sandbox)
if err != nil && err != manager.ErrDeviceNotAttached {
return err
}
@ -1343,7 +1344,7 @@ func (c *Container) removeDrive() (err error) {
return nil
}

func (c *Container) attachDevices(devices []ContainerDevice) error {
func (c *Container) attachDevices(ctx context.Context, devices []ContainerDevice) error {
// there's no need to do rollback when error happens,
// because if attachDevices fails, container creation will fail too,
// and rollbackFailingContainerCreation could do all the rollbacks
@ -1352,16 +1353,16 @@ func (c *Container) attachDevices(devices []ContainerDevice) error {
// the devices need to be split into two lists, normalAttachedDevs and delayAttachedDevs.
// so c.device is not used here. See issue https://github.com/kata-containers/runtime/issues/2460.
for _, dev := range devices {
if err := c.sandbox.devManager.AttachDevice(dev.ID, c.sandbox); err != nil {
if err := c.sandbox.devManager.AttachDevice(ctx, dev.ID, c.sandbox); err != nil {
return err
}
}
return nil
}

func (c *Container) detachDevices() error {
func (c *Container) detachDevices(ctx context.Context) error {
for _, dev := range c.devices {
err := c.sandbox.devManager.DetachDevice(dev.ID, c.sandbox)
err := c.sandbox.devManager.DetachDevice(ctx, dev.ID, c.sandbox)
if err != nil && err != manager.ErrDeviceNotAttached {
return err
}

@ -7,6 +7,8 @@
package api

import (
"context"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
"github.com/sirupsen/logrus"
@ -29,8 +31,8 @@ func DeviceLogger() *logrus.Entry {
// a device should be attached/added/plugged to a DeviceReceiver
type DeviceReceiver interface {
// these are for hotplug/hot-unplug devices to/from hypervisor
HotplugAddDevice(Device, config.DeviceType) error
HotplugRemoveDevice(Device, config.DeviceType) error
HotplugAddDevice(context.Context, Device, config.DeviceType) error
HotplugRemoveDevice(context.Context, Device, config.DeviceType) error

// this is only for virtio-blk and virtio-scsi support
GetAndSetSandboxBlockIndex() (int, error)
@ -38,13 +40,13 @@ type DeviceReceiver interface {
GetHypervisorType() string

// this is for appending device to hypervisor boot params
AppendDevice(Device) error
AppendDevice(context.Context, Device) error
}

// Device is the virtcontainers device interface.
type Device interface {
Attach(DeviceReceiver) error
Detach(DeviceReceiver) error
Attach(context.Context, DeviceReceiver) error
Detach(context.Context, DeviceReceiver) error

// ID returns device identifier
DeviceID() string
@ -87,8 +89,8 @@ type Device interface {
type DeviceManager interface {
NewDevice(config.DeviceInfo) (Device, error)
RemoveDevice(string) error
AttachDevice(string, DeviceReceiver) error
DetachDevice(string, DeviceReceiver) error
AttachDevice(context.Context, string, DeviceReceiver) error
DetachDevice(context.Context, string, DeviceReceiver) error
IsDeviceAttached(string) bool
GetDeviceByID(string) Device
GetAllDevices() []Device

@ -6,6 +6,8 @@
package api

import (
"context"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
)

@ -13,12 +15,12 @@ import (
type MockDeviceReceiver struct{}

// HotplugAddDevice adds a new device
func (mockDC *MockDeviceReceiver) HotplugAddDevice(Device, config.DeviceType) error {
func (mockDC *MockDeviceReceiver) HotplugAddDevice(context.Context, Device, config.DeviceType) error {
return nil
}

// HotplugRemoveDevice removes a device
func (mockDC *MockDeviceReceiver) HotplugRemoveDevice(Device, config.DeviceType) error {
func (mockDC *MockDeviceReceiver) HotplugRemoveDevice(context.Context, Device, config.DeviceType) error {
return nil
}

@ -33,7 +35,7 @@ func (mockDC *MockDeviceReceiver) UnsetSandboxBlockIndex(int) error {
}

// AppendDevice adds new vhost user device
func (mockDC *MockDeviceReceiver) AppendDevice(Device) error {
func (mockDC *MockDeviceReceiver) AppendDevice(context.Context, Device) error {
return nil
}

@ -7,6 +7,7 @@
package drivers

import (
"context"
"path/filepath"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@ -35,7 +36,7 @@ func NewBlockDevice(devInfo *config.DeviceInfo) *BlockDevice {

// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
func (device *BlockDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
skip, err := device.bumpAttachCount(true)
if err != nil {
return err
@ -112,7 +113,7 @@ func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {

deviceLogger().WithField("device", device.DeviceInfo.HostPath).WithField("VirtPath", drive.VirtPath).Infof("Attaching %s device", customOptions["block-driver"])
device.BlockDrive = drive
if err = devReceiver.HotplugAddDevice(device, config.DeviceBlock); err != nil {
if err = devReceiver.HotplugAddDevice(ctx, device, config.DeviceBlock); err != nil {
return err
}

@ -121,7 +122,7 @@ func (device *BlockDevice) Attach(devReceiver api.DeviceReceiver) (err error) {

// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
func (device *BlockDevice) Detach(devReceiver api.DeviceReceiver) error {
func (device *BlockDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
skip, err := device.bumpAttachCount(false)
if err != nil {
return err
@ -140,7 +141,7 @@ func (device *BlockDevice) Detach(devReceiver api.DeviceReceiver) error {

deviceLogger().WithField("device", device.DeviceInfo.HostPath).Info("Unplugging block device")

if err = devReceiver.HotplugRemoveDevice(device, config.DeviceBlock); err != nil {
if err = devReceiver.HotplugRemoveDevice(ctx, device, config.DeviceBlock); err != nil {
deviceLogger().WithError(err).Error("Failed to unplug block device")
return err
}

@ -7,6 +7,7 @@
package drivers

import (
"context"
"fmt"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@ -32,13 +33,13 @@ func NewGenericDevice(devInfo *config.DeviceInfo) *GenericDevice {
}

// Attach is standard interface of api.Device
func (device *GenericDevice) Attach(devReceiver api.DeviceReceiver) error {
func (device *GenericDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) error {
_, err := device.bumpAttachCount(true)
return err
}

// Detach is standard interface of api.Device
func (device *GenericDevice) Detach(devReceiver api.DeviceReceiver) error {
func (device *GenericDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
_, err := device.bumpAttachCount(false)
return err
}

@ -7,6 +7,7 @@
package drivers

import (
"context"
"fmt"
"io/ioutil"
"os"
@ -56,7 +57,7 @@ func NewVFIODevice(devInfo *config.DeviceInfo) *VFIODevice {

// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
func (device *VFIODevice) Attach(devReceiver api.DeviceReceiver) (retErr error) {
func (device *VFIODevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (retErr error) {
skip, err := device.bumpAttachCount(true)
if err != nil {
return err
@ -105,13 +106,13 @@ func (device *VFIODevice) Attach(devReceiver api.DeviceReceiver) (retErr error)
deviceLogger().WithField("cold-plug", coldPlug).Info("Attaching VFIO device")

if coldPlug {
if err := devReceiver.AppendDevice(device); err != nil {
if err := devReceiver.AppendDevice(ctx, device); err != nil {
deviceLogger().WithError(err).Error("Failed to append device")
return err
}
} else {
// hotplug a VFIO device is actually hotplugging a group of iommu devices
if err := devReceiver.HotplugAddDevice(device, config.DeviceVFIO); err != nil {
if err := devReceiver.HotplugAddDevice(ctx, device, config.DeviceVFIO); err != nil {
deviceLogger().WithError(err).Error("Failed to add device")
return err
}
@ -126,7 +127,7 @@ func (device *VFIODevice) Attach(devReceiver api.DeviceReceiver) (retErr error)

// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
func (device *VFIODevice) Detach(devReceiver api.DeviceReceiver) (retErr error) {
func (device *VFIODevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) (retErr error) {
skip, err := device.bumpAttachCount(false)
if err != nil {
return err
@ -151,7 +152,7 @@ func (device *VFIODevice) Detach(devReceiver api.DeviceReceiver) (retErr error)
}

// hotplug a VFIO device is actually hotplugging a group of iommu devices
if err := devReceiver.HotplugRemoveDevice(device, config.DeviceVFIO); err != nil {
if err := devReceiver.HotplugRemoveDevice(ctx, device, config.DeviceVFIO); err != nil {
deviceLogger().WithError(err).Error("Failed to remove device")
return err
}

@ -7,6 +7,8 @@
package drivers

import (
"context"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/config"
persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
@ -36,7 +38,7 @@ func NewVhostUserBlkDevice(devInfo *config.DeviceInfo) *VhostUserBlkDevice {

// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
func (device *VhostUserBlkDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
skip, err := device.bumpAttachCount(true)
if err != nil {
return err
@ -85,7 +87,7 @@ func (device *VhostUserBlkDevice) Attach(devReceiver api.DeviceReceiver) (err er
}).Info("Attaching device")

device.VhostUserDeviceAttrs = vAttrs
if err = devReceiver.HotplugAddDevice(device, config.VhostUserBlk); err != nil {
if err = devReceiver.HotplugAddDevice(ctx, device, config.VhostUserBlk); err != nil {
return err
}

@ -114,7 +116,7 @@ func isVirtioBlkBlockDriver(customOptions map[string]string) bool {

// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
func (device *VhostUserBlkDevice) Detach(devReceiver api.DeviceReceiver) error {
func (device *VhostUserBlkDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
skip, err := device.bumpAttachCount(false)
if err != nil {
return err
@ -136,7 +138,7 @@ func (device *VhostUserBlkDevice) Detach(devReceiver api.DeviceReceiver) error {

deviceLogger().WithField("device", device.DeviceInfo.HostPath).Info("Unplugging vhost-user-blk device")

if err = devReceiver.HotplugRemoveDevice(device, config.VhostUserBlk); err != nil {
if err = devReceiver.HotplugRemoveDevice(ctx, device, config.VhostUserBlk); err != nil {
deviceLogger().WithError(err).Error("Failed to unplug vhost-user-blk device")
return err
}

@ -6,6 +6,7 @@
package drivers

import (
"context"
"encoding/hex"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@ -21,7 +22,7 @@ type VhostUserFSDevice struct {

// Device interface

func (device *VhostUserFSDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
func (device *VhostUserFSDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
skip, err := device.bumpAttachCount(true)
if err != nil {
return err
@ -46,10 +47,10 @@ func (device *VhostUserFSDevice) Attach(devReceiver api.DeviceReceiver) (err err
device.DevID = id
device.Type = device.DeviceType()

return devReceiver.AppendDevice(device)
return devReceiver.AppendDevice(ctx, device)
}

func (device *VhostUserFSDevice) Detach(devReceiver api.DeviceReceiver) error {
func (device *VhostUserFSDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
_, err := device.bumpAttachCount(false)
return err
}

@ -7,6 +7,7 @@
package drivers

import (
"context"
"encoding/hex"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@ -27,7 +28,7 @@ type VhostUserNetDevice struct {

// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
func (device *VhostUserNetDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
func (device *VhostUserNetDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
skip, err := device.bumpAttachCount(true)
if err != nil {
return err
@ -52,12 +53,12 @@ func (device *VhostUserNetDevice) Attach(devReceiver api.DeviceReceiver) (err er
device.DevID = id
device.Type = device.DeviceType()

return devReceiver.AppendDevice(device)
return devReceiver.AppendDevice(ctx, device)
}

// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
func (device *VhostUserNetDevice) Detach(devReceiver api.DeviceReceiver) error {
func (device *VhostUserNetDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
_, err := device.bumpAttachCount(false)
return err
}

@ -7,6 +7,7 @@
package drivers

import (
"context"
"encoding/hex"

"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/device/api"
@ -27,7 +28,7 @@ type VhostUserSCSIDevice struct {

// Attach is standard interface of api.Device, it's used to add device to some
// DeviceReceiver
func (device *VhostUserSCSIDevice) Attach(devReceiver api.DeviceReceiver) (err error) {
func (device *VhostUserSCSIDevice) Attach(ctx context.Context, devReceiver api.DeviceReceiver) (err error) {
skip, err := device.bumpAttachCount(true)
if err != nil {
return err
@ -52,12 +53,12 @@ func (device *VhostUserSCSIDevice) Attach(devReceiver api.DeviceReceiver) (err e
device.DevID = id
device.Type = device.DeviceType()

return devReceiver.AppendDevice(device)
return devReceiver.AppendDevice(ctx, device)
}

// Detach is standard interface of api.Device, it's used to remove device from some
// DeviceReceiver
func (device *VhostUserSCSIDevice) Detach(devReceiver api.DeviceReceiver) error {
func (device *VhostUserSCSIDevice) Detach(ctx context.Context, devReceiver api.DeviceReceiver) error {
_, err := device.bumpAttachCount(false)
return err
}

@ -7,6 +7,7 @@
package manager

import (
"context"
"encoding/hex"
"errors"
"sync"
@ -189,7 +190,7 @@ func (dm *deviceManager) newDeviceID() (string, error) {
return "", ErrIDExhausted
}

func (dm *deviceManager) AttachDevice(id string, dr api.DeviceReceiver) error {
func (dm *deviceManager) AttachDevice(ctx context.Context, id string, dr api.DeviceReceiver) error {
dm.Lock()
defer dm.Unlock()

@ -198,13 +199,13 @@ func (dm *deviceManager) AttachDevice(id string, dr api.DeviceReceiver) error {
return ErrDeviceNotExist
}

if err := d.Attach(dr); err != nil {
if err := d.Attach(ctx, dr); err != nil {
return err
}
return nil
}

func (dm *deviceManager) DetachDevice(id string, dr api.DeviceReceiver) error {
func (dm *deviceManager) DetachDevice(ctx context.Context, id string, dr api.DeviceReceiver) error {
dm.Lock()
defer dm.Unlock()

@ -216,7 +217,7 @@ func (dm *deviceManager) DetachDevice(id string, dr api.DeviceReceiver) error {
return ErrDeviceNotAttached
}

if err := d.Detach(dr); err != nil {
if err := d.Detach(ctx, dr); err != nil {
return err
}
return nil

@ -7,6 +7,7 @@
package manager

import (
"context"
"fmt"
"io/ioutil"
"os"
@ -150,10 +151,10 @@ func TestAttachVFIODevice(t *testing.T) {
assert.True(t, ok)

devReceiver := &api.MockDeviceReceiver{}
err = device.Attach(devReceiver)
err = device.Attach(context.Background(), devReceiver)
assert.Nil(t, err)

err = device.Detach(devReceiver)
err = device.Detach(context.Background(), devReceiver)
assert.Nil(t, err)
}

@ -178,7 +179,7 @@ func TestAttachGenericDevice(t *testing.T) {
err = device.Attach(devReceiver)
assert.Nil(t, err)

err = device.Detach(devReceiver)
err = device.Detach(context.Background(), devReceiver)
assert.Nil(t, err)
}

@ -200,10 +201,10 @@ func TestAttachBlockDevice(t *testing.T) {
_, ok := device.(*drivers.BlockDevice)
assert.True(t, ok)

err = device.Attach(devReceiver)
err = device.Attach(context.Background(), devReceiver)
assert.Nil(t, err)

err = device.Detach(devReceiver)
err = device.Detach(context.Background(), devReceiver)
assert.Nil(t, err)

// test virtio SCSI driver
@ -213,7 +214,7 @@ func TestAttachBlockDevice(t *testing.T) {
err = device.Attach(devReceiver)
assert.Nil(t, err)

err = device.Detach(devReceiver)
err = device.Detach(context.Background(), devReceiver)
assert.Nil(t, err)
}

@ -287,10 +288,10 @@ func TestAttachVhostUserBlkDevice(t *testing.T) {
_, ok := device.(*drivers.VhostUserBlkDevice)
assert.True(t, ok)

err = device.Attach(devReceiver)
err = device.Attach(context.Background(), devReceiver)
assert.Nil(t, err)

err = device.Detach(devReceiver)
err = device.Detach(context.Background(), devReceiver)
assert.Nil(t, err)
}

@ -309,15 +310,15 @@ func TestAttachDetachDevice(t *testing.T) {
assert.Nil(t, err)

// attach non-exist device
err = dm.AttachDevice("non-exist", devReceiver)
err = dm.AttachDevice(context.Background(), "non-exist", devReceiver)
assert.NotNil(t, err)

// attach device
err = dm.AttachDevice(device.DeviceID(), devReceiver)
err = dm.AttachDevice(context.Background(), device.DeviceID(), devReceiver)
assert.Nil(t, err)
assert.Equal(t, device.GetAttachCount(), uint(1), "attach device count should be 1")
// attach device again(twice)
err = dm.AttachDevice(device.DeviceID(), devReceiver)
err = dm.AttachDevice(context.Background(), device.DeviceID(), devReceiver)
assert.Nil(t, err)
assert.Equal(t, device.GetAttachCount(), uint(2), "attach device count should be 2")

@ -325,15 +326,15 @@ func TestAttachDetachDevice(t *testing.T) {
assert.True(t, attached)

// detach device
err = dm.DetachDevice(device.DeviceID(), devReceiver)
err = dm.DetachDevice(context.Background(), device.DeviceID(), devReceiver)
assert.Nil(t, err)
assert.Equal(t, device.GetAttachCount(), uint(1), "attach device count should be 1")
// detach device again(twice)
err = dm.DetachDevice(device.DeviceID(), devReceiver)
err = dm.DetachDevice(context.Background(), device.DeviceID(), devReceiver)
assert.Nil(t, err)
assert.Equal(t, device.GetAttachCount(), uint(0), "attach device count should be 0")
// detach device again should report error
err = dm.DetachDevice(device.DeviceID(), devReceiver)
err = dm.DetachDevice(context.Background(), device.DeviceID(), devReceiver)
assert.NotNil(t, err)
assert.Equal(t, err, ErrDeviceNotAttached, "")
assert.Equal(t, device.GetAttachCount(), uint(0), "attach device count should be 0")

@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"fmt"

persistapi "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist/api"
@ -23,10 +24,10 @@ type Endpoint interface {

SetProperties(NetworkInfo)
SetPciPath(vcTypes.PciPath)
Attach(*Sandbox) error
Detach(netNsCreated bool, netNsPath string) error
HotAttach(h hypervisor) error
HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error
Attach(context.Context, *Sandbox) error
Detach(ctx context.Context, netNsCreated bool, netNsPath string) error
HotAttach(ctx context.Context, h hypervisor) error
HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error

save() persistapi.NetworkEndpoint
load(persistapi.NetworkEndpoint)

@ -62,8 +62,8 @@ func New(ctx context.Context, count uint, b base.FactoryBase) base.FactoryBase {
c.removeFromVmm(vm)
case <-closed:
c.removeFromVmm(vm)
vm.Stop()
vm.Disconnect()
vm.Stop(ctx)
vm.Disconnect(ctx)
c.wg.Done()
return
}

@ -35,9 +35,9 @@ func (d *direct) GetBaseVM(ctx context.Context, config vc.VMConfig) (*vc.VM, err
return nil, err
}

err = vm.Pause()
err = vm.Pause(ctx)
if err != nil {
vm.Stop()
vm.Stop(ctx)
return nil, err
}

@ -141,7 +141,7 @@ func (f *factory) checkConfig(config vc.VMConfig) error {

// GetVM returns a working blank VM created by the factory.
func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
span, _ := trace(ctx, "GetVM")
span, ctx := trace(ctx, "GetVM")
defer span.End()

hypervisorConfig := config.HypervisorConfig
@ -167,23 +167,23 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
defer func() {
if err != nil {
f.log().WithError(err).Error("clean up vm")
vm.Stop()
vm.Stop(ctx)
}
}()

err = vm.Resume()
err = vm.Resume(ctx)
if err != nil {
return nil, err
}

// reseed RNG so that shared memory VMs do not generate same random numbers.
err = vm.ReseedRNG()
err = vm.ReseedRNG(ctx)
if err != nil {
return nil, err
}

// sync guest time since we might have paused it for a long time.
err = vm.SyncTime()
err = vm.SyncTime(ctx)
if err != nil {
return nil, err
}
@ -191,7 +191,7 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
online := false
baseConfig := f.base.Config().HypervisorConfig
if baseConfig.NumVCPUs < hypervisorConfig.NumVCPUs {
err = vm.AddCPUs(hypervisorConfig.NumVCPUs - baseConfig.NumVCPUs)
err = vm.AddCPUs(ctx, hypervisorConfig.NumVCPUs - baseConfig.NumVCPUs)
if err != nil {
return nil, err
}
@ -199,7 +199,7 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
}

if baseConfig.MemorySize < hypervisorConfig.MemorySize {
err = vm.AddMemory(hypervisorConfig.MemorySize - baseConfig.MemorySize)
err = vm.AddMemory(ctx, hypervisorConfig.MemorySize - baseConfig.MemorySize)
if err != nil {
return nil, err
}
@ -207,7 +207,7 @@ func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error)
}

if online {
err = vm.OnlineCPUMemory()
err = vm.OnlineCPUMemory(ctx)
if err != nil {
return nil, err
}

@ -124,9 +124,9 @@ func (t *template) createTemplateVM(ctx context.Context) error {
if err != nil {
return err
}
defer vm.Stop()
defer vm.Stop(ctx)

if err = vm.Disconnect(); err != nil {
if err = vm.Disconnect(ctx); err != nil {
return err
}

@ -139,7 +139,7 @@ func (t *template) createTemplateVM(ctx context.Context) error {
// created from template, so it worth the invest.
time.Sleep(templateWaitForAgent)

if err = vm.Pause(); err != nil {
if err = vm.Pause(ctx); err != nil {
return err
}

@ -168,14 +168,14 @@ func (fc *firecracker) Logger() *logrus.Entry {
return virtLog.WithField("subsystem", "firecracker")
}

func (fc *firecracker) trace(name string) (otelTrace.Span, context.Context) {
if fc.ctx == nil {
func (fc *firecracker) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
if parent == nil {
fc.Logger().WithField("type", "bug").Error("trace called before context set")
fc.ctx = context.Background()
parent = context.Background()
}

tracer := otel.Tracer("kata")
ctx, span := tracer.Start(fc.ctx, name)
ctx, span := tracer.Start(parent, name)
span.SetAttributes([]otelLabel.KeyValue{otelLabel.Key("subsystem").String("hypervisor"), otelLabel.Key("type").String("firecracker")}...)

return span, ctx
@ -199,7 +199,8 @@ func (fc *firecracker) truncateID(id string) string {
func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error {
fc.ctx = ctx

span, _ := fc.trace("createSandbox")
var span otelTrace.Span
span, ctx = fc.trace(ctx, "createSandbox")
defer span.End()

//TODO: check validity of the hypervisor config provided
@ -241,8 +242,8 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
return nil
}

func (fc *firecracker) newFireClient() *client.Firecracker {
span, _ := fc.trace("newFireClient")
func (fc *firecracker) newFireClient(ctx context.Context) *client.Firecracker {
span, _ := fc.trace(ctx, "newFireClient")
defer span.End()
httpClient := client.NewHTTPClient(strfmt.NewFormats())

@ -266,8 +267,8 @@ func (fc *firecracker) newFireClient() *client.Firecracker {
return httpClient
}

func (fc *firecracker) vmRunning() bool {
resp, err := fc.client().Operations.DescribeInstance(nil)
func (fc *firecracker) vmRunning(ctx context.Context) bool {
resp, err := fc.client(ctx).Operations.DescribeInstance(nil)
if err != nil {
fc.Logger().WithError(err).Error("getting vm status failed")
return false
@ -323,8 +324,8 @@ func (fc *firecracker) checkVersion(version string) error {
}

// waitVMMRunning will wait for timeout seconds for the VMM to be up and running.
func (fc *firecracker) waitVMMRunning(timeout int) error {
span, _ := fc.trace("wait VMM to be running")
func (fc *firecracker) waitVMMRunning(ctx context.Context, timeout int) error {
span, ctx := fc.trace(ctx, "wait VMM to be running")
defer span.End()

if timeout < 0 {
@ -333,7 +334,7 @@ func (fc *firecracker) waitVMMRunning(timeout int) error {

timeStart := time.Now()
for {
if fc.vmRunning() {
if fc.vmRunning(ctx) {
return nil
}

@ -345,8 +346,8 @@ func (fc *firecracker) waitVMMRunning(timeout int) error {
}
}

func (fc *firecracker) fcInit(timeout int) error {
span, _ := fc.trace("fcInit")
func (fc *firecracker) fcInit(ctx context.Context, timeout int) error {
span, ctx := fc.trace(ctx, "fcInit")
defer span.End()

var err error
@ -411,17 +412,17 @@ func (fc *firecracker) fcInit(timeout int) error {

fc.info.PID = cmd.Process.Pid
fc.firecrackerd = cmd
fc.connection = fc.newFireClient()
fc.connection = fc.newFireClient(ctx)

if err := fc.waitVMMRunning(timeout); err != nil {
if err := fc.waitVMMRunning(ctx, timeout); err != nil {
fc.Logger().WithField("fcInit failed:", err).Debug()
return err
}
return nil
}

func (fc *firecracker) fcEnd() (err error) {
span, _ := fc.trace("fcEnd")
func (fc *firecracker) fcEnd(ctx context.Context) (err error) {
span, _ := fc.trace(ctx, "fcEnd")
defer span.End()

fc.Logger().Info("Stopping firecracker VM")
@ -465,12 +466,12 @@ func (fc *firecracker) fcEnd() (err error) {
return syscall.Kill(pid, syscall.SIGKILL)
}

func (fc *firecracker) client() *client.Firecracker {
span, _ := fc.trace("client")
func (fc *firecracker) client(ctx context.Context) *client.Firecracker {
span, ctx := fc.trace(ctx, "client")
defer span.End()

if fc.connection == nil {
fc.connection = fc.newFireClient()
fc.connection = fc.newFireClient(ctx)
}

return fc.connection
@ -532,8 +533,8 @@ func (fc *firecracker) fcJailResource(src, dst string) (string, error) {
return absPath, nil
}

func (fc *firecracker) fcSetBootSource(path, params string) error {
span, _ := fc.trace("fcSetBootSource")
func (fc *firecracker) fcSetBootSource(ctx context.Context, path, params string) error {
span, _ := fc.trace(ctx, "fcSetBootSource")
defer span.End()
fc.Logger().WithFields(logrus.Fields{"kernel-path": path,
"kernel-params": params}).Debug("fcSetBootSource")
@ -553,8 +554,8 @@ func (fc *firecracker) fcSetBootSource(path, params string) error {
return nil
}

func (fc *firecracker) fcSetVMRootfs(path string) error {
span, _ := fc.trace("fcSetVMRootfs")
func (fc *firecracker) fcSetVMRootfs(ctx context.Context, path string) error {
span, _ := fc.trace(ctx, "fcSetVMRootfs")
defer span.End()

jailedRootfs, err := fc.fcJailResource(path, fcRootfs)
@ -580,8 +581,8 @@ func (fc *firecracker) fcSetVMRootfs(path string) error {
return nil
}

func (fc *firecracker) fcSetVMBaseConfig(mem int64, vcpus int64, htEnabled bool) {
span, _ := fc.trace("fcSetVMBaseConfig")
func (fc *firecracker) fcSetVMBaseConfig(ctx context.Context, mem int64, vcpus int64, htEnabled bool) {
span, _ := fc.trace(ctx, "fcSetVMBaseConfig")
defer span.End()
fc.Logger().WithFields(logrus.Fields{"mem": mem,
"vcpus": vcpus,
@ -596,8 +597,8 @@ func (fc *firecracker) fcSetVMBaseConfig(mem int64, vcpus int64, htEnabled bool)
fc.fcConfig.MachineConfig = cfg
}

func (fc *firecracker) fcSetLogger() error {
span, _ := fc.trace("fcSetLogger")
func (fc *firecracker) fcSetLogger(ctx context.Context) error {
span, _ := fc.trace(ctx, "fcSetLogger")
defer span.End()

fcLogLevel := "Error"
@ -673,7 +674,7 @@ func (fc *firecracker) fcListenToFifo(fifoName string, consumer fifoConsumer) (s
return jailedFifoPath, nil
}

func (fc *firecracker) fcInitConfiguration() error {
func (fc *firecracker) fcInitConfiguration(ctx context.Context) error {
// Firecracker API socket(firecracker.socket) is automatically created
// under /run dir.
err := os.MkdirAll(filepath.Join(fc.jailerRoot, "run"), DirMode)
@ -695,7 +696,7 @@ func (fc *firecracker) fcInitConfiguration() error {
}
}

fc.fcSetVMBaseConfig(int64(fc.config.MemorySize),
fc.fcSetVMBaseConfig(ctx, int64(fc.config.MemorySize),
int64(fc.config.NumVCPUs), false)

kernelPath, err := fc.config.KernelAssetPath()
@ -716,7 +717,7 @@ func (fc *firecracker) fcInitConfiguration() error {
kernelParams := append(fc.config.KernelParams, fcKernelParams...)
strParams := SerializeParams(kernelParams, "=")
formattedParams := strings.Join(strParams, " ")
if err := fc.fcSetBootSource(kernelPath, formattedParams); err != nil {
if err := fc.fcSetBootSource(ctx, kernelPath, formattedParams); err != nil {
return err
}

@ -732,21 +733,21 @@ func (fc *firecracker) fcInitConfiguration() error {
}
}

if err := fc.fcSetVMRootfs(image); err != nil {
if err := fc.fcSetVMRootfs(ctx, image); err != nil {
return err
}

if err := fc.createDiskPool(); err != nil {
|
||||
if err := fc.createDiskPool(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fc.fcSetLogger(); err != nil {
|
||||
if err := fc.fcSetLogger(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fc.state.set(cfReady)
|
||||
for _, d := range fc.pendingDevices {
|
||||
if err := fc.addDevice(d.dev, d.devType); err != nil {
|
||||
if err := fc.addDevice(ctx, d.dev, d.devType); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -760,11 +761,11 @@ func (fc *firecracker) fcInitConfiguration() error {
|
||||
// startSandbox will start the hypervisor for the given sandbox.
|
||||
// In the context of firecracker, this will start the hypervisor,
|
||||
// for configuration, but not yet start the actual virtual machine
|
||||
func (fc *firecracker) startSandbox(timeout int) error {
|
||||
span, _ := fc.trace("startSandbox")
|
||||
func (fc *firecracker) startSandbox(ctx context.Context, timeout int) error {
|
||||
span, ctx := fc.trace(ctx, "startSandbox")
|
||||
defer span.End()
|
||||
|
||||
if err := fc.fcInitConfiguration(); err != nil {
|
||||
if err := fc.fcInitConfiguration(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -780,7 +781,7 @@ func (fc *firecracker) startSandbox(timeout int) error {
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
fc.fcEnd()
|
||||
fc.fcEnd(ctx)
|
||||
}
|
||||
}()
|
||||
|
||||
@ -793,7 +794,7 @@ func (fc *firecracker) startSandbox(timeout int) error {
|
||||
}
|
||||
defer label.SetProcessLabel("")
|
||||
|
||||
err = fc.fcInit(fcTimeout)
|
||||
err = fc.fcInit(ctx, fcTimeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -812,8 +813,8 @@ func fcDriveIndexToID(i int) string {
|
||||
return "drive_" + strconv.Itoa(i)
|
||||
}
|
||||
|
||||
func (fc *firecracker) createDiskPool() error {
|
||||
span, _ := fc.trace("createDiskPool")
|
||||
func (fc *firecracker) createDiskPool(ctx context.Context) error {
|
||||
span, _ := fc.trace(ctx, "createDiskPool")
|
||||
defer span.End()
|
||||
|
||||
for i := 0; i < fcDiskPoolSize; i++ {
|
||||
@ -850,8 +851,8 @@ func (fc *firecracker) umountResource(jailedPath string) {
|
||||
}
|
||||
|
||||
// cleanup all jail artifacts
|
||||
func (fc *firecracker) cleanupJail() {
|
||||
span, _ := fc.trace("cleanupJail")
|
||||
func (fc *firecracker) cleanupJail(ctx context.Context) {
|
||||
span, _ := fc.trace(ctx, "cleanupJail")
|
||||
defer span.End()
|
||||
|
||||
fc.umountResource(fcKernel)
|
||||
@ -873,14 +874,14 @@ func (fc *firecracker) cleanupJail() {
|
||||
}
|
||||
|
||||
// stopSandbox will stop the Sandbox's VM.
|
||||
func (fc *firecracker) stopSandbox() (err error) {
|
||||
span, _ := fc.trace("stopSandbox")
|
||||
func (fc *firecracker) stopSandbox(ctx context.Context) (err error) {
|
||||
span, ctx := fc.trace(ctx, "stopSandbox")
|
||||
defer span.End()
|
||||
|
||||
return fc.fcEnd()
|
||||
return fc.fcEnd(ctx)
|
||||
}
|
||||
|
||||
func (fc *firecracker) pauseSandbox() error {
|
||||
func (fc *firecracker) pauseSandbox(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -888,12 +889,12 @@ func (fc *firecracker) saveSandbox() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fc *firecracker) resumeSandbox() error {
|
||||
func (fc *firecracker) resumeSandbox(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fc *firecracker) fcAddVsock(hvs types.HybridVSock) {
|
||||
span, _ := fc.trace("fcAddVsock")
|
||||
func (fc *firecracker) fcAddVsock(ctx context.Context, hvs types.HybridVSock) {
|
||||
span, _ := fc.trace(ctx, "fcAddVsock")
|
||||
defer span.End()
|
||||
|
||||
udsPath := hvs.UdsPath
|
||||
@ -912,8 +913,8 @@ func (fc *firecracker) fcAddVsock(hvs types.HybridVSock) {
|
||||
fc.fcConfig.Vsock = vsock
|
||||
}
|
||||
|
||||
func (fc *firecracker) fcAddNetDevice(endpoint Endpoint) {
|
||||
span, _ := fc.trace("fcAddNetDevice")
|
||||
func (fc *firecracker) fcAddNetDevice(ctx context.Context, endpoint Endpoint) {
|
||||
span, _ := fc.trace(ctx, "fcAddNetDevice")
|
||||
defer span.End()
|
||||
|
||||
ifaceID := endpoint.Name()
|
||||
@ -968,8 +969,8 @@ func (fc *firecracker) fcAddNetDevice(endpoint Endpoint) {
|
||||
fc.fcConfig.NetworkInterfaces = append(fc.fcConfig.NetworkInterfaces, ifaceCfg)
|
||||
}
|
||||
|
||||
func (fc *firecracker) fcAddBlockDrive(drive config.BlockDrive) error {
|
||||
span, _ := fc.trace("fcAddBlockDrive")
|
||||
func (fc *firecracker) fcAddBlockDrive(ctx context.Context, drive config.BlockDrive) error {
|
||||
span, _ := fc.trace(ctx, "fcAddBlockDrive")
|
||||
defer span.End()
|
||||
|
||||
driveID := drive.ID
|
||||
@ -994,8 +995,8 @@ func (fc *firecracker) fcAddBlockDrive(drive config.BlockDrive) error {
|
||||
}
|
||||
|
||||
// Firecracker supports replacing the host drive used once the VM has booted up
|
||||
func (fc *firecracker) fcUpdateBlockDrive(path, id string) error {
|
||||
span, _ := fc.trace("fcUpdateBlockDrive")
|
||||
func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string) error {
|
||||
span, ctx := fc.trace(ctx, "fcUpdateBlockDrive")
|
||||
defer span.End()
|
||||
|
||||
// Use the global block index as an index into the pool of the devices
|
||||
@ -1009,7 +1010,7 @@ func (fc *firecracker) fcUpdateBlockDrive(path, id string) error {
|
||||
}
|
||||
|
||||
driveParams.SetBody(driveFc)
|
||||
if _, err := fc.client().Operations.PatchGuestDriveByID(driveParams); err != nil {
|
||||
if _, err := fc.client(ctx).Operations.PatchGuestDriveByID(driveParams); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -1018,8 +1019,8 @@ func (fc *firecracker) fcUpdateBlockDrive(path, id string) error {
|
||||
|
||||
// addDevice will add extra devices to firecracker. Limited to configure before the
|
||||
// virtual machine starts. Devices include drivers and network interfaces only.
|
||||
func (fc *firecracker) addDevice(devInfo interface{}, devType deviceType) error {
|
||||
span, _ := fc.trace("addDevice")
|
||||
func (fc *firecracker) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
|
||||
span, ctx := fc.trace(ctx, "addDevice")
|
||||
defer span.End()
|
||||
|
||||
fc.state.RLock()
|
||||
@ -1039,13 +1040,13 @@ func (fc *firecracker) addDevice(devInfo interface{}, devType deviceType) error
|
||||
switch v := devInfo.(type) {
|
||||
case Endpoint:
|
||||
fc.Logger().WithField("device-type-endpoint", devInfo).Info("Adding device")
|
||||
fc.fcAddNetDevice(v)
|
||||
fc.fcAddNetDevice(ctx, v)
|
||||
case config.BlockDrive:
|
||||
fc.Logger().WithField("device-type-blockdrive", devInfo).Info("Adding device")
|
||||
err = fc.fcAddBlockDrive(v)
|
||||
err = fc.fcAddBlockDrive(ctx, v)
|
||||
case types.HybridVSock:
|
||||
fc.Logger().WithField("device-type-hybrid-vsock", devInfo).Info("Adding device")
|
||||
fc.fcAddVsock(v)
|
||||
fc.fcAddVsock(ctx, v)
|
||||
default:
|
||||
fc.Logger().WithField("unknown-device-type", devInfo).Error("Adding device")
|
||||
}
|
||||
@ -1055,7 +1056,7 @@ func (fc *firecracker) addDevice(devInfo interface{}, devType deviceType) error
|
||||
|
||||
// hotplugBlockDevice supported in Firecracker VMM
|
||||
// hot add or remove a block device.
|
||||
func (fc *firecracker) hotplugBlockDevice(drive config.BlockDrive, op operation) (interface{}, error) {
|
||||
func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.BlockDrive, op operation) (interface{}, error) {
|
||||
var path string
|
||||
var err error
|
||||
driveID := fcDriveIndexToID(drive.Index)
|
||||
@ -1075,17 +1076,17 @@ func (fc *firecracker) hotplugBlockDevice(drive config.BlockDrive, op operation)
|
||||
path = filepath.Join(fc.jailerRoot, driveID)
|
||||
}
|
||||
|
||||
return nil, fc.fcUpdateBlockDrive(path, driveID)
|
||||
return nil, fc.fcUpdateBlockDrive(ctx, path, driveID)
|
||||
}
|
||||
|
||||
// hotplugAddDevice supported in Firecracker VMM
|
||||
func (fc *firecracker) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, _ := fc.trace("hotplugAddDevice")
|
||||
func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, ctx := fc.trace(ctx, "hotplugAddDevice")
|
||||
defer span.End()
|
||||
|
||||
switch devType {
|
||||
case blockDev:
|
||||
return fc.hotplugBlockDevice(*devInfo.(*config.BlockDrive), addDevice)
|
||||
return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), addDevice)
|
||||
default:
|
||||
fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
|
||||
"deviceType": devType}).Warn("hotplugAddDevice: unsupported device")
|
||||
@ -1095,13 +1096,13 @@ func (fc *firecracker) hotplugAddDevice(devInfo interface{}, devType deviceType)
|
||||
}
|
||||
|
||||
// hotplugRemoveDevice supported in Firecracker VMM
|
||||
func (fc *firecracker) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, _ := fc.trace("hotplugRemoveDevice")
|
||||
func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, ctx := fc.trace(ctx, "hotplugRemoveDevice")
|
||||
defer span.End()
|
||||
|
||||
switch devType {
|
||||
case blockDev:
|
||||
return fc.hotplugBlockDevice(*devInfo.(*config.BlockDrive), removeDevice)
|
||||
return fc.hotplugBlockDevice(ctx, *devInfo.(*config.BlockDrive), removeDevice)
|
||||
default:
|
||||
fc.Logger().WithFields(logrus.Fields{"devInfo": devInfo,
|
||||
"deviceType": devType}).Error("hotplugRemoveDevice: unsupported device")
|
||||
@ -1112,7 +1113,7 @@ func (fc *firecracker) hotplugRemoveDevice(devInfo interface{}, devType deviceTy
|
||||
|
||||
// getSandboxConsole builds the path of the console where we can read
|
||||
// logs coming from the sandbox.
|
||||
func (fc *firecracker) getSandboxConsole(id string) (string, string, error) {
|
||||
func (fc *firecracker) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
|
||||
master, slave, err := console.NewPty()
|
||||
if err != nil {
|
||||
fc.Logger().Debugf("Error create pseudo tty: %v", err)
|
||||
@ -1123,13 +1124,13 @@ func (fc *firecracker) getSandboxConsole(id string) (string, string, error) {
|
||||
return consoleProtoPty, slave, nil
|
||||
}
|
||||
|
||||
func (fc *firecracker) disconnect() {
|
||||
func (fc *firecracker) disconnect(ctx context.Context) {
|
||||
fc.state.set(notReady)
|
||||
}
|
||||
|
||||
// Adds all capabilities supported by firecracker implementation of hypervisor interface
|
||||
func (fc *firecracker) capabilities() types.Capabilities {
|
||||
span, _ := fc.trace("capabilities")
|
||||
func (fc *firecracker) capabilities(ctx context.Context) types.Capabilities {
|
||||
span, _ := fc.trace(ctx, "capabilities")
|
||||
defer span.End()
|
||||
var caps types.Capabilities
|
||||
caps.SetBlockDeviceHotplugSupport()
|
||||
@ -1141,11 +1142,11 @@ func (fc *firecracker) hypervisorConfig() HypervisorConfig {
|
||||
return fc.config
|
||||
}
|
||||
|
||||
func (fc *firecracker) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
|
||||
func (fc *firecracker) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
|
||||
return 0, memoryDevice{}, nil
|
||||
}
|
||||
|
||||
func (fc *firecracker) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
|
||||
func (fc *firecracker) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
@ -1153,7 +1154,7 @@ func (fc *firecracker) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCP
|
||||
//
|
||||
// As suggested by https://github.com/firecracker-microvm/firecracker/issues/718,
|
||||
// let's use `ps -T -p <pid>` to get fc vcpu info.
|
||||
func (fc *firecracker) getThreadIDs() (vcpuThreadIDs, error) {
|
||||
func (fc *firecracker) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
|
||||
var vcpuInfo vcpuThreadIDs
|
||||
|
||||
vcpuInfo.vcpus = make(map[int]int)
|
||||
@ -1187,8 +1188,8 @@ func (fc *firecracker) getThreadIDs() (vcpuThreadIDs, error) {
|
||||
return vcpuInfo, nil
|
||||
}
|
||||
|
||||
func (fc *firecracker) cleanup() error {
|
||||
fc.cleanupJail()
|
||||
func (fc *firecracker) cleanup(ctx context.Context) error {
|
||||
fc.cleanupJail(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1200,7 +1201,7 @@ func (fc *firecracker) fromGrpc(ctx context.Context, hypervisorConfig *Hyperviso
|
||||
return errors.New("firecracker is not supported by VM cache")
|
||||
}
|
||||
|
||||
func (fc *firecracker) toGrpc() ([]byte, error) {
|
||||
func (fc *firecracker) toGrpc(ctx context.Context) ([]byte, error) {
|
||||
return nil, errors.New("firecracker is not supported by VM cache")
|
||||
}
|
||||
|
||||
|
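The firecracker changes above all follow one pattern: a method receives a context.Context, starts its span from that parent, and hands the returned child context to any callee that also traces, so spans nest in call order. The sketch below is a minimal, hypothetical illustration of that threading with the OpenTelemetry API; the tracer name, function names, and error handling are placeholders, not the runtime's actual code.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	otelTrace "go.opentelemetry.io/otel/trace"
)

// trace starts a span as a child of parent and returns the new context so
// that callees invoked with it nest under this span in the trace output.
func trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
	ctx, span := otel.Tracer("kata").Start(parent, name)
	return span, ctx
}

func fcInit(ctx context.Context) error {
	span, ctx := trace(ctx, "fcInit")
	defer span.End()

	// waitVMMRunning receives the child context, so its span is ordered
	// under "fcInit" instead of floating at the root of the trace.
	return waitVMMRunning(ctx)
}

func waitVMMRunning(ctx context.Context) error {
	span, _ := trace(ctx, "wait VMM to be running")
	defer span.End()
	return nil
}

func main() {
	_ = fcInit(context.Background())
}

Discarding the returned context (the old span, _ := form) is what previously broke the ordering: children then attached to whatever context the struct happened to cache rather than to the caller's span.
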
@ -786,27 +786,27 @@ func generateVMSocket(id string, vmStogarePath string) (interface{}, error) {
// The default hypervisor implementation is Qemu.
type hypervisor interface {
createSandbox(ctx context.Context, id string, networkNS NetworkNamespace, hypervisorConfig *HypervisorConfig) error
startSandbox(timeout int) error
stopSandbox() error
pauseSandbox() error
startSandbox(ctx context.Context, timeout int) error
stopSandbox(ctx context.Context) error
pauseSandbox(ctx context.Context) error
saveSandbox() error
resumeSandbox() error
addDevice(devInfo interface{}, devType deviceType) error
hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error)
hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error)
resizeMemory(memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error)
resizeVCPUs(vcpus uint32) (uint32, uint32, error)
getSandboxConsole(sandboxID string) (string, string, error)
disconnect()
capabilities() types.Capabilities
resumeSandbox(ctx context.Context) error
addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error
hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error)
hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error)
resizeMemory(ctx context.Context, memMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error)
resizeVCPUs(ctx context.Context, vcpus uint32) (uint32, uint32, error)
getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error)
disconnect(ctx context.Context)
capabilities(ctx context.Context) types.Capabilities
hypervisorConfig() HypervisorConfig
getThreadIDs() (vcpuThreadIDs, error)
cleanup() error
getThreadIDs(ctx context.Context) (vcpuThreadIDs, error)
cleanup(ctx context.Context) error
// getPids returns a slice of hypervisor related process ids.
// The hypervisor pid must be put at index 0.
getPids() []int
fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig, j []byte) error
toGrpc() ([]byte, error)
toGrpc(ctx context.Context) ([]byte, error)
check() error

save() persistapi.HypervisorState

@ -38,44 +38,44 @@ type VCSandbox interface {
ID() string
SetAnnotations(annotations map[string]string) error

Stats() (SandboxStats, error)
Stats(ctx context.Context) (SandboxStats, error)

Start() error
Stop(force bool) error
Release() error
Monitor() (chan error, error)
Delete() error
Start(ctx context.Context) error
Stop(ctx context.Context, force bool) error
Release(ctx context.Context) error
Monitor(ctx context.Context) (chan error, error)
Delete(ctx context.Context) error
Status() SandboxStatus
CreateContainer(contConfig ContainerConfig) (VCContainer, error)
DeleteContainer(contID string) (VCContainer, error)
StartContainer(containerID string) (VCContainer, error)
StopContainer(containerID string, force bool) (VCContainer, error)
KillContainer(containerID string, signal syscall.Signal, all bool) error
CreateContainer(ctx context.Context, contConfig ContainerConfig) (VCContainer, error)
DeleteContainer(ctx context.Context, contID string) (VCContainer, error)
StartContainer(ctx context.Context, containerID string) (VCContainer, error)
StopContainer(ctx context.Context, containerID string, force bool) (VCContainer, error)
KillContainer(ctx context.Context, containerID string, signal syscall.Signal, all bool) error
StatusContainer(containerID string) (ContainerStatus, error)
StatsContainer(containerID string) (ContainerStats, error)
PauseContainer(containerID string) error
ResumeContainer(containerID string) error
EnterContainer(containerID string, cmd types.Cmd) (VCContainer, *Process, error)
UpdateContainer(containerID string, resources specs.LinuxResources) error
ProcessListContainer(containerID string, options ProcessListOptions) (ProcessList, error)
WaitProcess(containerID, processID string) (int32, error)
SignalProcess(containerID, processID string, signal syscall.Signal, all bool) error
WinsizeProcess(containerID, processID string, height, width uint32) error
StatsContainer(ctx context.Context, containerID string) (ContainerStats, error)
PauseContainer(ctx context.Context, containerID string) error
ResumeContainer(ctx context.Context, containerID string) error
EnterContainer(ctx context.Context, containerID string, cmd types.Cmd) (VCContainer, *Process, error)
UpdateContainer(ctx context.Context, containerID string, resources specs.LinuxResources) error
ProcessListContainer(ctx context.Context, containerID string, options ProcessListOptions) (ProcessList, error)
WaitProcess(ctx context.Context, containerID, processID string) (int32, error)
SignalProcess(ctx context.Context, containerID, processID string, signal syscall.Signal, all bool) error
WinsizeProcess(ctx context.Context, containerID, processID string, height, width uint32) error
IOStream(containerID, processID string) (io.WriteCloser, io.Reader, io.Reader, error)

AddDevice(info config.DeviceInfo) (api.Device, error)
AddDevice(ctx context.Context, info config.DeviceInfo) (api.Device, error)

AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error)
RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error)
ListInterfaces() ([]*pbTypes.Interface, error)
UpdateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error)
ListRoutes() ([]*pbTypes.Route, error)
AddInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error)
RemoveInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error)
ListInterfaces(ctx context.Context) ([]*pbTypes.Interface, error)
UpdateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error)
ListRoutes(ctx context.Context) ([]*pbTypes.Route, error)

GetOOMEvent() (string, error)
GetOOMEvent(ctx context.Context) (string, error)
GetHypervisorPid() (int, error)

UpdateRuntimeMetrics() error
GetAgentMetrics() (string, error)
GetAgentMetrics(ctx context.Context) (string, error)
GetAgentURL() (string, error)
}

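With the VCSandbox methods widened above, callers in the shim can hand their per-request context straight into the sandbox API, and every span created further down the stack becomes a child of that request. The snippet below is a hedged sketch of such a caller; containerStopper is a hypothetical, simplified stand-in for VCSandbox (the real methods also return the affected VCContainer), and noopSandbox exists only to make the example runnable.

package main

import (
	"context"
	"fmt"
)

// containerStopper is a hypothetical, narrowed subset of VCSandbox.
type containerStopper interface {
	StopContainer(ctx context.Context, containerID string, force bool) error
	DeleteContainer(ctx context.Context, containerID string) error
}

// stopAndDelete threads one request-scoped context through both calls, so
// their spans share the same parent in the resulting trace.
func stopAndDelete(ctx context.Context, s containerStopper, id string) error {
	if err := s.StopContainer(ctx, id, false); err != nil {
		return err
	}
	return s.DeleteContainer(ctx, id)
}

type noopSandbox struct{}

func (noopSandbox) StopContainer(ctx context.Context, id string, force bool) error { return nil }
func (noopSandbox) DeleteContainer(ctx context.Context, id string) error           { return nil }

func main() {
	fmt.Println(stopAndDelete(context.Background(), noopSandbox{}, "container-1"))
}
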
@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"errors"
"io"
)
@ -58,7 +59,8 @@ func (s *stdinStream) Write(data []byte) (n int, err error) {
return 0, errors.New("stream closed")
}

return s.sandbox.agent.writeProcessStdin(s.container, s.process, data)
// can not pass context to Write(), so use background context
return s.sandbox.agent.writeProcessStdin(context.Background(), s.container, s.process, data)
}

func (s *stdinStream) Close() error {
@ -66,7 +68,8 @@ func (s *stdinStream) Close() error {
return errors.New("stream closed")
}

err := s.sandbox.agent.closeProcessStdin(s.container, s.process)
// can not pass context to Close(), so use background context
err := s.sandbox.agent.closeProcessStdin(context.Background(), s.container, s.process)
if err == nil {
s.closed = true
}
@ -79,7 +82,8 @@ func (s *stdoutStream) Read(data []byte) (n int, err error) {
return 0, errors.New("stream closed")
}

return s.sandbox.agent.readProcessStdout(s.container, s.process, data)
// can not pass context to Read(), so use background context
return s.sandbox.agent.readProcessStdout(context.Background(), s.container, s.process, data)
}

func (s *stderrStream) Read(data []byte) (n int, err error) {
@ -87,5 +91,6 @@ func (s *stderrStream) Read(data []byte) (n int, err error) {
return 0, errors.New("stream closed")
}

return s.sandbox.agent.readProcessStderr(s.container, s.process, data)
// can not pass context to Read(), so use background context
return s.sandbox.agent.readProcessStderr(context.Background(), s.container, s.process, data)
}

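The stream adapters above are the one place the commit cannot thread a caller context: io.Reader, io.Writer, and io.Closer fix the method signatures, so the agent calls fall back to context.Background() and their spans start fresh traces rather than nesting under a request span. A small, hypothetical sketch of that constraint, with the context-aware send function as a placeholder:

package main

import (
	"context"
	"fmt"
)

// stdinWriter adapts a context-aware send function to io.Writer. Write has
// no context parameter, so a background context is the only option here.
type stdinWriter struct {
	send func(ctx context.Context, data []byte) (int, error)
}

func (w *stdinWriter) Write(p []byte) (int, error) {
	return w.send(context.Background(), p)
}

func main() {
	w := &stdinWriter{send: func(ctx context.Context, data []byte) (int, error) {
		return len(data), nil // placeholder for the real agent call
	}}
	n, _ := w.Write([]byte("hello"))
	fmt.Println(n)
}
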
@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"fmt"

"github.com/containernetworking/plugins/pkg/ns"
@ -90,19 +91,19 @@ func (endpoint *IPVlanEndpoint) NetworkPair() *NetworkInterfacePair {

// Attach for virtual endpoint bridges the network pair and adds the
// tap interface of the network pair to the hypervisor.
func (endpoint *IPVlanEndpoint) Attach(s *Sandbox) error {
func (endpoint *IPVlanEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor
if err := xConnectVMNetwork(endpoint, h); err != nil {
if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual ep")
return err
}

return h.addDevice(endpoint, netDev)
return h.addDevice(ctx, endpoint, netDev)
}

// Detach for the virtual endpoint tears down the tap and bridge
// created for the veth interface.
func (endpoint *IPVlanEndpoint) Detach(netNsCreated bool, netNsPath string) error {
func (endpoint *IPVlanEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
// The network namespace would have been deleted at this point
// if it has not been created by virtcontainers.
if !netNsCreated {
@ -115,12 +116,12 @@ func (endpoint *IPVlanEndpoint) Detach(netNsCreated bool, netNsPath string) erro
}

// HotAttach for physical endpoint not supported yet
func (endpoint *IPVlanEndpoint) HotAttach(h hypervisor) error {
func (endpoint *IPVlanEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
return fmt.Errorf("IPVlanEndpoint does not support Hot attach")
}

// HotDetach for physical endpoint not supported yet
func (endpoint *IPVlanEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *IPVlanEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("IPVlanEndpoint does not support Hot detach")
}

@ -236,14 +236,14 @@ type kataAgent struct {
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (k *kataAgent) trace(name string) (otelTrace.Span, context.Context) {
|
||||
if k.ctx == nil {
|
||||
func (k *kataAgent) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
|
||||
if parent == nil {
|
||||
k.Logger().WithField("type", "bug").Error("trace called before context set")
|
||||
k.ctx = context.Background()
|
||||
parent = context.Background()
|
||||
}
|
||||
|
||||
tracer := otel.Tracer("kata")
|
||||
ctx, span := tracer.Start(k.ctx, name)
|
||||
ctx, span := tracer.Start(parent, name)
|
||||
span.SetAttributes([]label.KeyValue{label.Key("subsystem").String("agent"), label.Key("type").String("kata")}...)
|
||||
|
||||
return span, ctx
|
||||
@ -332,7 +332,7 @@ func (k *kataAgent) init(ctx context.Context, sandbox *Sandbox, config KataAgent
|
||||
// save
|
||||
k.ctx = sandbox.ctx
|
||||
|
||||
span, _ := k.trace("init")
|
||||
span, _ := k.trace(ctx, "init")
|
||||
defer span.End()
|
||||
|
||||
disableVMShutdown = k.handleTraceSettings(config)
|
||||
@ -425,7 +425,7 @@ func cleanupSandboxBindMounts(sandbox *Sandbox) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interface{}) error {
|
||||
func (k *kataAgent) configure(ctx context.Context, h hypervisor, id, sharePath string, config interface{}) error {
|
||||
err := k.internalConfigure(h, id, config)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -433,11 +433,11 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interfa
|
||||
|
||||
switch s := k.vmSocket.(type) {
|
||||
case types.VSock:
|
||||
if err = h.addDevice(s, vSockPCIDev); err != nil {
|
||||
if err = h.addDevice(ctx, s, vSockPCIDev); err != nil {
|
||||
return err
|
||||
}
|
||||
case types.HybridVSock:
|
||||
err = h.addDevice(s, hybridVirtioVsockDev)
|
||||
err = h.addDevice(ctx, s, hybridVirtioVsockDev)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -448,7 +448,7 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interfa
|
||||
|
||||
// Neither create shared directory nor add 9p device if hypervisor
|
||||
// doesn't support filesystem sharing.
|
||||
caps := h.capabilities()
|
||||
caps := h.capabilities(ctx)
|
||||
if !caps.IsFsSharingSupported() {
|
||||
return nil
|
||||
}
|
||||
@ -464,14 +464,14 @@ func (k *kataAgent) configure(h hypervisor, id, sharePath string, config interfa
|
||||
return err
|
||||
}
|
||||
|
||||
return h.addDevice(sharedVolume, fsDev)
|
||||
return h.addDevice(ctx, sharedVolume, fsDev)
|
||||
}
|
||||
|
||||
func (k *kataAgent) configureFromGrpc(h hypervisor, id string, config interface{}) error {
|
||||
return k.internalConfigure(h, id, config)
|
||||
}
|
||||
|
||||
func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
|
||||
func (k *kataAgent) setupSharedPath(ctx context.Context, sandbox *Sandbox) error {
|
||||
// create shared path structure
|
||||
sharePath := getSharePath(sandbox.id)
|
||||
mountPath := getMountPath(sandbox.id)
|
||||
@ -483,7 +483,7 @@ func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
|
||||
}
|
||||
|
||||
// slave mount so that future mountpoints under mountPath are shown in sharePath as well
|
||||
if err := bindMount(context.Background(), mountPath, sharePath, true, "slave"); err != nil {
|
||||
if err := bindMount(ctx, mountPath, sharePath, true, "slave"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -495,14 +495,14 @@ func (k *kataAgent) setupSharedPath(sandbox *Sandbox) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) createSandbox(sandbox *Sandbox) error {
|
||||
span, _ := k.trace("createSandbox")
|
||||
func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
|
||||
span, ctx := k.trace(ctx, "createSandbox")
|
||||
defer span.End()
|
||||
|
||||
if err := k.setupSharedPath(sandbox); err != nil {
|
||||
if err := k.setupSharedPath(ctx, sandbox); err != nil {
|
||||
return err
|
||||
}
|
||||
return k.configure(sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), sandbox.config.AgentConfig)
|
||||
return k.configure(ctx, sandbox.hypervisor, sandbox.id, getSharePath(sandbox.id), sandbox.config.AgentConfig)
|
||||
}
|
||||
|
||||
func cmdToKataProcess(cmd types.Cmd) (process *grpc.Process, err error) {
|
||||
@ -582,8 +582,8 @@ func cmdEnvsToStringSlice(ev []types.EnvVar) []string {
|
||||
return env
|
||||
}
|
||||
|
||||
func (k *kataAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
|
||||
span, _ := k.trace("exec")
|
||||
func (k *kataAgent) exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
|
||||
span, ctx := k.trace(ctx, "exec")
|
||||
defer span.End()
|
||||
|
||||
var kataProcess *grpc.Process
|
||||
@ -599,19 +599,19 @@ func (k *kataAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process
|
||||
Process: kataProcess,
|
||||
}
|
||||
|
||||
if _, err := k.sendReq(req); err != nil {
|
||||
if _, err := k.sendReq(ctx, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buildProcessFromExecID(req.ExecId)
|
||||
}
|
||||
|
||||
func (k *kataAgent) updateInterface(ifc *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
func (k *kataAgent) updateInterface(ctx context.Context, ifc *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
// send update interface request
|
||||
ifcReq := &grpc.UpdateInterfaceRequest{
|
||||
Interface: ifc,
|
||||
}
|
||||
resultingInterface, err := k.sendReq(ifcReq)
|
||||
resultingInterface, err := k.sendReq(ctx, ifcReq)
|
||||
if err != nil {
|
||||
k.Logger().WithFields(logrus.Fields{
|
||||
"interface-requested": fmt.Sprintf("%+v", ifc),
|
||||
@ -624,23 +624,23 @@ func (k *kataAgent) updateInterface(ifc *pbTypes.Interface) (*pbTypes.Interface,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (k *kataAgent) updateInterfaces(interfaces []*pbTypes.Interface) error {
|
||||
func (k *kataAgent) updateInterfaces(ctx context.Context, interfaces []*pbTypes.Interface) error {
|
||||
for _, ifc := range interfaces {
|
||||
if _, err := k.updateInterface(ifc); err != nil {
|
||||
if _, err := k.updateInterface(ctx, ifc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
|
||||
func (k *kataAgent) updateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
|
||||
if routes != nil {
|
||||
routesReq := &grpc.UpdateRoutesRequest{
|
||||
Routes: &grpc.Routes{
|
||||
Routes: routes,
|
||||
},
|
||||
}
|
||||
resultingRoutes, err := k.sendReq(routesReq)
|
||||
resultingRoutes, err := k.sendReq(ctx, routesReq)
|
||||
if err != nil {
|
||||
k.Logger().WithFields(logrus.Fields{
|
||||
"routes-requested": fmt.Sprintf("%+v", routes),
|
||||
@ -656,14 +656,14 @@ func (k *kataAgent) updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, err
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) addARPNeighbors(neighs []*pbTypes.ARPNeighbor) error {
|
||||
func (k *kataAgent) addARPNeighbors(ctx context.Context, neighs []*pbTypes.ARPNeighbor) error {
|
||||
if neighs != nil {
|
||||
neighsReq := &grpc.AddARPNeighborsRequest{
|
||||
Neighbors: &grpc.ARPNeighbors{
|
||||
ARPNeighbors: neighs,
|
||||
},
|
||||
}
|
||||
_, err := k.sendReq(neighsReq)
|
||||
_, err := k.sendReq(ctx, neighsReq)
|
||||
if err != nil {
|
||||
if grpcStatus.Convert(err).Code() == codes.Unimplemented {
|
||||
k.Logger().WithFields(logrus.Fields{
|
||||
@ -680,9 +680,9 @@ func (k *kataAgent) addARPNeighbors(neighs []*pbTypes.ARPNeighbor) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) listInterfaces() ([]*pbTypes.Interface, error) {
|
||||
func (k *kataAgent) listInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
|
||||
req := &grpc.ListInterfacesRequest{}
|
||||
resultingInterfaces, err := k.sendReq(req)
|
||||
resultingInterfaces, err := k.sendReq(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -693,9 +693,9 @@ func (k *kataAgent) listInterfaces() ([]*pbTypes.Interface, error) {
|
||||
return resultInterfaces.Interfaces, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) listRoutes() ([]*pbTypes.Route, error) {
|
||||
func (k *kataAgent) listRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
|
||||
req := &grpc.ListRoutesRequest{}
|
||||
resultingRoutes, err := k.sendReq(req)
|
||||
resultingRoutes, err := k.sendReq(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -754,8 +754,8 @@ func (k *kataAgent) getDNS(sandbox *Sandbox) ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
|
||||
span, _ := k.trace("startSandbox")
|
||||
func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
|
||||
span, ctx := k.trace(ctx, "startSandbox")
|
||||
defer span.End()
|
||||
|
||||
if err := k.setAgentURL(); err != nil {
|
||||
@ -773,7 +773,7 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
|
||||
}
|
||||
|
||||
// check grpc server is serving
|
||||
if err = k.check(); err != nil {
|
||||
if err = k.check(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -784,17 +784,17 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = k.updateInterfaces(interfaces); err != nil {
|
||||
if err = k.updateInterfaces(ctx, interfaces); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = k.updateRoutes(routes); err != nil {
|
||||
if _, err = k.updateRoutes(ctx, routes); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = k.addARPNeighbors(neighs); err != nil {
|
||||
if err = k.addARPNeighbors(ctx, neighs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
storages := setupStorages(sandbox)
|
||||
storages := setupStorages(ctx, sandbox)
|
||||
|
||||
kmodules := setupKernelModules(k.kmodules)
|
||||
|
||||
@ -808,13 +808,13 @@ func (k *kataAgent) startSandbox(sandbox *Sandbox) error {
|
||||
KernelModules: kmodules,
|
||||
}
|
||||
|
||||
_, err = k.sendReq(req)
|
||||
_, err = k.sendReq(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if k.dynamicTracing {
|
||||
_, err = k.sendReq(&grpc.StartTracingRequest{})
|
||||
_, err = k.sendReq(ctx, &grpc.StartTracingRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -844,9 +844,9 @@ func setupKernelModules(kmodules []string) []*grpc.KernelModule {
|
||||
return modules
|
||||
}
|
||||
|
||||
func setupStorages(sandbox *Sandbox) []*grpc.Storage {
|
||||
func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage {
|
||||
storages := []*grpc.Storage{}
|
||||
caps := sandbox.hypervisor.capabilities()
|
||||
caps := sandbox.hypervisor.capabilities(ctx)
|
||||
|
||||
// append 9p shared volume to storages only if filesystem sharing is supported
|
||||
if caps.IsFsSharingSupported() {
|
||||
@ -909,18 +909,18 @@ func setupStorages(sandbox *Sandbox) []*grpc.Storage {
|
||||
return storages
|
||||
}
|
||||
|
||||
func (k *kataAgent) stopSandbox(sandbox *Sandbox) error {
|
||||
span, _ := k.trace("stopSandbox")
|
||||
func (k *kataAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error {
|
||||
span, ctx := k.trace(ctx, "stopSandbox")
|
||||
defer span.End()
|
||||
|
||||
req := &grpc.DestroySandboxRequest{}
|
||||
|
||||
if _, err := k.sendReq(req); err != nil {
|
||||
if _, err := k.sendReq(ctx, req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if k.dynamicTracing {
|
||||
_, err := k.sendReq(&grpc.StopTracingRequest{})
|
||||
_, err := k.sendReq(ctx, &grpc.StopTracingRequest{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1187,19 +1187,19 @@ func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*gr
|
||||
// been performed before the container creation failed.
|
||||
// - Unmount container volumes.
|
||||
// - Unmount container rootfs.
|
||||
func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
|
||||
func (k *kataAgent) rollbackFailingContainerCreation(ctx context.Context, c *Container) {
|
||||
if c != nil {
|
||||
if err2 := c.unmountHostMounts(); err2 != nil {
|
||||
if err2 := c.unmountHostMounts(ctx); err2 != nil {
|
||||
k.Logger().WithError(err2).Error("rollback failed unmountHostMounts()")
|
||||
}
|
||||
|
||||
if err2 := bindUnmountContainerRootfs(k.ctx, getMountPath(c.sandbox.id), c.id); err2 != nil {
|
||||
if err2 := bindUnmountContainerRootfs(ctx, getMountPath(c.sandbox.id), c.id); err2 != nil {
|
||||
k.Logger().WithError(err2).Error("rollback failed bindUnmountContainerRootfs()")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
|
||||
func (k *kataAgent) buildContainerRootfs(ctx context.Context, sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
|
||||
if c.state.Fstype != "" && c.state.BlockDeviceID != "" {
|
||||
// The rootfs storage volume represents the container rootfs
|
||||
// mount point inside the guest.
|
||||
@ -1264,15 +1264,15 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
|
||||
// With virtiofs/9pfs we don't need to ask the agent to mount the rootfs as the shared directory
|
||||
// (kataGuestSharedDir) is already mounted in the guest. We only need to mount the rootfs from
|
||||
// the host and it will show up in the guest.
|
||||
if err := bindMountContainerRootfs(k.ctx, getMountPath(sandbox.id), c.id, c.rootFs.Target, false); err != nil {
|
||||
if err := bindMountContainerRootfs(ctx, getMountPath(sandbox.id), c.id, c.rootFs.Target, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) {
|
||||
span, _ := k.trace("createContainer")
|
||||
func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (p *Process, err error) {
|
||||
span, ctx := k.trace(ctx, "createContainer")
|
||||
defer span.End()
|
||||
|
||||
var ctrStorages []*grpc.Storage
|
||||
@ -1288,14 +1288,14 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
|
||||
defer func() {
|
||||
if err != nil {
|
||||
k.Logger().WithError(err).Error("createContainer failed")
|
||||
k.rollbackFailingContainerCreation(c)
|
||||
k.rollbackFailingContainerCreation(ctx, c)
|
||||
}
|
||||
}()
|
||||
|
||||
// setup rootfs -- if its block based, we'll receive a non-nil storage object representing
|
||||
// the block device for the rootfs, which us utilized for mounting in the guest. This'll be handled
|
||||
// already for non-block based rootfs
|
||||
if rootfs, err = k.buildContainerRootfs(sandbox, c, rootPathParent); err != nil {
|
||||
if rootfs, err = k.buildContainerRootfs(ctx, sandbox, c, rootPathParent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -1313,7 +1313,7 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
|
||||
}
|
||||
|
||||
// Handle container mounts
|
||||
newMounts, ignoredMounts, err := c.mountSharedDirMounts(getSharePath(sandbox.id), getMountPath(sandbox.id), kataGuestSharedDir())
|
||||
newMounts, ignoredMounts, err := c.mountSharedDirMounts(ctx, getSharePath(sandbox.id), getMountPath(sandbox.id), kataGuestSharedDir())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1380,7 +1380,7 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
|
||||
SandboxPidns: sharedPidNs,
|
||||
}
|
||||
|
||||
if _, err = k.sendReq(req); err != nil {
|
||||
if _, err = k.sendReq(ctx, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -1601,27 +1601,27 @@ func (k *kataAgent) handlePidNamespace(grpcSpec *grpc.Spec, sandbox *Sandbox) bo
|
||||
return sharedPidNs
|
||||
}
|
||||
|
||||
func (k *kataAgent) startContainer(sandbox *Sandbox, c *Container) error {
|
||||
span, _ := k.trace("startContainer")
|
||||
func (k *kataAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error {
|
||||
span, ctx := k.trace(ctx, "startContainer")
|
||||
defer span.End()
|
||||
|
||||
req := &grpc.StartContainerRequest{
|
||||
ContainerId: c.id,
|
||||
}
|
||||
|
||||
_, err := k.sendReq(req)
|
||||
_, err := k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) stopContainer(sandbox *Sandbox, c Container) error {
|
||||
span, _ := k.trace("stopContainer")
|
||||
func (k *kataAgent) stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
|
||||
span, ctx := k.trace(ctx, "stopContainer")
|
||||
defer span.End()
|
||||
|
||||
_, err := k.sendReq(&grpc.RemoveContainerRequest{ContainerId: c.id})
|
||||
_, err := k.sendReq(ctx, &grpc.RemoveContainerRequest{ContainerId: c.id})
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error {
|
||||
func (k *kataAgent) signalProcess(ctx context.Context, c *Container, processID string, signal syscall.Signal, all bool) error {
|
||||
execID := processID
|
||||
if all {
|
||||
// kata agent uses empty execId to signal all processes in a container
|
||||
@ -1633,11 +1633,11 @@ func (k *kataAgent) signalProcess(c *Container, processID string, signal syscall
|
||||
Signal: uint32(signal),
|
||||
}
|
||||
|
||||
_, err := k.sendReq(req)
|
||||
_, err := k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) winsizeProcess(c *Container, processID string, height, width uint32) error {
|
||||
func (k *kataAgent) winsizeProcess(ctx context.Context, c *Container, processID string, height, width uint32) error {
|
||||
req := &grpc.TtyWinResizeRequest{
|
||||
ContainerId: c.id,
|
||||
ExecId: processID,
|
||||
@ -1645,18 +1645,18 @@ func (k *kataAgent) winsizeProcess(c *Container, processID string, height, width
|
||||
Column: width,
|
||||
}
|
||||
|
||||
_, err := k.sendReq(req)
|
||||
_, err := k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) processListContainer(sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) {
|
||||
func (k *kataAgent) processListContainer(ctx context.Context, sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) {
|
||||
req := &grpc.ListProcessesRequest{
|
||||
ContainerId: c.id,
|
||||
Format: options.Format,
|
||||
Args: options.Args,
|
||||
}
|
||||
|
||||
resp, err := k.sendReq(req)
|
||||
resp, err := k.sendReq(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1669,7 +1669,7 @@ func (k *kataAgent) processListContainer(sandbox *Sandbox, c Container, options
|
||||
return processList.ProcessList, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) updateContainer(sandbox *Sandbox, c Container, resources specs.LinuxResources) error {
|
||||
func (k *kataAgent) updateContainer(ctx context.Context, sandbox *Sandbox, c Container, resources specs.LinuxResources) error {
|
||||
grpcResources, err := grpc.ResourcesOCItoGRPC(&resources)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1680,29 +1680,29 @@ func (k *kataAgent) updateContainer(sandbox *Sandbox, c Container, resources spe
|
||||
Resources: grpcResources,
|
||||
}
|
||||
|
||||
_, err = k.sendReq(req)
|
||||
_, err = k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) pauseContainer(sandbox *Sandbox, c Container) error {
|
||||
func (k *kataAgent) pauseContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
|
||||
req := &grpc.PauseContainerRequest{
|
||||
ContainerId: c.id,
|
||||
}
|
||||
|
||||
_, err := k.sendReq(req)
|
||||
_, err := k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) resumeContainer(sandbox *Sandbox, c Container) error {
|
||||
func (k *kataAgent) resumeContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
|
||||
req := &grpc.ResumeContainerRequest{
|
||||
ContainerId: c.id,
|
||||
}
|
||||
|
||||
_, err := k.sendReq(req)
|
||||
_, err := k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error {
|
||||
func (k *kataAgent) memHotplugByProbe(ctx context.Context, addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error {
|
||||
if memorySectionSizeMB == uint32(0) {
|
||||
return fmt.Errorf("memorySectionSizeMB couldn't be zero")
|
||||
}
|
||||
@ -1722,27 +1722,27 @@ func (k *kataAgent) memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionS
|
||||
MemHotplugProbeAddr: addrList,
|
||||
}
|
||||
|
||||
_, err := k.sendReq(req)
|
||||
_, err := k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error {
|
||||
func (k *kataAgent) onlineCPUMem(ctx context.Context, cpus uint32, cpuOnly bool) error {
|
||||
req := &grpc.OnlineCPUMemRequest{
|
||||
Wait: false,
|
||||
NbCpus: cpus,
|
||||
CpuOnly: cpuOnly,
|
||||
}
|
||||
|
||||
_, err := k.sendReq(req)
|
||||
_, err := k.sendReq(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error) {
|
||||
func (k *kataAgent) statsContainer(ctx context.Context, sandbox *Sandbox, c Container) (*ContainerStats, error) {
|
||||
req := &grpc.StatsContainerRequest{
|
||||
ContainerId: c.id,
|
||||
}
|
||||
|
||||
returnStats, err := k.sendReq(req)
|
||||
returnStats, err := k.sendReq(ctx, req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -1769,7 +1769,7 @@ func (k *kataAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerSta
|
||||
return containerStats, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) connect() error {
|
||||
func (k *kataAgent) connect(ctx context.Context) error {
|
||||
if k.dead {
|
||||
return errors.New("Dead agent")
|
||||
}
|
||||
@ -1778,7 +1778,7 @@ func (k *kataAgent) connect() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
span, _ := k.trace("connect")
|
||||
span, ctx := k.trace(ctx, "connect")
|
||||
defer span.End()
|
||||
|
||||
// This is for the first connection only, to prevent race
|
||||
@ -1801,8 +1801,8 @@ func (k *kataAgent) connect() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) disconnect() error {
|
||||
span, _ := k.trace("disconnect")
|
||||
func (k *kataAgent) disconnect(ctx context.Context) error {
|
||||
span, _ := k.trace(ctx, "disconnect")
|
||||
defer span.End()
|
||||
|
||||
k.Lock()
|
||||
@ -1823,22 +1823,22 @@ func (k *kataAgent) disconnect() error {
|
||||
}
|
||||
|
||||
// check grpc server is serving
|
||||
func (k *kataAgent) check() error {
|
||||
span, _ := k.trace("check")
|
||||
func (k *kataAgent) check(ctx context.Context) error {
|
||||
span, ctx := k.trace(ctx, "check")
|
||||
defer span.End()
|
||||
|
||||
_, err := k.sendReq(&grpc.CheckRequest{})
|
||||
_, err := k.sendReq(ctx, &grpc.CheckRequest{})
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Failed to check if grpc server is working: %s", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) waitProcess(c *Container, processID string) (int32, error) {
|
||||
span, _ := k.trace("waitProcess")
|
||||
func (k *kataAgent) waitProcess(ctx context.Context, c *Container, processID string) (int32, error) {
|
||||
span, ctx := k.trace(ctx, "waitProcess")
|
||||
defer span.End()
|
||||
|
||||
resp, err := k.sendReq(&grpc.WaitProcessRequest{
|
||||
resp, err := k.sendReq(ctx, &grpc.WaitProcessRequest{
|
||||
ContainerId: c.id,
|
||||
ExecId: processID,
|
||||
})
|
||||
@ -1849,8 +1849,8 @@ func (k *kataAgent) waitProcess(c *Container, processID string) (int32, error) {
|
||||
return resp.(*grpc.WaitProcessResponse).Status, nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) writeProcessStdin(c *Container, ProcessID string, data []byte) (int, error) {
|
||||
resp, err := k.sendReq(&grpc.WriteStreamRequest{
|
||||
func (k *kataAgent) writeProcessStdin(ctx context.Context, c *Container, ProcessID string, data []byte) (int, error) {
|
||||
resp, err := k.sendReq(ctx, &grpc.WriteStreamRequest{
|
||||
ContainerId: c.id,
|
||||
ExecId: ProcessID,
|
||||
Data: data,
|
||||
@ -1863,8 +1863,8 @@ func (k *kataAgent) writeProcessStdin(c *Container, ProcessID string, data []byt
|
||||
return int(resp.(*grpc.WriteStreamResponse).Len), nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) closeProcessStdin(c *Container, ProcessID string) error {
|
||||
_, err := k.sendReq(&grpc.CloseStdinRequest{
|
||||
func (k *kataAgent) closeProcessStdin(ctx context.Context, c *Container, ProcessID string) error {
|
||||
_, err := k.sendReq(ctx, &grpc.CloseStdinRequest{
|
||||
ContainerId: c.id,
|
||||
ExecId: ProcessID,
|
||||
})
|
||||
@ -1872,8 +1872,8 @@ func (k *kataAgent) closeProcessStdin(c *Container, ProcessID string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) reseedRNG(data []byte) error {
|
||||
_, err := k.sendReq(&grpc.ReseedRandomDevRequest{
|
||||
func (k *kataAgent) reseedRNG(ctx context.Context, data []byte) error {
|
||||
_, err := k.sendReq(ctx, &grpc.ReseedRandomDevRequest{
|
||||
Data: data,
|
||||
})
|
||||
|
||||
@ -1996,17 +1996,17 @@ func (k *kataAgent) getReqContext(reqName string) (ctx context.Context, cancel c
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
func (k *kataAgent) sendReq(request interface{}) (interface{}, error) {
|
||||
func (k *kataAgent) sendReq(spanCtx context.Context, request interface{}) (interface{}, error) {
|
||||
start := time.Now()
|
||||
span, _ := k.trace("sendReq")
|
||||
span, spanCtx := k.trace(spanCtx, "sendReq")
|
||||
span.SetAttributes(label.Key("request").String(fmt.Sprintf("%+v", request)))
|
||||
defer span.End()
|
||||
|
||||
if err := k.connect(); err != nil {
|
||||
if err := k.connect(spanCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !k.keepConn {
|
||||
defer k.disconnect()
|
||||
defer k.disconnect(spanCtx)
|
||||
}
|
||||
|
||||
msgName := proto.MessageName(request.(proto.Message))
|
||||
@ -2028,24 +2028,24 @@ func (k *kataAgent) sendReq(request interface{}) (interface{}, error) {
|
||||
}
|
||||
|
||||
// readStdout and readStderr are special that we cannot differentiate them with the request types...
|
||||
func (k *kataAgent) readProcessStdout(c *Container, processID string, data []byte) (int, error) {
|
||||
if err := k.connect(); err != nil {
|
||||
func (k *kataAgent) readProcessStdout(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
|
||||
if err := k.connect(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !k.keepConn {
|
||||
defer k.disconnect()
|
||||
defer k.disconnect(ctx)
|
||||
}
|
||||
|
||||
return k.readProcessStream(c.id, processID, data, k.client.AgentServiceClient.ReadStdout)
|
||||
}
|
||||
|
||||
// readStdout and readStderr are special that we cannot differentiate them with the request types...
|
||||
func (k *kataAgent) readProcessStderr(c *Container, processID string, data []byte) (int, error) {
|
||||
if err := k.connect(); err != nil {
|
||||
func (k *kataAgent) readProcessStderr(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
|
||||
if err := k.connect(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !k.keepConn {
|
||||
defer k.disconnect()
|
||||
defer k.disconnect(ctx)
|
||||
}
|
||||
|
||||
return k.readProcessStream(c.id, processID, data, k.client.AgentServiceClient.ReadStderr)
|
||||
@ -2066,8 +2066,8 @@ func (k *kataAgent) readProcessStream(containerID, processID string, data []byte
|
||||
return 0, err
|
||||
}
|
||||
|
||||
func (k *kataAgent) getGuestDetails(req *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) {
|
||||
resp, err := k.sendReq(req)
|
||||
func (k *kataAgent) getGuestDetails(ctx context.Context, req *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) {
|
||||
resp, err := k.sendReq(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -2075,8 +2075,8 @@ func (k *kataAgent) getGuestDetails(req *grpc.GuestDetailsRequest) (*grpc.GuestD
|
||||
return resp.(*grpc.GuestDetailsResponse), nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) setGuestDateTime(tv time.Time) error {
|
||||
_, err := k.sendReq(&grpc.SetGuestDateTimeRequest{
|
||||
func (k *kataAgent) setGuestDateTime(ctx context.Context, tv time.Time) error {
|
||||
_, err := k.sendReq(ctx, &grpc.SetGuestDateTimeRequest{
|
||||
Sec: tv.Unix(),
|
||||
Usec: int64(tv.Nanosecond() / 1e3),
|
||||
})
|
||||
@ -2084,7 +2084,7 @@ func (k *kataAgent) setGuestDateTime(tv time.Time) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *kataAgent) copyFile(src, dst string) error {
|
||||
func (k *kataAgent) copyFile(ctx context.Context, src, dst string) error {
|
||||
var st unix.Stat_t
|
||||
|
||||
err := unix.Stat(src, &st)
|
||||
@ -2115,7 +2115,7 @@ func (k *kataAgent) copyFile(src, dst string) error {
|
||||
|
||||
// Handle the special case where the file is empty
|
||||
if fileSize == 0 {
|
||||
_, err = k.sendReq(cpReq)
|
||||
_, err = k.sendReq(ctx, cpReq)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -2131,7 +2131,7 @@ func (k *kataAgent) copyFile(src, dst string) error {
|
||||
cpReq.Data = b[:bytesToCopy]
|
||||
cpReq.Offset = offset
|
||||
|
||||
if _, err = k.sendReq(cpReq); err != nil {
|
||||
if _, err = k.sendReq(ctx, cpReq); err != nil {
|
||||
return fmt.Errorf("Could not send CopyFile request: %v", err)
|
||||
}
|
||||
|
||||
@ -2143,13 +2143,13 @@ func (k *kataAgent) copyFile(src, dst string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *kataAgent) markDead() {
|
||||
func (k *kataAgent) markDead(ctx context.Context) {
|
||||
k.Logger().Infof("mark agent dead")
|
||||
k.dead = true
|
||||
k.disconnect()
|
||||
k.disconnect(ctx)
|
||||
}
|
||||
|
||||
func (k *kataAgent) cleanup(s *Sandbox) {
|
||||
func (k *kataAgent) cleanup(ctx context.Context, s *Sandbox) {
|
||||
if err := cleanupSandboxBindMounts(s); err != nil {
|
||||
k.Logger().WithError(err).Errorf("failed to cleanup observability logs bindmount")
|
||||
}
|
||||
@ -2163,7 +2163,7 @@ func (k *kataAgent) cleanup(s *Sandbox) {
|
||||
|
||||
// Unmount mount path
|
||||
path = getMountPath(s.id)
|
||||
if err := bindUnmountAllRootfs(k.ctx, path, s); err != nil {
|
||||
if err := bindUnmountAllRootfs(ctx, path, s); err != nil {
|
||||
k.Logger().WithError(err).Errorf("failed to unmount vm mount path %s", path)
|
||||
}
|
||||
if err := os.RemoveAll(getSandboxPath(s.id)); err != nil {
|
||||
@ -2181,9 +2181,9 @@ func (k *kataAgent) load(s persistapi.AgentState) {
|
||||
k.state.URL = s.URL
|
||||
}
|
||||
|
||||
func (k *kataAgent) getOOMEvent() (string, error) {
|
||||
func (k *kataAgent) getOOMEvent(ctx context.Context) (string, error) {
|
||||
req := &grpc.GetOOMEventRequest{}
|
||||
result, err := k.sendReq(req)
|
||||
result, err := k.sendReq(ctx, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -2193,8 +2193,8 @@ func (k *kataAgent) getOOMEvent() (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
func (k *kataAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
|
||||
resp, err := k.sendReq(req)
|
||||
func (k *kataAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
|
||||
resp, err := k.sendReq(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
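The sketch below is an editor's illustration of the pattern applied throughout kata_agent.go above, not code from the repository: the trace helper starts a child span from the caller's context and returns the derived context, which is then passed to every downstream call so that connect/disconnect and the request itself show up as children of the request span. The connect, disconnect and doCall helpers are hypothetical stand-ins, and the otel import paths assume the OpenTelemetry version the runtime pins; only the Start/End pattern mirrors the real code.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	otelTrace "go.opentelemetry.io/otel/trace"
)

// trace starts a child span of the caller's context and returns the derived
// context so downstream calls nest under this span.
func trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
	if parent == nil {
		parent = context.Background()
	}
	ctx, span := otel.Tracer("kata").Start(parent, name)
	return span, ctx
}

// sendReq mirrors the shape of the patched kataAgent.sendReq: take the
// caller's context, derive a span context, and thread it downwards.
func sendReq(ctx context.Context, request interface{}) (interface{}, error) {
	span, spanCtx := trace(ctx, "sendReq")
	defer span.End()

	if err := connect(spanCtx); err != nil { // child of "sendReq"
		return nil, err
	}
	defer disconnect(spanCtx) // also a child of "sendReq"

	return doCall(spanCtx, request)
}

func connect(ctx context.Context) error {
	_, span := otel.Tracer("kata").Start(ctx, "connect")
	defer span.End()
	return nil
}

func disconnect(ctx context.Context) {
	_, span := otel.Tracer("kata").Start(ctx, "disconnect")
	span.End()
}

// doCall is a placeholder for the ttrpc dispatch in the real agent.
func doCall(ctx context.Context, request interface{}) (interface{}, error) {
	return request, nil
}

func main() {
	resp, err := sendReq(context.Background(), "ping")
	fmt.Println(resp, err)
}

With this shape a span never has to guess its parent from struct state; ordering follows the call graph.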
@ -6,6 +6,7 @@
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@ -59,7 +60,7 @@ func (endpoint *MacvtapEndpoint) SetProperties(properties NetworkInfo) {
|
||||
}
|
||||
|
||||
// Attach for macvtap endpoint passes macvtap device to the hypervisor.
|
||||
func (endpoint *MacvtapEndpoint) Attach(s *Sandbox) error {
|
||||
func (endpoint *MacvtapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
|
||||
var err error
|
||||
h := s.hypervisor
|
||||
|
||||
@ -76,21 +77,21 @@ func (endpoint *MacvtapEndpoint) Attach(s *Sandbox) error {
|
||||
endpoint.VhostFds = vhostFds
|
||||
}
|
||||
|
||||
return h.addDevice(endpoint, netDev)
|
||||
return h.addDevice(ctx, endpoint, netDev)
|
||||
}
|
||||
|
||||
// Detach for macvtap endpoint does nothing.
|
||||
func (endpoint *MacvtapEndpoint) Detach(netNsCreated bool, netNsPath string) error {
|
||||
func (endpoint *MacvtapEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// HotAttach for macvtap endpoint not supported yet
|
||||
func (endpoint *MacvtapEndpoint) HotAttach(h hypervisor) error {
|
||||
func (endpoint *MacvtapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
|
||||
return fmt.Errorf("MacvtapEndpoint does not support Hot attach")
|
||||
}
|
||||
|
||||
// HotDetach for macvtap endpoint not supported yet
|
||||
func (endpoint *MacvtapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
|
||||
func (endpoint *MacvtapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
|
||||
return fmt.Errorf("MacvtapEndpoint does not support Hot detach")
|
||||
}
|
||||
|
||||
|
@ -36,7 +36,7 @@ func (n *mockAgent) longLiveConn() bool {
|
||||
}
|
||||
|
||||
// createSandbox is the Noop agent sandbox creation implementation. It does nothing.
|
||||
func (n *mockAgent) createSandbox(sandbox *Sandbox) error {
|
||||
func (n *mockAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -46,137 +46,137 @@ func (n *mockAgent) capabilities() types.Capabilities {
|
||||
}
|
||||
|
||||
// disconnect is the Noop agent connection closer. It does nothing.
|
||||
func (n *mockAgent) disconnect() error {
|
||||
func (n *mockAgent) disconnect(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// exec is the Noop agent command execution implementation. It does nothing.
|
||||
func (n *mockAgent) exec(sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
|
||||
func (n *mockAgent) exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// startSandbox is the Noop agent Sandbox starting implementation. It does nothing.
|
||||
func (n *mockAgent) startSandbox(sandbox *Sandbox) error {
|
||||
func (n *mockAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopSandbox is the Noop agent Sandbox stopping implementation. It does nothing.
|
||||
func (n *mockAgent) stopSandbox(sandbox *Sandbox) error {
|
||||
func (n *mockAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// createContainer is the Noop agent Container creation implementation. It does nothing.
|
||||
func (n *mockAgent) createContainer(sandbox *Sandbox, c *Container) (*Process, error) {
|
||||
func (n *mockAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (*Process, error) {
|
||||
return &Process{}, nil
|
||||
}
|
||||
|
||||
// startContainer is the Noop agent Container starting implementation. It does nothing.
|
||||
func (n *mockAgent) startContainer(sandbox *Sandbox, c *Container) error {
|
||||
func (n *mockAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// stopContainer is the Noop agent Container stopping implementation. It does nothing.
|
||||
func (n *mockAgent) stopContainer(sandbox *Sandbox, c Container) error {
|
||||
func (n *mockAgent) stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// signalProcess is the Noop agent Container signaling implementation. It does nothing.
|
||||
func (n *mockAgent) signalProcess(c *Container, processID string, signal syscall.Signal, all bool) error {
|
||||
func (n *mockAgent) signalProcess(ctx context.Context, c *Container, processID string, signal syscall.Signal, all bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// processListContainer is the Noop agent Container ps implementation. It does nothing.
|
||||
func (n *mockAgent) processListContainer(sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) {
|
||||
func (n *mockAgent) processListContainer(ctx context.Context, sandbox *Sandbox, c Container, options ProcessListOptions) (ProcessList, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// updateContainer is the Noop agent Container update implementation. It does nothing.
|
||||
func (n *mockAgent) updateContainer(sandbox *Sandbox, c Container, resources specs.LinuxResources) error {
|
||||
func (n *mockAgent) updateContainer(ctx context.Context, sandbox *Sandbox, c Container, resources specs.LinuxResources) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// memHotplugByProbe is the Noop agent implementation of memory hotplug event notification via the probe interface. It does nothing.
|
||||
func (n *mockAgent) memHotplugByProbe(addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error {
|
||||
func (n *mockAgent) memHotplugByProbe(ctx context.Context, addr uint64, sizeMB uint32, memorySectionSizeMB uint32) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// onlineCPUMem is the Noop agent Container online CPU and Memory implementation. It does nothing.
|
||||
func (n *mockAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error {
|
||||
func (n *mockAgent) onlineCPUMem(ctx context.Context, cpus uint32, cpuOnly bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateInterface is the Noop agent Interface update implementation. It does nothing.
|
||||
func (n *mockAgent) updateInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
func (n *mockAgent) updateInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// listInterfaces is the Noop agent Interfaces list implementation. It does nothing.
|
||||
func (n *mockAgent) listInterfaces() ([]*pbTypes.Interface, error) {
|
||||
func (n *mockAgent) listInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// updateRoutes is the Noop agent Routes update implementation. It does nothing.
|
||||
func (n *mockAgent) updateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
|
||||
func (n *mockAgent) updateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// listRoutes is the Noop agent Routes list implementation. It does nothing.
|
||||
func (n *mockAgent) listRoutes() ([]*pbTypes.Route, error) {
|
||||
func (n *mockAgent) listRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// check is the Noop agent health checker. It does nothing.
|
||||
func (n *mockAgent) check() error {
|
||||
func (n *mockAgent) check(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// statsContainer is the Noop agent Container stats implementation. It does nothing.
|
||||
func (n *mockAgent) statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error) {
|
||||
func (n *mockAgent) statsContainer(ctx context.Context, sandbox *Sandbox, c Container) (*ContainerStats, error) {
|
||||
return &ContainerStats{}, nil
|
||||
}
|
||||
|
||||
// waitProcess is the Noop agent process waiter. It does nothing.
|
||||
func (n *mockAgent) waitProcess(c *Container, processID string) (int32, error) {
|
||||
func (n *mockAgent) waitProcess(ctx context.Context, c *Container, processID string) (int32, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// winsizeProcess is the Noop agent process tty resizer. It does nothing.
|
||||
func (n *mockAgent) winsizeProcess(c *Container, processID string, height, width uint32) error {
|
||||
func (n *mockAgent) winsizeProcess(ctx context.Context, c *Container, processID string, height, width uint32) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeProcessStdin is the Noop agent process stdin writer. It does nothing.
|
||||
func (n *mockAgent) writeProcessStdin(c *Container, ProcessID string, data []byte) (int, error) {
|
||||
func (n *mockAgent) writeProcessStdin(ctx context.Context, c *Container, ProcessID string, data []byte) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// closeProcessStdin is the Noop agent process stdin closer. It does nothing.
|
||||
func (n *mockAgent) closeProcessStdin(c *Container, ProcessID string) error {
|
||||
func (n *mockAgent) closeProcessStdin(ctx context.Context, c *Container, ProcessID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// readProcessStdout is the Noop agent process stdout reader. It does nothing.
|
||||
func (n *mockAgent) readProcessStdout(c *Container, processID string, data []byte) (int, error) {
|
||||
func (n *mockAgent) readProcessStdout(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// readProcessStderr is the Noop agent process stderr reader. It does nothing.
|
||||
func (n *mockAgent) readProcessStderr(c *Container, processID string, data []byte) (int, error) {
|
||||
func (n *mockAgent) readProcessStderr(ctx context.Context, c *Container, processID string, data []byte) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// pauseContainer is the Noop agent Container pause implementation. It does nothing.
|
||||
func (n *mockAgent) pauseContainer(sandbox *Sandbox, c Container) error {
|
||||
func (n *mockAgent) pauseContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// resumeContainer is the Noop agent Container resume implementation. It does nothing.
|
||||
func (n *mockAgent) resumeContainer(sandbox *Sandbox, c Container) error {
|
||||
func (n *mockAgent) resumeContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// configure is the Noop agent hypervisor configuration implementation. It does nothing.
|
||||
func (n *mockAgent) configure(h hypervisor, id, sharePath string, config interface{}) error {
|
||||
func (n *mockAgent) configure(ctx context.Context, h hypervisor, id, sharePath string, config interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -185,7 +185,7 @@ func (n *mockAgent) configureFromGrpc(h hypervisor, id string, config interface{
|
||||
}
|
||||
|
||||
// reseedRNG is the Noop agent RNG reseeder. It does nothing.
|
||||
func (n *mockAgent) reseedRNG(data []byte) error {
|
||||
func (n *mockAgent) reseedRNG(ctx context.Context, data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -205,24 +205,24 @@ func (n *mockAgent) setAgentURL() error {
|
||||
}
|
||||
|
||||
// getGuestDetails is the Noop agent GuestDetails querier. It does nothing.
|
||||
func (n *mockAgent) getGuestDetails(*grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) {
|
||||
func (n *mockAgent) getGuestDetails(context.Context, *grpc.GuestDetailsRequest) (*grpc.GuestDetailsResponse, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// setGuestDateTime is the Noop agent guest time setter. It does nothing.
|
||||
func (n *mockAgent) setGuestDateTime(time.Time) error {
|
||||
func (n *mockAgent) setGuestDateTime(context.Context, time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyFile is the Noop agent copy file. It does nothing.
|
||||
func (n *mockAgent) copyFile(src, dst string) error {
|
||||
func (n *mockAgent) copyFile(ctx context.Context, src, dst string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *mockAgent) markDead() {
|
||||
func (n *mockAgent) markDead(ctx context.Context) {
|
||||
}
|
||||
|
||||
func (n *mockAgent) cleanup(s *Sandbox) {
|
||||
func (n *mockAgent) cleanup(ctx context.Context, s *Sandbox) {
|
||||
}
|
||||
|
||||
// save is the Noop agent state saver. It does nothing.
|
||||
@ -233,10 +233,10 @@ func (n *mockAgent) save() (s persistapi.AgentState) {
|
||||
// load is the Noop agent state loader. It does nothing.
|
||||
func (n *mockAgent) load(s persistapi.AgentState) {}
|
||||
|
||||
func (n *mockAgent) getOOMEvent() (string, error) {
|
||||
func (n *mockAgent) getOOMEvent(ctx context.Context) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (k *mockAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
|
||||
func (k *mockAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ type mockHypervisor struct {
|
||||
mockPid int
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) capabilities() types.Capabilities {
|
||||
func (m *mockHypervisor) capabilities(ctx context.Context) types.Capabilities {
|
||||
return types.Capabilities{}
|
||||
}
|
||||
|
||||
@ -37,19 +37,19 @@ func (m *mockHypervisor) createSandbox(ctx context.Context, id string, networkNS
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) startSandbox(timeout int) error {
|
||||
func (m *mockHypervisor) startSandbox(ctx context.Context, timeout int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) stopSandbox() error {
|
||||
func (m *mockHypervisor) stopSandbox(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) pauseSandbox() error {
|
||||
func (m *mockHypervisor) pauseSandbox(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) resumeSandbox() error {
|
||||
func (m *mockHypervisor) resumeSandbox(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -57,11 +57,11 @@ func (m *mockHypervisor) saveSandbox() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) addDevice(devInfo interface{}, devType deviceType) error {
|
||||
func (m *mockHypervisor) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
func (m *mockHypervisor) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
switch devType {
|
||||
case cpuDev:
|
||||
return devInfo.(uint32), nil
|
||||
@ -72,7 +72,7 @@ func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceTyp
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
func (m *mockHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
switch devType {
|
||||
case cpuDev:
|
||||
return devInfo.(uint32), nil
|
||||
@ -82,26 +82,26 @@ func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType device
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) getSandboxConsole(sandboxID string) (string, string, error) {
|
||||
func (m *mockHypervisor) getSandboxConsole(ctx context.Context, sandboxID string) (string, string, error) {
|
||||
return "", "", nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) resizeMemory(memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
|
||||
func (m *mockHypervisor) resizeMemory(ctx context.Context, memMB uint32, memorySectionSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
|
||||
return 0, memoryDevice{}, nil
|
||||
}
|
||||
func (m *mockHypervisor) resizeVCPUs(cpus uint32) (uint32, uint32, error) {
|
||||
func (m *mockHypervisor) resizeVCPUs(ctx context.Context, cpus uint32) (uint32, uint32, error) {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) disconnect() {
|
||||
func (m *mockHypervisor) disconnect(ctx context.Context) {
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) getThreadIDs() (vcpuThreadIDs, error) {
|
||||
func (m *mockHypervisor) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
|
||||
vcpus := map[int]int{0: os.Getpid()}
|
||||
return vcpuThreadIDs{vcpus}, nil
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) cleanup() error {
|
||||
func (m *mockHypervisor) cleanup(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -113,7 +113,7 @@ func (m *mockHypervisor) fromGrpc(ctx context.Context, hypervisorConfig *Hypervi
|
||||
return errors.New("mockHypervisor is not supported by VM cache")
|
||||
}
|
||||
|
||||
func (m *mockHypervisor) toGrpc() ([]byte, error) {
|
||||
func (m *mockHypervisor) toGrpc(ctx context.Context) ([]byte, error) {
|
||||
return nil, errors.New("mockHypervisor is not supported by VM cache")
|
||||
}
|
||||
|
||||
|
@ -47,7 +47,7 @@ func TestMockHypervisorCreateSandbox(t *testing.T) {
|
||||
func TestMockHypervisorStartSandbox(t *testing.T) {
|
||||
var m *mockHypervisor
|
||||
|
||||
assert.NoError(t, m.startSandbox(vmStartTimeout))
|
||||
assert.NoError(t, m.startSandbox(context.Background(), vmStartTimeout))
|
||||
}
|
||||
|
||||
func TestMockHypervisorStopSandbox(t *testing.T) {
|
||||
@ -59,7 +59,7 @@ func TestMockHypervisorStopSandbox(t *testing.T) {
|
||||
func TestMockHypervisorAddDevice(t *testing.T) {
|
||||
var m *mockHypervisor
|
||||
|
||||
assert.NoError(t, m.addDevice(nil, imgDev))
|
||||
assert.NoError(t, m.addDevice(context.Background(), nil, imgDev))
|
||||
}
|
||||
|
||||
func TestMockHypervisorGetSandboxConsole(t *testing.T) {
|
||||
|
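For call sites that have no incoming context, such as the unit tests above, the root is supplied explicitly with context.Background(). A hedged sketch of what another test in this file would look like after the change; the test name is hypothetical, while mockHypervisor, getThreadIDs and assert are the identifiers already used above:

func TestMockHypervisorGetThreadIDs(t *testing.T) {
	var m *mockHypervisor
	ctx := context.Background()

	_, err := m.getThreadIDs(ctx)
	assert.NoError(t, err)
}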
@ -6,6 +6,7 @@
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -36,7 +37,7 @@ func newMonitor(s *Sandbox) *monitor {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *monitor) newWatcher() (chan error, error) {
|
||||
func (m *monitor) newWatcher(ctx context.Context) (chan error, error) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
@ -57,8 +58,8 @@ func (m *monitor) newWatcher() (chan error, error) {
|
||||
m.wg.Done()
|
||||
return
|
||||
case <-tick.C:
|
||||
m.watchHypervisor()
|
||||
m.watchAgent()
|
||||
m.watchHypervisor(ctx)
|
||||
m.watchAgent(ctx)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@ -67,8 +68,8 @@ func (m *monitor) newWatcher() (chan error, error) {
|
||||
return watcher, nil
|
||||
}
|
||||
|
||||
func (m *monitor) notify(err error) {
|
||||
m.sandbox.agent.markDead()
|
||||
func (m *monitor) notify(ctx context.Context, err error) {
|
||||
m.sandbox.agent.markDead(ctx)
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
@ -127,17 +128,17 @@ func (m *monitor) stop() {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *monitor) watchAgent() {
|
||||
err := m.sandbox.agent.check()
|
||||
func (m *monitor) watchAgent(ctx context.Context) {
|
||||
err := m.sandbox.agent.check(ctx)
|
||||
if err != nil {
|
||||
// TODO: define and export error types
|
||||
m.notify(errors.Wrapf(err, "failed to ping agent"))
|
||||
m.notify(ctx, errors.Wrapf(err, "failed to ping agent"))
|
||||
}
|
||||
}
|
||||
|
||||
func (m *monitor) watchHypervisor() error {
|
||||
func (m *monitor) watchHypervisor(ctx context.Context) error {
|
||||
if err := m.sandbox.hypervisor.check(); err != nil {
|
||||
m.notify(errors.Wrapf(err, "failed to ping hypervisor process"))
|
||||
m.notify(ctx, errors.Wrapf(err, "failed to ping hypervisor process"))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
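An editor's sketch of the monitor pattern above, with hypothetical names: the watcher goroutine keeps the context it was started with and reuses it for every periodic check, so the check and notify spans stay attached to the sandbox trace instead of dangling off context.Background(). The real monitor stops via its own channel and wait group; the ctx.Done() case here is a simplification.

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
)

func newWatcher(ctx context.Context, interval time.Duration) chan error {
	watcher := make(chan error, 1)
	tick := time.NewTicker(interval)

	go func() {
		defer tick.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-tick.C:
				watchAgent(ctx, watcher)
			}
		}
	}()
	return watcher
}

func watchAgent(ctx context.Context, watcher chan error) {
	_, span := otel.Tracer("kata").Start(ctx, "check")
	defer span.End()
	// A failed check would be reported without blocking, e.g.:
	// select { case watcher <- err: default: }
	_ = watcher
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	_ = newWatcher(ctx, 100*time.Millisecond)
	time.Sleep(300 * time.Millisecond)
}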
@ -219,7 +219,7 @@ const mountPerm = os.FileMode(0755)
|
||||
// * recursively create the destination
|
||||
// pgtypes stands for propagation types, which are shared, private, slave, and unbindable.
|
||||
func bindMount(ctx context.Context, source, destination string, readonly bool, pgtypes string) error {
|
||||
span, _ := trace(ctx, "bindMount")
|
||||
span, ctx := trace(ctx, "bindMount")
|
||||
defer span.End()
|
||||
|
||||
if source == "" {
|
||||
@ -347,7 +347,7 @@ func bindUnmountContainerRootfs(ctx context.Context, sharedDir, cID string) erro
|
||||
}
|
||||
|
||||
func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbox) error {
|
||||
span, _ := trace(ctx, "bindUnmountAllRootfs")
|
||||
span, ctx := trace(ctx, "bindUnmountAllRootfs")
|
||||
defer span.End()
|
||||
|
||||
var errors *merr.Error
|
||||
@ -356,11 +356,11 @@ func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbo
|
||||
mountLogger().WithField("container", c.id).Warnf("container dir is a symlink, malicious guest?")
|
||||
continue
|
||||
}
|
||||
c.unmountHostMounts()
|
||||
c.unmountHostMounts(ctx)
|
||||
if c.state.Fstype == "" {
|
||||
// even if error found, don't break out of loop until all mounts attempted
|
||||
// to be unmounted, and collect all errors
|
||||
errors = merr.Append(errors, bindUnmountContainerRootfs(c.ctx, sharedDir, c.id))
|
||||
errors = merr.Append(errors, bindUnmountContainerRootfs(ctx, sharedDir, c.id))
|
||||
}
|
||||
}
|
||||
return errors.ErrorOrNil()
|
||||
|
@ -406,11 +406,11 @@ func getLinkByName(netHandle *netlink.Handle, name string, expectedLink netlink.
|
||||
}
|
||||
|
||||
// The endpoint type should dictate how the connection needs to happen.
|
||||
func xConnectVMNetwork(endpoint Endpoint, h hypervisor) error {
|
||||
func xConnectVMNetwork(ctx context.Context, endpoint Endpoint, h hypervisor) error {
|
||||
netPair := endpoint.NetworkPair()
|
||||
|
||||
queues := 0
|
||||
caps := h.capabilities()
|
||||
caps := h.capabilities(ctx)
|
||||
if caps.IsMultiQueueSupported() {
|
||||
queues = int(h.hypervisorConfig().NumVCPUs)
|
||||
}
|
||||
@ -1262,8 +1262,8 @@ func (n *Network) trace(ctx context.Context, name string) (otelTrace.Span, conte
|
||||
}
|
||||
|
||||
// Run runs a callback in the specified network namespace.
|
||||
func (n *Network) Run(networkNSPath string, cb func() error) error {
|
||||
span, _ := n.trace(context.Background(), "run")
|
||||
func (n *Network) Run(ctx context.Context, networkNSPath string, cb func() error) error {
|
||||
span, _ := n.trace(ctx, "Run")
|
||||
defer span.End()
|
||||
|
||||
return doNetNS(networkNSPath, func(_ ns.NetNS) error {
|
||||
@ -1273,7 +1273,7 @@ func (n *Network) Run(networkNSPath string, cb func() error) error {
|
||||
|
||||
// Add adds all needed interfaces inside the network namespace.
|
||||
func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, hotplug bool) ([]Endpoint, error) {
|
||||
span, _ := n.trace(ctx, "add")
|
||||
span, ctx := n.trace(ctx, "Add")
|
||||
defer span.End()
|
||||
|
||||
endpoints, err := createEndpointsFromScan(config.NetNSPath, config)
|
||||
@ -1285,11 +1285,11 @@ func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, ho
|
||||
for _, endpoint := range endpoints {
|
||||
networkLogger().WithField("endpoint-type", endpoint.Type()).WithField("hotplug", hotplug).Info("Attaching endpoint")
|
||||
if hotplug {
|
||||
if err := endpoint.HotAttach(s.hypervisor); err != nil {
|
||||
if err := endpoint.HotAttach(ctx, s.hypervisor); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := endpoint.Attach(s); err != nil {
|
||||
if err := endpoint.Attach(ctx, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -1354,7 +1354,7 @@ func (n *Network) PostAdd(ctx context.Context, ns *NetworkNamespace, hotplug boo
|
||||
// Remove network endpoints in the network namespace. It also deletes the network
|
||||
// namespace in case the namespace has been created by us.
|
||||
func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor hypervisor) error {
|
||||
span, _ := n.trace(ctx, "remove")
|
||||
span, ctx := n.trace(ctx, "Remove")
|
||||
defer span.End()
|
||||
|
||||
for _, endpoint := range ns.Endpoints {
|
||||
@ -1377,7 +1377,7 @@ func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor h
|
||||
// Detach for an endpoint should enter the network namespace
|
||||
// if required.
|
||||
networkLogger().WithField("endpoint-type", endpoint.Type()).Info("Detaching endpoint")
|
||||
if err := endpoint.Detach(ns.NetNsCreated, ns.NetNsPath); err != nil {
|
||||
if err := endpoint.Detach(ctx, ns.NetNsCreated, ns.NetNsPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
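An editor's sketch of why the interface change above ripples through every endpoint type: once Attach and HotAttach take a context, each implementation must accept it, and Add can hand them the context derived from its own "Add" span so per-endpoint work nests under it. The Endpoint and hypervisor types below are deliberately reduced stand-ins; the real Attach takes the sandbox rather than the hypervisor directly.

package main

import (
	"context"
	"fmt"
)

type hypervisor interface {
	addDevice(ctx context.Context, dev interface{}) error
}

type Endpoint interface {
	Attach(ctx context.Context, h hypervisor) error
	HotAttach(ctx context.Context, h hypervisor) error
}

type macvtapLike struct{}

// Attach keeps the context flowing down to the hypervisor device add.
func (e *macvtapLike) Attach(ctx context.Context, h hypervisor) error {
	return h.addDevice(ctx, e)
}

func (e *macvtapLike) HotAttach(ctx context.Context, h hypervisor) error {
	return fmt.Errorf("hot attach not supported")
}

type noopHypervisor struct{}

func (n *noopHypervisor) addDevice(ctx context.Context, dev interface{}) error { return nil }

// add mirrors the hotplug/coldplug choice made in Network.Add above.
func add(ctx context.Context, h hypervisor, endpoints []Endpoint, hotplug bool) error {
	for _, ep := range endpoints {
		var err error
		if hotplug {
			err = ep.HotAttach(ctx, h)
		} else {
			err = ep.Attach(ctx, h)
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	eps := []Endpoint{&macvtapLike{}}
	fmt.Println(add(context.Background(), &noopHypervisor{}, eps, false))
}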
@ -6,6 +6,7 @@
|
||||
package virtcontainers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -74,7 +75,7 @@ func (endpoint *PhysicalEndpoint) NetworkPair() *NetworkInterfacePair {
|
||||
|
||||
// Attach for physical endpoint binds the physical network interface to
|
||||
// vfio-pci and adds the device to the hypervisor with vfio-passthrough.
|
||||
func (endpoint *PhysicalEndpoint) Attach(s *Sandbox) error {
|
||||
func (endpoint *PhysicalEndpoint) Attach(ctx context.Context, s *Sandbox) error {
|
||||
// Unbind physical interface from host driver and bind to vfio
|
||||
// so that it can be passed to qemu.
|
||||
vfioPath, err := bindNICToVFIO(endpoint)
|
||||
@ -95,13 +96,13 @@ func (endpoint *PhysicalEndpoint) Attach(s *Sandbox) error {
|
||||
ColdPlug: true,
|
||||
}
|
||||
|
||||
_, err = s.AddDevice(d)
|
||||
_, err = s.AddDevice(ctx, d)
|
||||
return err
|
||||
}
|
||||
|
||||
// Detach for physical endpoint unbinds the physical network interface from vfio-pci
|
||||
// and binds it back to the saved host driver.
|
||||
func (endpoint *PhysicalEndpoint) Detach(netNsCreated bool, netNsPath string) error {
|
||||
func (endpoint *PhysicalEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
|
||||
// Bind back the physical network interface to host.
|
||||
// We need to do this even if a new network namespace has not
|
||||
// been created by virtcontainers.
|
||||
@ -112,12 +113,12 @@ func (endpoint *PhysicalEndpoint) Detach(netNsCreated bool, netNsPath string) er
|
||||
}
|
||||
|
||||
// HotAttach for physical endpoint not supported yet
|
||||
func (endpoint *PhysicalEndpoint) HotAttach(h hypervisor) error {
|
||||
func (endpoint *PhysicalEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
|
||||
return fmt.Errorf("PhysicalEndpoint does not support Hot attach")
|
||||
}
|
||||
|
||||
// HotDetach for physical endpoint not supported yet
|
||||
func (endpoint *PhysicalEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
|
||||
func (endpoint *PhysicalEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
|
||||
return fmt.Errorf("PhysicalEndpoint does not support Hot detach")
|
||||
}
|
||||
|
||||
|
@ -26,7 +26,7 @@ func TestPhysicalEndpoint_HotAttach(t *testing.T) {
|
||||
|
||||
h := &mockHypervisor{}
|
||||
|
||||
err := v.HotAttach(h)
|
||||
err := v.HotAttach(context.Background(), h)
|
||||
assert.Error(err)
|
||||
}
|
||||
|
||||
@ -39,7 +39,7 @@ func TestPhysicalEndpoint_HotDetach(t *testing.T) {
|
||||
|
||||
h := &mockHypervisor{}
|
||||
|
||||
err := v.HotDetach(h, true, "")
|
||||
err := v.HotDetach(context.Background(), h, true, "")
|
||||
assert.Error(err)
|
||||
}
|
||||
|
||||
|
@ -7,6 +7,7 @@ package cgroups
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -295,7 +296,7 @@ func (m *Manager) Destroy() error {
|
||||
}
|
||||
|
||||
// AddDevice adds a device to the device cgroup
|
||||
func (m *Manager) AddDevice(device string) error {
|
||||
func (m *Manager) AddDevice(ctx context.Context, device string) error {
|
||||
cgroups, err := m.GetCgroups()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -65,7 +65,7 @@ func (s *Sandbox) GetContainer(containerID string) vc.VCContainer {
|
||||
}
|
||||
|
||||
// Release implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) Release() error {
|
||||
func (s *Sandbox) Release(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -90,12 +90,12 @@ func (s *Sandbox) Resume() error {
|
||||
}
|
||||
|
||||
// Delete implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) Delete() error {
|
||||
func (s *Sandbox) Delete(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) CreateContainer(conf vc.ContainerConfig) (vc.VCContainer, error) {
|
||||
func (s *Sandbox) CreateContainer(ctx context.Context, conf vc.ContainerConfig) (vc.VCContainer, error) {
|
||||
if s.CreateContainerFunc != nil {
|
||||
return s.CreateContainerFunc(conf)
|
||||
}
|
||||
@ -103,12 +103,12 @@ func (s *Sandbox) CreateContainer(conf vc.ContainerConfig) (vc.VCContainer, erro
|
||||
}
|
||||
|
||||
// DeleteContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) DeleteContainer(contID string) (vc.VCContainer, error) {
|
||||
func (s *Sandbox) DeleteContainer(ctx context.Context, contID string) (vc.VCContainer, error) {
|
||||
return &Container{}, nil
|
||||
}
|
||||
|
||||
// StartContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) StartContainer(contID string) (vc.VCContainer, error) {
|
||||
func (s *Sandbox) StartContainer(ctx context.Context, contID string) (vc.VCContainer, error) {
|
||||
return &Container{}, nil
|
||||
}
|
||||
|
||||
@ -118,7 +118,7 @@ func (s *Sandbox) StopContainer(contID string, force bool) (vc.VCContainer, erro
|
||||
}
|
||||
|
||||
// KillContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) KillContainer(contID string, signal syscall.Signal, all bool) error {
|
||||
func (s *Sandbox) KillContainer(ctx context.Context, contID string, signal syscall.Signal, all bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -128,7 +128,7 @@ func (s *Sandbox) StatusContainer(contID string) (vc.ContainerStatus, error) {
|
||||
}
|
||||
|
||||
// StatsContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) StatsContainer(contID string) (vc.ContainerStats, error) {
|
||||
func (s *Sandbox) StatsContainer(ctx context.Context, contID string) (vc.ContainerStats, error) {
|
||||
if s.StatsContainerFunc != nil {
|
||||
return s.StatsContainerFunc(contID)
|
||||
}
|
||||
@ -136,12 +136,12 @@ func (s *Sandbox) StatsContainer(contID string) (vc.ContainerStats, error) {
|
||||
}
|
||||
|
||||
// PauseContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) PauseContainer(contID string) error {
|
||||
func (s *Sandbox) PauseContainer(ctx context.Context, contID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResumeContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) ResumeContainer(contID string) error {
|
||||
func (s *Sandbox) ResumeContainer(ctx context.Context, contID string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -151,37 +151,37 @@ func (s *Sandbox) Status() vc.SandboxStatus {
|
||||
}
|
||||
|
||||
// EnterContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (vc.VCContainer, *vc.Process, error) {
|
||||
func (s *Sandbox) EnterContainer(ctx context.Context, containerID string, cmd types.Cmd) (vc.VCContainer, *vc.Process, error) {
|
||||
return &Container{}, &vc.Process{}, nil
|
||||
}
|
||||
|
||||
// Monitor implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) Monitor() (chan error, error) {
|
||||
func (s *Sandbox) Monitor(ctx context.Context) (chan error, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// UpdateContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) UpdateContainer(containerID string, resources specs.LinuxResources) error {
|
||||
func (s *Sandbox) UpdateContainer(ctx context.Context, containerID string, resources specs.LinuxResources) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessListContainer implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) ProcessListContainer(containerID string, options vc.ProcessListOptions) (vc.ProcessList, error) {
|
||||
func (s *Sandbox) ProcessListContainer(ctx context.Context, containerID string, options vc.ProcessListOptions) (vc.ProcessList, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// WaitProcess implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) WaitProcess(containerID, processID string) (int32, error) {
|
||||
func (s *Sandbox) WaitProcess(ctx context.Context, containerID, processID string) (int32, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// SignalProcess implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) SignalProcess(containerID, processID string, signal syscall.Signal, all bool) error {
|
||||
func (s *Sandbox) SignalProcess(ctx context.Context, containerID, processID string, signal syscall.Signal, all bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WinsizeProcess implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) WinsizeProcess(containerID, processID string, height, width uint32) error {
|
||||
func (s *Sandbox) WinsizeProcess(ctx context.Context, containerID, processID string, height, width uint32) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -191,36 +191,36 @@ func (s *Sandbox) IOStream(containerID, processID string) (io.WriteCloser, io.Re
|
||||
}
|
||||
|
||||
// AddDevice adds a device to sandbox
|
||||
func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
|
||||
func (s *Sandbox) AddDevice(ctx context.Context, info config.DeviceInfo) (api.Device, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// AddInterface implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
func (s *Sandbox) AddInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// RemoveInterface implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
func (s *Sandbox) RemoveInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ListInterfaces implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) ListInterfaces() ([]*pbTypes.Interface, error) {
|
||||
func (s *Sandbox) ListInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// UpdateRoutes implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) UpdateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
|
||||
func (s *Sandbox) UpdateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ListRoutes implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) ListRoutes() ([]*pbTypes.Route, error) {
|
||||
func (s *Sandbox) ListRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *Sandbox) GetOOMEvent() (string, error) {
|
||||
func (s *Sandbox) GetOOMEvent(ctx context.Context) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@ -233,7 +233,7 @@ func (s *Sandbox) UpdateRuntimeMetrics() error {
|
||||
}
|
||||
|
||||
// GetAgentMetrics implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) GetAgentMetrics() (string, error) {
|
||||
func (s *Sandbox) GetAgentMetrics(ctx context.Context) (string, error) {
|
||||
if s.GetAgentMetricsFunc != nil {
|
||||
return s.GetAgentMetricsFunc()
|
||||
}
|
||||
@ -241,7 +241,7 @@ func (s *Sandbox) GetAgentMetrics() (string, error) {
|
||||
}
|
||||
|
||||
// Stats implements the VCSandbox function of the same name.
|
||||
func (s *Sandbox) Stats() (vc.SandboxStats, error) {
|
||||
func (s *Sandbox) Stats(ctx context.Context) (vc.SandboxStats, error) {
|
||||
if s.StatsFunc != nil {
|
||||
return s.StatsFunc()
|
||||
}
|
||||
|
@ -185,8 +185,8 @@ func (q *qemu) kernelParameters() string {
|
||||
}
|
||||
|
||||
// capabilities returns all capabilities supported by the qemu implementation of the hypervisor interface
|
||||
func (q *qemu) capabilities() types.Capabilities {
|
||||
span, _ := q.trace("capabilities")
|
||||
func (q *qemu) capabilities(ctx context.Context) types.Capabilities {
|
||||
span, _ := q.trace(ctx, "capabilities")
|
||||
defer span.End()
|
||||
|
||||
return q.arch.capabilities()
|
||||
@ -214,22 +214,22 @@ func (q *qemu) qemuPath() (string, error) {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (q *qemu) trace(name string) (otelTrace.Span, context.Context) {
|
||||
if q.ctx == nil {
|
||||
func (q *qemu) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
|
||||
if parent == nil {
|
||||
q.Logger().WithField("type", "bug").Error("trace called before context set")
|
||||
q.ctx = context.Background()
|
||||
parent = context.Background()
|
||||
}
|
||||
|
||||
tracer := otel.Tracer("kata")
|
||||
ctx, span := tracer.Start(q.ctx, name)
|
||||
ctx, span := tracer.Start(parent, name)
|
||||
span.SetAttributes([]otelLabel.KeyValue{otelLabel.Key("subsystem").String("hypervisor"), otelLabel.Key("type").String("qemu")}...)
|
||||
|
||||
return span, ctx
|
||||
}
|
||||
|
||||
// setup sets the Qemu structure up.
|
||||
func (q *qemu) setup(id string, hypervisorConfig *HypervisorConfig) error {
|
||||
span, _ := q.trace("setup")
|
||||
func (q *qemu) setup(ctx context.Context, id string, hypervisorConfig *HypervisorConfig) error {
|
||||
span, _ := q.trace(ctx, "setup")
|
||||
defer span.End()
|
||||
|
||||
err := hypervisorConfig.valid()
|
||||
@ -387,10 +387,10 @@ func (q *qemu) createQmpSocket() ([]govmmQemu.QMPSocket, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (q *qemu) buildDevices(initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) {
|
||||
func (q *qemu) buildDevices(ctx context.Context, initrdPath string) ([]govmmQemu.Device, *govmmQemu.IOThread, error) {
|
||||
var devices []govmmQemu.Device
|
||||
|
||||
_, console, err := q.getSandboxConsole(q.id)
|
||||
_, console, err := q.getSandboxConsole(ctx, q.id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -472,10 +472,10 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
|
||||
// Save the tracing context
|
||||
q.ctx = ctx
|
||||
|
||||
span, _ := q.trace("createSandbox")
|
||||
span, ctx := q.trace(ctx, "createSandbox")
|
||||
defer span.End()
|
||||
|
||||
if err := q.setup(id, hypervisorConfig); err != nil {
|
||||
if err := q.setup(ctx, id, hypervisorConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -562,7 +562,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
|
||||
return err
|
||||
}
|
||||
|
||||
devices, ioThread, err := q.buildDevices(initrdPath)
|
||||
devices, ioThread, err := q.buildDevices(ctx, initrdPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -658,7 +658,7 @@ func (q *qemu) virtiofsdArgs(fd uintptr) []string {
|
||||
return args
|
||||
}
|
||||
|
||||
func (q *qemu) setupVirtiofsd() (err error) {
|
||||
func (q *qemu) setupVirtiofsd(ctx context.Context) (err error) {
|
||||
var listener *net.UnixListener
|
||||
var fd *os.File
|
||||
|
||||
@ -707,7 +707,7 @@ func (q *qemu) setupVirtiofsd() (err error) {
|
||||
q.Logger().Info("virtiofsd quits")
|
||||
// Wait to release resources of virtiofsd process
|
||||
cmd.Process.Wait()
|
||||
q.stopSandbox()
|
||||
q.stopSandbox(ctx)
|
||||
}()
|
||||
return err
|
||||
}
|
||||
@ -775,8 +775,8 @@ func (q *qemu) setupVirtioMem() error {
|
||||
}
|
||||
|
||||
// startSandbox will start the Sandbox's VM.
|
||||
func (q *qemu) startSandbox(timeout int) error {
|
||||
span, _ := q.trace("startSandbox")
|
||||
func (q *qemu) startSandbox(ctx context.Context, timeout int) error {
|
||||
span, ctx := q.trace(ctx, "startSandbox")
|
||||
defer span.End()
|
||||
|
||||
if q.config.Debug {
|
||||
@ -828,7 +828,7 @@ func (q *qemu) startSandbox(timeout int) error {
|
||||
defer label.SetProcessLabel("")
|
||||
|
||||
if q.config.SharedFS == config.VirtioFS {
|
||||
err = q.setupVirtiofsd()
|
||||
err = q.setupVirtiofsd(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -848,7 +848,7 @@ func (q *qemu) startSandbox(timeout int) error {
|
||||
return fmt.Errorf("failed to launch qemu: %s, error messages from qemu log: %s", err, strErr)
|
||||
}
|
||||
|
||||
err = q.waitSandbox(timeout)
|
||||
err = q.waitSandbox(ctx, timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -886,8 +886,8 @@ func (q *qemu) bootFromTemplate() error {
|
||||
}
|
||||
|
||||
// waitSandbox will wait for the Sandbox's VM to be up and running.
|
||||
func (q *qemu) waitSandbox(timeout int) error {
|
||||
span, _ := q.trace("waitSandbox")
|
||||
func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
|
||||
span, _ := q.trace(ctx, "waitSandbox")
|
||||
defer span.End()
|
||||
|
||||
if timeout < 0 {
|
||||
@ -940,8 +940,8 @@ func (q *qemu) waitSandbox(timeout int) error {
|
||||
}
|
||||
|
||||
// stopSandbox will stop the Sandbox's VM.
|
||||
func (q *qemu) stopSandbox() error {
|
||||
span, _ := q.trace("stopSandbox")
|
||||
func (q *qemu) stopSandbox(ctx context.Context) error {
|
||||
span, _ := q.trace(ctx, "stopSandbox")
|
||||
defer span.End()
|
||||
|
||||
q.Logger().Info("Stopping Sandbox")
|
||||
@ -1014,8 +1014,8 @@ func (q *qemu) cleanupVM() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *qemu) togglePauseSandbox(pause bool) error {
|
||||
span, _ := q.trace("togglePauseSandbox")
|
||||
func (q *qemu) togglePauseSandbox(ctx context.Context, pause bool) error {
|
||||
span, _ := q.trace(ctx, "togglePauseSandbox")
|
||||
defer span.End()
|
||||
|
||||
if err := q.qmpSetup(); err != nil {
|
||||
@ -1210,7 +1210,7 @@ func (q *qemu) qmpShutdown() {
|
||||
}
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, devID string) (err error) {
|
||||
func (q *qemu) hotplugAddBlockDevice(ctx context.Context, drive *config.BlockDrive, op operation, devID string) (err error) {
|
||||
// drive can be a pmem device, in which case it's used as backing file for a nvdimm device
|
||||
if q.config.BlockDeviceDriver == config.Nvdimm || drive.Pmem {
|
||||
var blocksize int64
|
||||
@ -1260,7 +1260,7 @@ func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, dev
|
||||
case q.config.BlockDeviceDriver == config.VirtioBlockCCW:
|
||||
driver := "virtio-blk-ccw"
|
||||
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(drive.ID, types.CCW)
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(ctx, drive.ID, types.CCW)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1278,7 +1278,7 @@ func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, dev
|
||||
}
|
||||
case q.config.BlockDeviceDriver == config.VirtioBlock:
|
||||
driver := "virtio-blk-pci"
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(drive.ID, types.PCI)
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(ctx, drive.ID, types.PCI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1327,7 +1327,7 @@ func (q *qemu) hotplugAddBlockDevice(drive *config.BlockDrive, op operation, dev
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugAddVhostUserBlkDevice(vAttr *config.VhostUserDeviceAttrs, op operation, devID string) (err error) {
|
||||
func (q *qemu) hotplugAddVhostUserBlkDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation, devID string) (err error) {
|
||||
err = q.qmpMonitorCh.qmp.ExecuteCharDevUnixSocketAdd(q.qmpMonitorCh.ctx, vAttr.DevID, vAttr.SocketPath, false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1340,7 +1340,7 @@ func (q *qemu) hotplugAddVhostUserBlkDevice(vAttr *config.VhostUserDeviceAttrs,
|
||||
}()
|
||||
|
||||
driver := "vhost-user-blk-pci"
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(vAttr.DevID, types.PCI)
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(ctx, vAttr.DevID, types.PCI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1368,7 +1368,7 @@ func (q *qemu) hotplugAddVhostUserBlkDevice(vAttr *config.VhostUserDeviceAttrs,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error {
|
||||
func (q *qemu) hotplugBlockDevice(ctx context.Context, drive *config.BlockDrive, op operation) error {
|
||||
if err := q.qmpSetup(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1376,7 +1376,7 @@ func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error
|
||||
devID := "virtio-" + drive.ID
|
||||
|
||||
if op == addDevice {
|
||||
return q.hotplugAddBlockDevice(drive, op, devID)
|
||||
return q.hotplugAddBlockDevice(ctx, drive, op, devID)
|
||||
} else {
|
||||
if q.config.BlockDeviceDriver == config.VirtioBlock {
|
||||
if err := q.arch.removeDeviceFromBridge(drive.ID); err != nil {
|
||||
@ -1392,7 +1392,7 @@ func (q *qemu) hotplugBlockDevice(drive *config.BlockDrive, op operation) error
|
||||
}
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugVhostUserDevice(vAttr *config.VhostUserDeviceAttrs, op operation) error {
|
||||
func (q *qemu) hotplugVhostUserDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation) error {
|
||||
if err := q.qmpSetup(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1402,7 +1402,7 @@ func (q *qemu) hotplugVhostUserDevice(vAttr *config.VhostUserDeviceAttrs, op ope
|
||||
if op == addDevice {
|
||||
switch vAttr.Type {
|
||||
case config.VhostUserBlk:
|
||||
return q.hotplugAddVhostUserBlkDevice(vAttr, op, devID)
|
||||
return q.hotplugAddVhostUserBlkDevice(ctx, vAttr, op, devID)
|
||||
default:
|
||||
return fmt.Errorf("Incorrect vhost-user device type found")
|
||||
}
|
||||
@ -1419,7 +1419,7 @@ func (q *qemu) hotplugVhostUserDevice(vAttr *config.VhostUserDeviceAttrs, op ope
|
||||
}
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugVFIODevice(device *config.VFIODev, op operation) (err error) {
|
||||
func (q *qemu) hotplugVFIODevice(ctx context.Context, device *config.VFIODev, op operation) (err error) {
|
||||
if err = q.qmpSetup(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1466,7 +1466,7 @@ func (q *qemu) hotplugVFIODevice(device *config.VFIODev, op operation) (err erro
|
||||
}
|
||||
}
|
||||
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(devID, types.PCI)
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(ctx, devID, types.PCI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1524,7 +1524,7 @@ func (q *qemu) hotAddNetDevice(name, hardAddr string, VMFds, VhostFds []*os.File
|
||||
return q.qmpMonitorCh.qmp.ExecuteNetdevAddByFds(q.qmpMonitorCh.ctx, "tap", name, VMFdNames, VhostFdNames)
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugNetDevice(endpoint Endpoint, op operation) (err error) {
|
||||
func (q *qemu) hotplugNetDevice(ctx context.Context, endpoint Endpoint, op operation) (err error) {
|
||||
if err = q.qmpSetup(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1553,7 +1553,7 @@ func (q *qemu) hotplugNetDevice(endpoint Endpoint, op operation) (err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(tap.ID, types.PCI)
|
||||
addr, bridge, err := q.arch.addDeviceToBridge(ctx, tap.ID, types.PCI)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1599,36 +1599,36 @@ func (q *qemu) hotplugNetDevice(endpoint Endpoint, op operation) (err error) {
|
||||
return q.qmpMonitorCh.qmp.ExecuteNetdevDel(q.qmpMonitorCh.ctx, tap.Name)
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugDevice(devInfo interface{}, devType deviceType, op operation) (interface{}, error) {
|
||||
func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType deviceType, op operation) (interface{}, error) {
|
||||
switch devType {
|
||||
case blockDev:
|
||||
drive := devInfo.(*config.BlockDrive)
|
||||
return nil, q.hotplugBlockDevice(drive, op)
|
||||
return nil, q.hotplugBlockDevice(ctx, drive, op)
|
||||
case cpuDev:
|
||||
vcpus := devInfo.(uint32)
|
||||
return q.hotplugCPUs(vcpus, op)
|
||||
case vfioDev:
|
||||
device := devInfo.(*config.VFIODev)
|
||||
return nil, q.hotplugVFIODevice(device, op)
|
||||
return nil, q.hotplugVFIODevice(ctx, device, op)
|
||||
case memoryDev:
|
||||
memdev := devInfo.(*memoryDevice)
|
||||
return q.hotplugMemory(memdev, op)
|
||||
case netDev:
|
||||
device := devInfo.(Endpoint)
|
||||
return nil, q.hotplugNetDevice(device, op)
|
||||
return nil, q.hotplugNetDevice(ctx, device, op)
|
||||
case vhostuserDev:
|
||||
vAttr := devInfo.(*config.VhostUserDeviceAttrs)
|
||||
return nil, q.hotplugVhostUserDevice(vAttr, op)
|
||||
return nil, q.hotplugVhostUserDevice(ctx, vAttr, op)
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot hotplug device: unsupported device type '%v'", devType)
|
||||
}
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, _ := q.trace("hotplugAddDevice")
|
||||
func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, ctx := q.trace(ctx, "hotplugAddDevice")
|
||||
defer span.End()
|
||||
|
||||
data, err := q.hotplugDevice(devInfo, devType, addDevice)
|
||||
data, err := q.hotplugDevice(ctx, devInfo, devType, addDevice)
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
@ -1636,11 +1636,11 @@ func (q *qemu) hotplugAddDevice(devInfo interface{}, devType deviceType) (interf
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (q *qemu) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, _ := q.trace("hotplugRemoveDevice")
|
||||
func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
|
||||
span, ctx := q.trace(ctx, "hotplugRemoveDevice")
|
||||
defer span.End()
|
||||
|
||||
data, err := q.hotplugDevice(devInfo, devType, removeDevice)
|
||||
data, err := q.hotplugDevice(ctx, devInfo, devType, removeDevice)
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
@ -1848,24 +1848,24 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
|
||||
return memDev.sizeMB, nil
|
||||
}
|
||||
|
||||
func (q *qemu) pauseSandbox() error {
|
||||
span, _ := q.trace("pauseSandbox")
|
||||
func (q *qemu) pauseSandbox(ctx context.Context) error {
|
||||
span, ctx := q.trace(ctx, "pauseSandbox")
|
||||
defer span.End()
|
||||
|
||||
return q.togglePauseSandbox(true)
|
||||
return q.togglePauseSandbox(ctx, true)
|
||||
}
|
||||
|
||||
func (q *qemu) resumeSandbox() error {
|
||||
span, _ := q.trace("resumeSandbox")
|
||||
func (q *qemu) resumeSandbox(ctx context.Context) error {
|
||||
span, ctx := q.trace(ctx, "resumeSandbox")
|
||||
defer span.End()
|
||||
|
||||
return q.togglePauseSandbox(false)
|
||||
return q.togglePauseSandbox(ctx, false)
|
||||
}
|
||||
|
||||
// addDevice will add extra devices to Qemu command line.
|
||||
func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error {
|
||||
func (q *qemu) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
|
||||
var err error
|
||||
span, _ := q.trace("addDevice")
|
||||
span, _ := q.trace(ctx, "addDevice")
|
||||
defer span.End()
|
||||
|
||||
switch v := devInfo.(type) {
|
||||
@ -1922,8 +1922,8 @@ func (q *qemu) addDevice(devInfo interface{}, devType deviceType) error {
|
||||
|
||||
// getSandboxConsole builds the path of the console where we can read
|
||||
// logs coming from the sandbox.
|
||||
func (q *qemu) getSandboxConsole(id string) (string, string, error) {
|
||||
span, _ := q.trace("getSandboxConsole")
|
||||
func (q *qemu) getSandboxConsole(ctx context.Context, id string) (string, string, error) {
|
||||
span, _ := q.trace(ctx, "getSandboxConsole")
|
||||
defer span.End()
|
||||
|
||||
consoleURL, err := utils.BuildSocketPath(q.store.RunVMStoragePath(), id, consoleSocket)
|
||||
@ -1987,8 +1987,8 @@ func (q *qemu) waitMigration() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *qemu) disconnect() {
|
||||
span, _ := q.trace("disconnect")
|
||||
func (q *qemu) disconnect(ctx context.Context) {
|
||||
span, _ := q.trace(ctx, "disconnect")
|
||||
defer span.End()
|
||||
|
||||
q.qmpShutdown()
|
||||
@ -2005,7 +2005,7 @@ func (q *qemu) disconnect() {
|
||||
// the memory to remove has to be at least the size of one slot.
|
||||
// To return memory back we are resizing the VM memory balloon.
|
||||
// A longer-term solution is to evaluate solutions like virtio-mem
|
||||
func (q *qemu) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
|
||||
func (q *qemu) resizeMemory(ctx context.Context, reqMemMB uint32, memoryBlockSizeMB uint32, probe bool) (uint32, memoryDevice, error) {
|
||||
|
||||
currentMemory := q.config.MemorySize + uint32(q.state.HotpluggedMemory)
|
||||
if err := q.qmpSetup(); err != nil {
|
||||
@ -2035,7 +2035,7 @@ func (q *qemu) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe boo
|
||||
addMemDevice.sizeMB = int(memHotplugMB)
|
||||
addMemDevice.probe = probe
|
||||
|
||||
data, err := q.hotplugAddDevice(&addMemDevice, memoryDev)
|
||||
data, err := q.hotplugAddDevice(ctx, &addMemDevice, memoryDev)
|
||||
if err != nil {
|
||||
return currentMemory, addMemDevice, err
|
||||
}
|
||||
@ -2055,7 +2055,7 @@ func (q *qemu) resizeMemory(reqMemMB uint32, memoryBlockSizeMB uint32, probe boo
|
||||
addMemDevice.sizeMB = int(memHotunplugMB)
|
||||
addMemDevice.probe = probe
|
||||
|
||||
data, err := q.hotplugRemoveDevice(&addMemDevice, memoryDev)
|
||||
data, err := q.hotplugRemoveDevice(ctx, &addMemDevice, memoryDev)
|
||||
if err != nil {
|
||||
return currentMemory, addMemDevice, err
|
||||
}
|
||||
@ -2192,8 +2192,8 @@ func genericAppendPCIeRootPort(devices []govmmQemu.Device, number uint32, machin
|
||||
return devices
|
||||
}
|
||||
|
||||
func (q *qemu) getThreadIDs() (vcpuThreadIDs, error) {
|
||||
span, _ := q.trace("getThreadIDs")
|
||||
func (q *qemu) getThreadIDs(ctx context.Context) (vcpuThreadIDs, error) {
|
||||
span, _ := q.trace(ctx, "getThreadIDs")
|
||||
defer span.End()
|
||||
|
||||
tid := vcpuThreadIDs{}
|
||||
@ -2225,7 +2225,7 @@ func calcHotplugMemMiBSize(mem uint32, memorySectionSizeMB uint32) (uint32, erro
|
||||
return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
|
||||
}
|
||||
|
||||
func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
|
||||
func (q *qemu) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint32, err error) {
|
||||
|
||||
currentVCPUs = q.config.NumVCPUs + uint32(len(q.state.HotpluggedVCPUs))
|
||||
newVCPUs = currentVCPUs
|
||||
@ -2233,7 +2233,7 @@ func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint3
|
||||
case currentVCPUs < reqVCPUs:
|
||||
//hotplug
|
||||
addCPUs := reqVCPUs - currentVCPUs
|
||||
data, err := q.hotplugAddDevice(addCPUs, cpuDev)
|
||||
data, err := q.hotplugAddDevice(ctx, addCPUs, cpuDev)
|
||||
if err != nil {
|
||||
return currentVCPUs, newVCPUs, err
|
||||
}
|
||||
@ -2245,7 +2245,7 @@ func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint3
|
||||
case currentVCPUs > reqVCPUs:
|
||||
//hotunplug
|
||||
removeCPUs := currentVCPUs - reqVCPUs
|
||||
data, err := q.hotplugRemoveDevice(removeCPUs, cpuDev)
|
||||
data, err := q.hotplugRemoveDevice(ctx, removeCPUs, cpuDev)
|
||||
if err != nil {
|
||||
return currentVCPUs, newVCPUs, err
|
||||
}
|
||||
@ -2258,8 +2258,8 @@ func (q *qemu) resizeVCPUs(reqVCPUs uint32) (currentVCPUs uint32, newVCPUs uint3
|
||||
return currentVCPUs, newVCPUs, nil
|
||||
}
|
||||
|
||||
func (q *qemu) cleanup() error {
|
||||
span, _ := q.trace("cleanup")
|
||||
func (q *qemu) cleanup(ctx context.Context) error {
|
||||
span, _ := q.trace(ctx, "cleanup")
|
||||
defer span.End()
|
||||
|
||||
for _, fd := range q.fds {
|
||||
@ -2333,10 +2333,10 @@ func (q *qemu) fromGrpc(ctx context.Context, hypervisorConfig *HypervisorConfig,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *qemu) toGrpc() ([]byte, error) {
|
||||
func (q *qemu) toGrpc(ctx context.Context) ([]byte, error) {
|
||||
q.qmpShutdown()
|
||||
|
||||
q.cleanup()
|
||||
q.cleanup(ctx)
|
||||
qp := qemuGrpc{
|
||||
ID: q.id,
|
||||
QmpChannelpath: q.qmpMonitorCh.path,
|
||||
|
@ -105,7 +105,7 @@ type qemuArch interface {
appendRNGDevice(devices []govmmQemu.Device, rngDevice config.RNGDev) ([]govmmQemu.Device, error)

// addDeviceToBridge adds devices to the bus
addDeviceToBridge(ID string, t types.Type) (string, types.Bridge, error)
addDeviceToBridge(ctx context.Context, ID string, t types.Type) (string, types.Bridge, error)

// removeDeviceFromBridge removes devices to the bus
removeDeviceFromBridge(ID string) error
@ -722,8 +722,8 @@ func (q *qemuArchBase) setIgnoreSharedMemoryMigrationCaps(ctx context.Context, q
return err
}

func (q *qemuArchBase) addDeviceToBridge(ID string, t types.Type) (string, types.Bridge, error) {
addr, b, err := genericAddDeviceToBridge(q.Bridges, ID, t)
func (q *qemuArchBase) addDeviceToBridge(ctx context.Context, ID string, t types.Type) (string, types.Bridge, error) {
addr, b, err := genericAddDeviceToBridge(ctx, q.Bridges, ID, t)
if err != nil {
return "", b, err
}
@ -731,7 +731,7 @@ func (q *qemuArchBase) addDeviceToBridge(ID string, t types.Type) (string, types
return fmt.Sprintf("%02x", addr), b, nil
}

func genericAddDeviceToBridge(bridges []types.Bridge, ID string, t types.Type) (uint32, types.Bridge, error) {
func genericAddDeviceToBridge(ctx context.Context, bridges []types.Bridge, ID string, t types.Type) (uint32, types.Bridge, error) {
var err error
var addr uint32

@ -744,7 +744,7 @@ func genericAddDeviceToBridge(bridges []types.Bridge, ID string, t types.Type) (
if t != b.Type {
continue
}
addr, err = b.AddDevice(ID)
addr, err = b.AddDevice(ctx, ID)
if err == nil {
return addr, b, nil
}
@ -212,7 +212,7 @@ func testQemuAddDevice(t *testing.T, devInfo interface{}, devType deviceType, ex
arch: &qemuArchBase{},
}

err := q.addDevice(devInfo, devType)
err := q.addDevice(context.Background(), devInfo, devType)
assert.NoError(err)
assert.Exactly(q.qemuConfig.Devices, expected)
}
@ -402,9 +402,9 @@ func TestHotplugUnsupportedDeviceType(t *testing.T) {
config: qemuConfig,
}

_, err := q.hotplugAddDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
_, err := q.hotplugAddDevice(context.Background(), &memoryDevice{0, 128, uint64(0), false}, fsDev)
assert.Error(err)
_, err = q.hotplugRemoveDevice(&memoryDevice{0, 128, uint64(0), false}, fsDev)
_, err = q.hotplugRemoveDevice(context.Background(), &memoryDevice{0, 128, uint64(0), false}, fsDev)
assert.Error(err)
}
@ -127,14 +127,14 @@ type SandboxConfig struct {
Cgroups *configs.Cgroup
}

func (s *Sandbox) trace(name string) (otelTrace.Span, context.Context) {
if s.ctx == nil {
func (s *Sandbox) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
if parent == nil {
s.Logger().WithField("type", "bug").Error("trace called before context set")
s.ctx = context.Background()
parent = context.Background()
}

tracer := otel.Tracer("kata")
ctx, span := tracer.Start(s.ctx, name)
ctx, span := tracer.Start(parent, name)
span.SetAttributes(otelLabel.Key("subsystem").String("sandbox"))

return span, ctx
@ -283,13 +283,13 @@ func (s *Sandbox) GetContainer(containerID string) VCContainer {
}

// Release closes the agent connection and removes sandbox from internal list.
func (s *Sandbox) Release() error {
func (s *Sandbox) Release(ctx context.Context) error {
s.Logger().Info("release sandbox")
if s.monitor != nil {
s.monitor.stop()
}
s.hypervisor.disconnect()
return s.agent.disconnect()
s.hypervisor.disconnect(ctx)
return s.agent.disconnect(ctx)
}

// Status gets the status of the sandbox
@ -323,7 +323,7 @@ func (s *Sandbox) Status() SandboxStatus {
}

// Monitor returns a error channel for watcher to watch at
func (s *Sandbox) Monitor() (chan error, error) {
func (s *Sandbox) Monitor(ctx context.Context) (chan error, error) {
if s.state.State != types.StateRunning {
return nil, fmt.Errorf("Sandbox is not running")
}
@ -334,11 +334,11 @@ func (s *Sandbox) Monitor() (chan error, error) {
}
s.Unlock()

return s.monitor.newWatcher()
return s.monitor.newWatcher(ctx)
}

// WaitProcess waits on a container process and return its exit code
func (s *Sandbox) WaitProcess(containerID, processID string) (int32, error) {
func (s *Sandbox) WaitProcess(ctx context.Context, containerID, processID string) (int32, error) {
if s.state.State != types.StateRunning {
return 0, fmt.Errorf("Sandbox not running")
}
@ -348,12 +348,12 @@ func (s *Sandbox) WaitProcess(containerID, processID string) (int32, error) {
return 0, err
}

return c.wait(processID)
return c.wait(ctx, processID)
}

// SignalProcess sends a signal to a process of a container when all is false.
// When all is true, it sends the signal to all processes of a container.
func (s *Sandbox) SignalProcess(containerID, processID string, signal syscall.Signal, all bool) error {
func (s *Sandbox) SignalProcess(ctx context.Context, containerID, processID string, signal syscall.Signal, all bool) error {
if s.state.State != types.StateRunning {
return fmt.Errorf("Sandbox not running")
}
@ -363,11 +363,11 @@ func (s *Sandbox) SignalProcess(containerID, processID string, signal syscall.Si
return err
}

return c.signalProcess(processID, signal, all)
return c.signalProcess(ctx, processID, signal, all)
}

// WinsizeProcess resizes the tty window of a process
func (s *Sandbox) WinsizeProcess(containerID, processID string, height, width uint32) error {
func (s *Sandbox) WinsizeProcess(ctx context.Context, containerID, processID string, height, width uint32) error {
if s.state.State != types.StateRunning {
return fmt.Errorf("Sandbox not running")
}
@ -377,7 +377,7 @@ func (s *Sandbox) WinsizeProcess(containerID, processID string, height, width ui
return err
}

return c.winsizeProcess(processID, height, width)
return c.winsizeProcess(ctx, processID, height, width)
}

// IOStream returns stdin writer, stdout reader and stderr reader of a process
@ -419,8 +419,8 @@ func createAssets(ctx context.Context, sandboxConfig *SandboxConfig) error {
return nil
}

func (s *Sandbox) getAndStoreGuestDetails() error {
guestDetailRes, err := s.agent.getGuestDetails(&grpc.GuestDetailsRequest{
func (s *Sandbox) getAndStoreGuestDetails(ctx context.Context) error {
guestDetailRes, err := s.agent.getGuestDetails(ctx, &grpc.GuestDetailsRequest{
MemBlockSize: true,
MemHotplugProbe: true,
})
@ -470,7 +470,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
}

// Below code path is called only during create, because of earlier check.
if err := s.agent.createSandbox(s); err != nil {
if err := s.agent.createSandbox(ctx, s); err != nil {
return nil, err
}

@ -617,8 +617,8 @@ func (s *Sandbox) createCgroupManager() error {
}

// storeSandbox stores a sandbox config.
func (s *Sandbox) storeSandbox() error {
span, _ := s.trace("storeSandbox")
func (s *Sandbox) storeSandbox(ctx context.Context) error {
span, ctx := s.trace(ctx, "storeSandbox")
defer span.End()

// flush data to storage
@ -688,7 +688,7 @@ func (s *Sandbox) removeContainer(containerID string) error {

// Delete deletes an already created sandbox.
// The VM in which the sandbox is running will be shut down.
func (s *Sandbox) Delete() error {
func (s *Sandbox) Delete(ctx context.Context) error {
if s.state.State != types.StateReady &&
s.state.State != types.StatePaused &&
s.state.State != types.StateStopped {
@ -696,7 +696,7 @@ func (s *Sandbox) Delete() error {
}

for _, c := range s.containers {
if err := c.delete(); err != nil {
if err := c.delete(ctx); err != nil {
return err
}
}
@ -711,17 +711,18 @@ func (s *Sandbox) Delete() error {
s.monitor.stop()
}

if err := s.hypervisor.cleanup(); err != nil {
if err := s.hypervisor.cleanup(ctx); err != nil {
s.Logger().WithError(err).Error("failed to cleanup hypervisor")
}

s.agent.cleanup(s)
s.agent.cleanup(ctx, s)

return s.newStore.Destroy(s.id)
}

func (s *Sandbox) startNetworkMonitor() error {
span, _ := s.trace("startNetworkMonitor")
func (s *Sandbox) startNetworkMonitor(ctx context.Context) error {
var span otelTrace.Span
span, ctx = s.trace(ctx, "startNetworkMonitor")
defer span.End()

binPath, err := os.Executable()
@ -742,7 +743,7 @@ func (s *Sandbox) startNetworkMonitor() error {
sandboxID: s.id,
}

return s.network.Run(s.networkNS.NetNsPath, func() error {
return s.network.Run(ctx, s.networkNS.NetNsPath, func() error {
pid, err := startNetmon(params)
if err != nil {
return err
@ -754,13 +755,13 @@ func (s *Sandbox) startNetworkMonitor() error {
})
}

func (s *Sandbox) createNetwork() error {
func (s *Sandbox) createNetwork(ctx context.Context) error {
if s.config.NetworkConfig.DisableNewNetNs ||
s.config.NetworkConfig.NetNSPath == "" {
return nil
}

span, _ := s.trace("createNetwork")
span, ctx := s.trace(ctx, "createNetwork")
defer span.End()

s.networkNS = NetworkNamespace{
@ -772,7 +773,7 @@ func (s *Sandbox) createNetwork() error {
// after vm is started.
if s.factory == nil {
// Add the network
endpoints, err := s.network.Add(s.ctx, &s.config.NetworkConfig, s, false)
endpoints, err := s.network.Add(ctx, &s.config.NetworkConfig, s, false)
if err != nil {
return err
}
@ -780,7 +781,7 @@ func (s *Sandbox) createNetwork() error {
s.networkNS.Endpoints = endpoints

if s.config.NetworkConfig.NetmonConfig.Enable {
if err := s.startNetworkMonitor(); err != nil {
if err := s.startNetworkMonitor(ctx); err != nil {
return err
}
}
@ -788,13 +789,14 @@ func (s *Sandbox) createNetwork() error {
return nil
}

func (s *Sandbox) postCreatedNetwork() error {
func (s *Sandbox) postCreatedNetwork(ctx context.Context) error {

return s.network.PostAdd(s.ctx, &s.networkNS, s.factory != nil)
return s.network.PostAdd(ctx, &s.networkNS, s.factory != nil)
}

func (s *Sandbox) removeNetwork() error {
span, _ := s.trace("removeNetwork")
func (s *Sandbox) removeNetwork(ctx context.Context) error {
var span otelTrace.Span
span, ctx = s.trace(ctx, "removeNetwork")
defer span.End()

if s.config.NetworkConfig.NetmonConfig.Enable {
@ -803,7 +805,7 @@ func (s *Sandbox) removeNetwork() error {
}
}

return s.network.Remove(s.ctx, &s.networkNS, s.hypervisor)
return s.network.Remove(ctx, &s.networkNS, s.hypervisor)
}

func (s *Sandbox) generateNetInfo(inf *pbTypes.Interface) (NetworkInfo, error) {
@ -837,7 +839,7 @@ func (s *Sandbox) generateNetInfo(inf *pbTypes.Interface) (NetworkInfo, error) {
}

// AddInterface adds new nic to the sandbox.
func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) {
func (s *Sandbox) AddInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
netInfo, err := s.generateNetInfo(inf)
if err != nil {
return nil, err
@ -851,7 +853,7 @@ func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, erro
endpoint.SetProperties(netInfo)
if err := doNetNS(s.networkNS.NetNsPath, func(_ ns.NetNS) error {
s.Logger().WithField("endpoint-type", endpoint.Type()).Info("Hot attaching endpoint")
return endpoint.HotAttach(s.hypervisor)
return endpoint.HotAttach(ctx, s.hypervisor)
}); err != nil {
return nil, err
}
@ -864,15 +866,15 @@ func (s *Sandbox) AddInterface(inf *pbTypes.Interface) (*pbTypes.Interface, erro

// Add network for vm
inf.PciPath = endpoint.PciPath().String()
return s.agent.updateInterface(inf)
return s.agent.updateInterface(ctx, inf)
}

// RemoveInterface removes a nic of the sandbox.
func (s *Sandbox) RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, error) {
func (s *Sandbox) RemoveInterface(ctx context.Context, inf *pbTypes.Interface) (*pbTypes.Interface, error) {
for i, endpoint := range s.networkNS.Endpoints {
if endpoint.HardwareAddr() == inf.HwAddr {
s.Logger().WithField("endpoint-type", endpoint.Type()).Info("Hot detaching endpoint")
if err := endpoint.HotDetach(s.hypervisor, s.networkNS.NetNsCreated, s.networkNS.NetNsPath); err != nil {
if err := endpoint.HotDetach(ctx, s.hypervisor, s.networkNS.NetNsCreated, s.networkNS.NetNsPath); err != nil {
return inf, err
}
s.networkNS.Endpoints = append(s.networkNS.Endpoints[:i], s.networkNS.Endpoints[i+1:]...)
@ -888,18 +890,18 @@ func (s *Sandbox) RemoveInterface(inf *pbTypes.Interface) (*pbTypes.Interface, e
}

// ListInterfaces lists all nics and their configurations in the sandbox.
func (s *Sandbox) ListInterfaces() ([]*pbTypes.Interface, error) {
return s.agent.listInterfaces()
func (s *Sandbox) ListInterfaces(ctx context.Context) ([]*pbTypes.Interface, error) {
return s.agent.listInterfaces(ctx)
}

// UpdateRoutes updates the sandbox route table (e.g. for portmapping support).
func (s *Sandbox) UpdateRoutes(routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
return s.agent.updateRoutes(routes)
func (s *Sandbox) UpdateRoutes(ctx context.Context, routes []*pbTypes.Route) ([]*pbTypes.Route, error) {
return s.agent.updateRoutes(ctx, routes)
}

// ListRoutes lists all routes and their configurations in the sandbox.
func (s *Sandbox) ListRoutes() ([]*pbTypes.Route, error) {
return s.agent.listRoutes()
func (s *Sandbox) ListRoutes(ctx context.Context) ([]*pbTypes.Route, error) {
return s.agent.listRoutes(ctx)
}

const (
@ -918,13 +920,13 @@ type consoleWatcher struct {
ptyConsole *os.File
}

func newConsoleWatcher(s *Sandbox) (*consoleWatcher, error) {
func newConsoleWatcher(ctx context.Context, s *Sandbox) (*consoleWatcher, error) {
var (
err error
cw consoleWatcher
)

cw.proto, cw.consoleURL, err = s.hypervisor.getSandboxConsole(s.id)
cw.proto, cw.consoleURL, err = s.hypervisor.getSandboxConsole(ctx, s.id)
if err != nil {
return nil, err
}
@ -1000,22 +1002,22 @@ func (cw *consoleWatcher) stop() {
}

// startVM starts the VM.
func (s *Sandbox) startVM() (err error) {
span, ctx := s.trace("startVM")
func (s *Sandbox) startVM(ctx context.Context) (err error) {
span, ctx := s.trace(ctx, "startVM")
defer span.End()

s.Logger().Info("Starting VM")

if s.config.HypervisorConfig.Debug {
// create console watcher
consoleWatcher, err := newConsoleWatcher(s)
consoleWatcher, err := newConsoleWatcher(ctx, s)
if err != nil {
return err
}
s.cw = consoleWatcher
}

if err := s.network.Run(s.networkNS.NetNsPath, func() error {
if err := s.network.Run(ctx, s.networkNS.NetNsPath, func() error {
if s.factory != nil {
vm, err := s.factory.GetVM(ctx, VMConfig{
HypervisorType: s.config.HypervisorType,
@ -1029,21 +1031,21 @@ func (s *Sandbox) startVM() (err error) {
return vm.assignSandbox(s)
}

return s.hypervisor.startSandbox(vmStartTimeout)
return s.hypervisor.startSandbox(ctx, vmStartTimeout)
}); err != nil {
return err
}

defer func() {
if err != nil {
s.hypervisor.stopSandbox()
s.hypervisor.stopSandbox(ctx)
}
}()

// In case of vm factory, network interfaces are hotplugged
// after vm is started.
if s.factory != nil {
endpoints, err := s.network.Add(s.ctx, &s.config.NetworkConfig, s, true)
endpoints, err := s.network.Add(ctx, &s.config.NetworkConfig, s, true)
if err != nil {
return err
}
@ -1051,7 +1053,7 @@ func (s *Sandbox) startVM() (err error) {
s.networkNS.Endpoints = endpoints

if s.config.NetworkConfig.NetmonConfig.Enable {
if err := s.startNetworkMonitor(); err != nil {
if err := s.startNetworkMonitor(ctx); err != nil {
return err
}
}
@ -1071,7 +1073,7 @@ func (s *Sandbox) startVM() (err error) {
// we want to guarantee that it is manageable.
// For that we need to ask the agent to start the
// sandbox inside the VM.
if err := s.agent.startSandbox(s); err != nil {
if err := s.agent.startSandbox(ctx, s); err != nil {
return err
}

@ -1081,12 +1083,12 @@ func (s *Sandbox) startVM() (err error) {
}

// stopVM: stop the sandbox's VM
func (s *Sandbox) stopVM() error {
span, _ := s.trace("stopVM")
func (s *Sandbox) stopVM(ctx context.Context) error {
span, ctx := s.trace(ctx, "stopVM")
defer span.End()

s.Logger().Info("Stopping sandbox in the VM")
if err := s.agent.stopSandbox(s); err != nil {
if err := s.agent.stopSandbox(ctx, s); err != nil {
s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("Agent did not stop sandbox")
}

@ -1097,7 +1099,7 @@ func (s *Sandbox) stopVM() error {
}

s.Logger().Info("Stopping VM")
return s.hypervisor.stopSandbox()
return s.hypervisor.stopSandbox(ctx)
}

func (s *Sandbox) addContainer(c *Container) error {
@ -1112,9 +1114,9 @@ func (s *Sandbox) addContainer(c *Container) error {
// CreateContainer creates a new container in the sandbox
// This should be called only when the sandbox is already created.
// It will add new container config to sandbox.config.Containers
func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, error) {
func (s *Sandbox) CreateContainer(ctx context.Context, contConfig ContainerConfig) (VCContainer, error) {
// Create the container object, add devices to the sandbox's device-manager:
c, err := newContainer(s, &contConfig)
c, err := newContainer(ctx, s, &contConfig)
if err != nil {
return nil, err
}
@ -1132,7 +1134,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
}()

// create and start the container
err = c.create()
err = c.create(ctx)
if err != nil {
return nil, err
}
@ -1148,7 +1150,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
logger := s.Logger().WithFields(logrus.Fields{"container-id": c.id, "sandox-id": s.id, "rollback": true})
logger.WithError(err).Error("Cleaning up partially created container")

if err2 := c.stop(true); err2 != nil {
if err2 := c.stop(ctx, true); err2 != nil {
logger.WithError(err2).Error("Could not delete container")
}

@ -1160,16 +1162,16 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
// Sandbox is responsible to update VM resources needed by Containers
// Update resources after having added containers to the sandbox, since
// container status is requiered to know if more resources should be added.
err = s.updateResources()
err = s.updateResources(ctx)
if err != nil {
return nil, err
}

if err = s.cgroupsUpdate(); err != nil {
if err = s.cgroupsUpdate(ctx); err != nil {
return nil, err
}

if err = s.storeSandbox(); err != nil {
if err = s.storeSandbox(ctx); err != nil {
return nil, err
}

@ -1177,7 +1179,7 @@ func (s *Sandbox) CreateContainer(contConfig ContainerConfig) (VCContainer, erro
}

// StartContainer starts a container in the sandbox
func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) {
func (s *Sandbox) StartContainer(ctx context.Context, containerID string) (VCContainer, error) {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
@ -1185,12 +1187,12 @@ func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) {
}

// Start it.
err = c.start()
err = c.start(ctx)
if err != nil {
return nil, err
}

if err = s.storeSandbox(); err != nil {
if err = s.storeSandbox(ctx); err != nil {
return nil, err
}

@ -1198,7 +1200,7 @@ func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) {

// Update sandbox resources in case a stopped container
// is started
err = s.updateResources()
err = s.updateResources(ctx)
if err != nil {
return nil, err
}
@ -1207,7 +1209,7 @@ func (s *Sandbox) StartContainer(containerID string) (VCContainer, error) {
}

// StopContainer stops a container in the sandbox
func (s *Sandbox) StopContainer(containerID string, force bool) (VCContainer, error) {
func (s *Sandbox) StopContainer(ctx context.Context, containerID string, force bool) (VCContainer, error) {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
@ -1215,18 +1217,18 @@ func (s *Sandbox) StopContainer(containerID string, force bool) (VCContainer, er
}

// Stop it.
if err := c.stop(force); err != nil {
if err := c.stop(ctx, force); err != nil {
return nil, err
}

if err = s.storeSandbox(); err != nil {
if err = s.storeSandbox(ctx); err != nil {
return nil, err
}
return c, nil
}

// KillContainer signals a container in the sandbox
func (s *Sandbox) KillContainer(containerID string, signal syscall.Signal, all bool) error {
func (s *Sandbox) KillContainer(ctx context.Context, containerID string, signal syscall.Signal, all bool) error {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
@ -1234,7 +1236,7 @@ func (s *Sandbox) KillContainer(containerID string, signal syscall.Signal, all b
}

// Send a signal to the process.
err = c.kill(signal, all)
err = c.kill(ctx, signal, all)

// SIGKILL should never fail otherwise it is
// impossible to clean things up.
@ -1246,7 +1248,7 @@ func (s *Sandbox) KillContainer(containerID string, signal syscall.Signal, all b
}

// DeleteContainer deletes a container from the sandbox
func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {
func (s *Sandbox) DeleteContainer(ctx context.Context, containerID string) (VCContainer, error) {
if containerID == "" {
return nil, vcTypes.ErrNeedContainerID
}
@ -1258,7 +1260,7 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {
}

// Delete it.
err = c.delete()
err = c.delete(ctx)
if err != nil {
return nil, err
}
@ -1272,11 +1274,11 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {
}

// update the sandbox cgroup
if err = s.cgroupsUpdate(); err != nil {
if err = s.cgroupsUpdate(ctx); err != nil {
return nil, err
}

if err = s.storeSandbox(); err != nil {
if err = s.storeSandbox(ctx); err != nil {
return nil, err
}
return c, nil
@ -1284,7 +1286,7 @@ func (s *Sandbox) DeleteContainer(containerID string) (VCContainer, error) {

// ProcessListContainer lists every process running inside a specific
// container in the sandbox.
func (s *Sandbox) ProcessListContainer(containerID string, options ProcessListOptions) (ProcessList, error) {
func (s *Sandbox) ProcessListContainer(ctx context.Context, containerID string, options ProcessListOptions) (ProcessList, error) {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
@ -1292,7 +1294,7 @@ func (s *Sandbox) ProcessListContainer(containerID string, options ProcessListOp
}

// Get the process list related to the container.
return c.processList(options)
return c.processList(ctx, options)
}

// StatusContainer gets the status of a container
@ -1323,7 +1325,7 @@ func (s *Sandbox) StatusContainer(containerID string) (ContainerStatus, error) {

// EnterContainer is the virtcontainers container command execution entry point.
// EnterContainer enters an already running container and runs a given command.
func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (VCContainer, *Process, error) {
func (s *Sandbox) EnterContainer(ctx context.Context, containerID string, cmd types.Cmd) (VCContainer, *Process, error) {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
@ -1331,7 +1333,7 @@ func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (VCContainer
}

// Enter it.
process, err := c.enter(cmd)
process, err := c.enter(ctx, cmd)
if err != nil {
return nil, nil, err
}
@ -1340,37 +1342,37 @@ func (s *Sandbox) EnterContainer(containerID string, cmd types.Cmd) (VCContainer
}

// UpdateContainer update a running container.
func (s *Sandbox) UpdateContainer(containerID string, resources specs.LinuxResources) error {
func (s *Sandbox) UpdateContainer(ctx context.Context, containerID string, resources specs.LinuxResources) error {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
return err
}

err = c.update(resources)
err = c.update(ctx, resources)
if err != nil {
return err
}

if err := s.cgroupsUpdate(); err != nil {
if err := s.cgroupsUpdate(ctx); err != nil {
return err
}

if err = s.storeSandbox(); err != nil {
if err = s.storeSandbox(ctx); err != nil {
return err
}
return nil
}

// StatsContainer return the stats of a running container
func (s *Sandbox) StatsContainer(containerID string) (ContainerStats, error) {
func (s *Sandbox) StatsContainer(ctx context.Context, containerID string) (ContainerStats, error) {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
return ContainerStats{}, err
}

stats, err := c.stats()
stats, err := c.stats(ctx)
if err != nil {
return ContainerStats{}, err
}
@ -1378,7 +1380,7 @@ func (s *Sandbox) StatsContainer(containerID string) (ContainerStats, error) {
}

// Stats returns the stats of a running sandbox
func (s *Sandbox) Stats() (SandboxStats, error) {
func (s *Sandbox) Stats(ctx context.Context) (SandboxStats, error) {
if s.state.CgroupPath == "" {
return SandboxStats{}, fmt.Errorf("sandbox cgroup path is empty")
}
@ -1408,7 +1410,7 @@ func (s *Sandbox) Stats() (SandboxStats, error) {

stats.CgroupStats.CPUStats.CPUUsage.TotalUsage = metrics.CPU.Usage.Total
stats.CgroupStats.MemoryStats.Usage.Usage = metrics.Memory.Usage.Usage
tids, err := s.hypervisor.getThreadIDs()
tids, err := s.hypervisor.getThreadIDs(ctx)
if err != nil {
return stats, err
}
@ -1418,7 +1420,7 @@ func (s *Sandbox) Stats() (SandboxStats, error) {
}

// PauseContainer pauses a running container.
func (s *Sandbox) PauseContainer(containerID string) error {
func (s *Sandbox) PauseContainer(ctx context.Context, containerID string) error {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
@ -1426,18 +1428,18 @@ func (s *Sandbox) PauseContainer(containerID string) error {
}

// Pause the container.
if err := c.pause(); err != nil {
if err := c.pause(ctx); err != nil {
return err
}

if err = s.storeSandbox(); err != nil {
if err = s.storeSandbox(ctx); err != nil {
return err
}
return nil
}

// ResumeContainer resumes a paused container.
func (s *Sandbox) ResumeContainer(containerID string) error {
func (s *Sandbox) ResumeContainer(ctx context.Context, containerID string) error {
// Fetch the container.
c, err := s.findContainer(containerID)
if err != nil {
@ -1445,11 +1447,11 @@ func (s *Sandbox) ResumeContainer(containerID string) error {
}

// Resume the container.
if err := c.resume(); err != nil {
if err := c.resume(ctx); err != nil {
return err
}

if err = s.storeSandbox(); err != nil {
if err = s.storeSandbox(ctx); err != nil {
return err
}
return nil
@ -1457,17 +1459,17 @@ func (s *Sandbox) ResumeContainer(containerID string) error {

// createContainers registers all containers, create the
// containers in the guest and starts one shim per container.
func (s *Sandbox) createContainers() error {
span, _ := s.trace("createContainers")
func (s *Sandbox) createContainers(ctx context.Context) error {
span, ctx := s.trace(ctx, "createContainers")
defer span.End()

for _, contConfig := range s.config.Containers {

c, err := newContainer(s, &contConfig)
c, err := newContainer(ctx, s, &contConfig)
if err != nil {
return err
}
if err := c.create(); err != nil {
if err := c.create(ctx); err != nil {
return err
}

@ -1478,14 +1480,14 @@ func (s *Sandbox) createContainers() error {

// Update resources after having added containers to the sandbox, since
// container status is requiered to know if more resources should be added.
if err := s.updateResources(); err != nil {
if err := s.updateResources(ctx); err != nil {
return err
}

if err := s.cgroupsUpdate(); err != nil {
if err := s.cgroupsUpdate(ctx); err != nil {
return err
}
if err := s.storeSandbox(); err != nil {
if err := s.storeSandbox(ctx); err != nil {
return err
}

@ -1494,7 +1496,7 @@ func (s *Sandbox) createContainers() error {

// Start starts a sandbox. The containers that are making the sandbox
// will be started.
func (s *Sandbox) Start() error {
func (s *Sandbox) Start(ctx context.Context) error {
if err := s.state.ValidTransition(s.state.State, types.StateRunning); err != nil {
return err
}
@ -1512,12 +1514,12 @@ func (s *Sandbox) Start() error {
}
}()
for _, c := range s.containers {
if startErr = c.start(); startErr != nil {
if startErr = c.start(ctx); startErr != nil {
return startErr
}
}

if err := s.storeSandbox(); err != nil {
if err := s.storeSandbox(ctx); err != nil {
return err
}
@ -1529,8 +1531,8 @@ func (s *Sandbox) Start() error {
// Stop stops a sandbox. The containers that are making the sandbox
// will be destroyed.
// When force is true, ignore guest related stop failures.
func (s *Sandbox) Stop(force bool) error {
span, _ := s.trace("stop")
func (s *Sandbox) Stop(ctx context.Context, force bool) error {
span, ctx := s.trace(ctx, "Stop")
defer span.End()

if s.state.State == types.StateStopped {
@ -1543,12 +1545,12 @@ func (s *Sandbox) Stop(force bool) error {
}

for _, c := range s.containers {
if err := c.stop(force); err != nil {
if err := c.stop(ctx, force); err != nil {
return err
}
}

if err := s.stopVM(); err != nil && !force {
if err := s.stopVM(ctx); err != nil && !force {
return err
}

@ -1563,16 +1565,16 @@ func (s *Sandbox) Stop(force bool) error {
}

// Remove the network.
if err := s.removeNetwork(); err != nil && !force {
if err := s.removeNetwork(ctx); err != nil && !force {
return err
}

if err := s.storeSandbox(); err != nil {
if err := s.storeSandbox(ctx); err != nil {
return err
}

// Stop communicating with the agent.
if err := s.agent.disconnect(); err != nil && !force {
if err := s.agent.disconnect(ctx); err != nil && !force {
return err
}

@ -1640,8 +1642,8 @@ func (s *Sandbox) unsetSandboxBlockIndex(index int) error {

// HotplugAddDevice is used for add a device to sandbox
// Sandbox implement DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType) error {
span, _ := s.trace("HotplugAddDevice")
func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
span, ctx := s.trace(ctx, "HotplugAddDevice")
defer span.End()

if s.config.SandboxCgroupOnly {
@ -1649,7 +1651,7 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)
// the device cgroup MUST be updated since the hypervisor
// will need access to such device
hdev := device.GetHostPath()
if err := s.cgroupMgr.AddDevice(hdev); err != nil {
if err := s.cgroupMgr.AddDevice(ctx, hdev); err != nil {
s.Logger().WithError(err).WithField("device", hdev).
Warn("Could not add device to cgroup")
}
@ -1664,7 +1666,7 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)

// adding a group of VFIO devices
for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugAddDevice(dev, vfioDev); err != nil {
if _, err := s.hypervisor.hotplugAddDevice(ctx, dev, vfioDev); err != nil {
s.Logger().
WithFields(logrus.Fields{
"sandbox": s.id,
@ -1680,14 +1682,14 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)
if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
}
_, err := s.hypervisor.hotplugAddDevice(blockDevice.BlockDrive, blockDev)
_, err := s.hypervisor.hotplugAddDevice(ctx, blockDevice.BlockDrive, blockDev)
return err
case config.VhostUserBlk:
vhostUserBlkDevice, ok := device.(*drivers.VhostUserBlkDevice)
if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
}
_, err := s.hypervisor.hotplugAddDevice(vhostUserBlkDevice.VhostUserDeviceAttrs, vhostuserDev)
_, err := s.hypervisor.hotplugAddDevice(ctx, vhostUserBlkDevice.VhostUserDeviceAttrs, vhostuserDev)
return err
case config.DeviceGeneric:
// TODO: what?
@ -1698,7 +1700,7 @@ func (s *Sandbox) HotplugAddDevice(device api.Device, devType config.DeviceType)

// HotplugRemoveDevice is used for removing a device from sandbox
// Sandbox implement DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) HotplugRemoveDevice(device api.Device, devType config.DeviceType) error {
func (s *Sandbox) HotplugRemoveDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
defer func() {
if s.config.SandboxCgroupOnly {
// Remove device from cgroup, the hypervisor
@ -1720,7 +1722,7 @@ func (s *Sandbox) HotplugRemoveDevice(device api.Device, devType config.DeviceTy

// remove a group of VFIO devices
for _, dev := range vfioDevices {
if _, err := s.hypervisor.hotplugRemoveDevice(dev, vfioDev); err != nil {
if _, err := s.hypervisor.hotplugRemoveDevice(ctx, dev, vfioDev); err != nil {
s.Logger().WithError(err).
WithFields(logrus.Fields{
"sandbox": s.id,
@ -1736,14 +1738,14 @@ func (s *Sandbox) HotplugRemoveDevice(device api.Device, devType config.DeviceTy
if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
}
_, err := s.hypervisor.hotplugRemoveDevice(blockDrive, blockDev)
_, err := s.hypervisor.hotplugRemoveDevice(ctx, blockDrive, blockDev)
return err
case config.VhostUserBlk:
vhostUserDeviceAttrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
if !ok {
return fmt.Errorf("device type mismatch, expect device type to be %s", devType)
}
_, err := s.hypervisor.hotplugRemoveDevice(vhostUserDeviceAttrs, vhostuserDev)
_, err := s.hypervisor.hotplugRemoveDevice(ctx, vhostUserDeviceAttrs, vhostuserDev)
return err
case config.DeviceGeneric:
// TODO: what?
@ -1767,14 +1769,14 @@ func (s *Sandbox) UnsetSandboxBlockIndex(index int) error {
// AppendDevice can only handle vhost user device currently, it adds a
// vhost user device to sandbox
// Sandbox implement DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) AppendDevice(device api.Device) error {
func (s *Sandbox) AppendDevice(ctx context.Context, device api.Device) error {
switch device.DeviceType() {
case config.VhostUserSCSI, config.VhostUserNet, config.VhostUserBlk, config.VhostUserFS:
return s.hypervisor.addDevice(device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), vhostuserDev)
return s.hypervisor.addDevice(ctx, device.GetDeviceInfo().(*config.VhostUserDeviceAttrs), vhostuserDev)
case config.DeviceVFIO:
vfioDevs := device.GetDeviceInfo().([]*config.VFIODev)
for _, d := range vfioDevs {
return s.hypervisor.addDevice(*d, vfioDev)
return s.hypervisor.addDevice(ctx, *d, vfioDev)
}
default:
s.Logger().WithField("device-type", device.DeviceType()).
@ -1785,7 +1787,7 @@ func (s *Sandbox) AppendDevice(device api.Device) error {
}

// AddDevice will add a device to sandbox
func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
func (s *Sandbox) AddDevice(ctx context.Context, info config.DeviceInfo) (api.Device, error) {
if s.devManager == nil {
return nil, fmt.Errorf("device manager isn't initialized")
}
@ -1801,12 +1803,12 @@ func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
}
}()

if err = s.devManager.AttachDevice(b.DeviceID(), s); err != nil {
if err = s.devManager.AttachDevice(ctx, b.DeviceID(), s); err != nil {
return nil, err
}
defer func() {
if err != nil {
s.devManager.DetachDevice(b.DeviceID(), s)
s.devManager.DetachDevice(ctx, b.DeviceID(), s)
}
}()
@ -1819,7 +1821,7 @@ func (s *Sandbox) AddDevice(info config.DeviceInfo) (api.Device, error) {
// on the sum of container requests, plus default CPUs for the VM. Similar is done for memory.
// If changes in memory or CPU are made, the VM will be updated and the agent will online the
// applicable CPU and memory.
func (s *Sandbox) updateResources() error {
func (s *Sandbox) updateResources(ctx context.Context) error {
if s == nil {
return errors.New("sandbox is nil")
}
@ -1841,7 +1843,7 @@ func (s *Sandbox) updateResources() error {

// Update VCPUs
s.Logger().WithField("cpus-sandbox", sandboxVCPUs).Debugf("Request to hypervisor to update vCPUs")
oldCPUs, newCPUs, err := s.hypervisor.resizeVCPUs(sandboxVCPUs)
oldCPUs, newCPUs, err := s.hypervisor.resizeVCPUs(ctx, sandboxVCPUs)
if err != nil {
return err
}
@ -1851,7 +1853,7 @@ func (s *Sandbox) updateResources() error {
if oldCPUs < newCPUs {
vcpusAdded := newCPUs - oldCPUs
s.Logger().Debugf("Request to onlineCPUMem with %d CPUs", vcpusAdded)
if err := s.agent.onlineCPUMem(vcpusAdded, true); err != nil {
if err := s.agent.onlineCPUMem(ctx, vcpusAdded, true); err != nil {
return err
}
}
@ -1859,7 +1861,7 @@ func (s *Sandbox) updateResources() error {

// Update Memory
s.Logger().WithField("memory-sandbox-size-byte", sandboxMemoryByte).Debugf("Request to hypervisor to update memory")
newMemory, updatedMemoryDevice, err := s.hypervisor.resizeMemory(uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
newMemory, updatedMemoryDevice, err := s.hypervisor.resizeMemory(ctx, uint32(sandboxMemoryByte>>utils.MibToBytesShift), s.state.GuestMemoryBlockSizeMB, s.state.GuestMemoryHotplugProbe)
if err != nil {
return err
}
@ -1867,11 +1869,11 @@ func (s *Sandbox) updateResources() error {
if s.state.GuestMemoryHotplugProbe && updatedMemoryDevice.addr != 0 {
// notify the guest kernel about memory hot-add event, before onlining them
s.Logger().Debugf("notify guest kernel memory hot-add event via probe interface, memory device located at 0x%x", updatedMemoryDevice.addr)
if err := s.agent.memHotplugByProbe(updatedMemoryDevice.addr, uint32(updatedMemoryDevice.sizeMB), s.state.GuestMemoryBlockSizeMB); err != nil {
if err := s.agent.memHotplugByProbe(ctx, updatedMemoryDevice.addr, uint32(updatedMemoryDevice.sizeMB), s.state.GuestMemoryBlockSizeMB); err != nil {
return err
}
}
if err := s.agent.onlineCPUMem(0, false); err != nil {
if err := s.agent.onlineCPUMem(ctx, 0, false); err != nil {
return err
}
return nil
@ -1937,7 +1939,7 @@ func (s *Sandbox) GetHypervisorType() string {
// 1) get the v1constraints cgroup associated with the stored cgroup path
// 2) (re-)add hypervisor vCPU threads to the appropriate cgroup
// 3) If we are managing sandbox cgroup, update the v1constraints cgroup size
func (s *Sandbox) cgroupsUpdate() error {
func (s *Sandbox) cgroupsUpdate(ctx context.Context) error {

// If Kata is configured for SandboxCgroupOnly, the VMM and its processes are already
// in the Kata sandbox cgroup (inherited). Check to see if sandbox cpuset needs to be
@ -1965,7 +1967,7 @@ func (s *Sandbox) cgroupsUpdate() error {
return fmt.Errorf("Could not load cgroup %v: %v", s.state.CgroupPath, err)
}

if err := s.constrainHypervisor(cgroup); err != nil {
if err := s.constrainHypervisor(ctx, cgroup); err != nil {
return err
}

@ -2035,7 +2037,7 @@ func (s *Sandbox) cgroupsDelete() error {
}

// constrainHypervisor will place the VMM and vCPU threads into cgroups.
func (s *Sandbox) constrainHypervisor(cgroup cgroups.Cgroup) error {
func (s *Sandbox) constrainHypervisor(ctx context.Context, cgroup cgroups.Cgroup) error {
// VMM threads are only placed into the constrained cgroup if SandboxCgroupOnly is being set.
// This is the "correct" behavior, but if the parent cgroup isn't set up correctly to take
// Kata/VMM into account, Kata may fail to boot due to being overconstrained.
@ -2078,7 +2080,7 @@ func (s *Sandbox) constrainHypervisor(cgroup cgroups.Cgroup) error {

// when new container joins, new CPU could be hotplugged, so we
// have to query fresh vcpu info from hypervisor every time.
tids, err := s.hypervisor.getThreadIDs()
tids, err := s.hypervisor.getThreadIDs(ctx)
if err != nil {
return fmt.Errorf("failed to get thread ids from hypervisor: %v", err)
}
@ -2242,8 +2244,8 @@ func (s *Sandbox) GetPatchedOCISpec() *specs.Spec {
return nil
}

func (s *Sandbox) GetOOMEvent() (string, error) {
return s.agent.getOOMEvent()
func (s *Sandbox) GetOOMEvent(ctx context.Context) (string, error) {
return s.agent.getOOMEvent(ctx)
}

func (s *Sandbox) GetAgentURL() (string, error) {
@ -2310,7 +2312,7 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err

// This sandbox already exists, we don't need to recreate the containers in the guest.
// We only need to fetch the containers from storage and create the container structs.
if err := sandbox.fetchContainers(); err != nil {
if err := sandbox.fetchContainers(ctx); err != nil {
return nil, err
}

@ -2321,7 +2323,7 @@ func fetchSandbox(ctx context.Context, sandboxID string) (sandbox *Sandbox, err
// adds them to the sandbox. It does not create the containers
// in the guest. This should only be used when fetching a
// sandbox that already exists.
func (s *Sandbox) fetchContainers() error {
func (s *Sandbox) fetchContainers(ctx context.Context) error {
for i, contConfig := range s.config.Containers {
// Add spec from bundle path
spec, err := compatoci.GetContainerSpec(contConfig.Annotations)
@ -2331,7 +2333,7 @@ func (s *Sandbox) fetchContainers() error {
contConfig.CustomSpec = &spec
s.config.Containers[i] = contConfig

c, err := newContainer(s, &s.config.Containers[i])
c, err := newContainer(ctx, s, &s.config.Containers[i])
if err != nil {
return err
}
@ -6,6 +6,8 @@
package virtcontainers

import (
"context"

mutils "github.com/kata-containers/kata-containers/src/runtime/pkg/utils"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
"github.com/prometheus/client_golang/prometheus"
@ -126,8 +128,8 @@ func (s *Sandbox) UpdateRuntimeMetrics() error {
return nil
}

func (s *Sandbox) GetAgentMetrics() (string, error) {
r, err := s.agent.getAgentMetrics(&grpc.GetMetricsRequest{})
func (s *Sandbox) GetAgentMetrics(ctx context.Context) (string, error) {
r, err := s.agent.getAgentMetrics(ctx, &grpc.GetMetricsRequest{})
if err != nil {
return "", err
}
|
||||
}
|
||||
|
||||
// persist to disk
|
||||
err = p.storeSandbox()
|
||||
err = p.storeSandbox(p.ctx)
|
||||
assert.NoError(err)
|
||||
|
||||
newSandboxState := types.SandboxState{
|
||||
@ -876,12 +876,12 @@ func TestCreateContainer(t *testing.T) {
|
||||
|
||||
contID := "999"
|
||||
contConfig := newTestContainerConfigNoop(contID)
|
||||
_, err = s.CreateContainer(contConfig)
|
||||
_, err = s.CreateContainer(context.Background(), contConfig)
|
||||
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
|
||||
|
||||
assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1")
|
||||
|
||||
_, err = s.CreateContainer(contConfig)
|
||||
_, err = s.CreateContainer(context.Background(), contConfig)
|
||||
assert.NotNil(t, err, "Should failed to create a duplicated container")
|
||||
assert.Equal(t, len(s.config.Containers), 1, "Container config list length from sandbox structure should be 1")
|
||||
}
|
||||
@ -896,7 +896,7 @@ func TestDeleteContainer(t *testing.T) {
|
||||
assert.NotNil(t, err, "Deletng non-existing container should fail")
|
||||
|
||||
contConfig := newTestContainerConfigNoop(contID)
|
||||
_, err = s.CreateContainer(contConfig)
|
||||
_, err = s.CreateContainer(context.Background(), contConfig)
|
||||
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
|
||||
|
||||
_, err = s.DeleteContainer(contID)
|
||||
@ -909,17 +909,17 @@ func TestStartContainer(t *testing.T) {
|
||||
defer cleanUp()
|
||||
|
||||
contID := "999"
|
||||
_, err = s.StartContainer(contID)
|
||||
_, err = s.StartContainer(context.Background(), contID)
|
||||
assert.NotNil(t, err, "Starting non-existing container should fail")
|
||||
|
||||
err = s.Start()
|
||||
assert.Nil(t, err, "Failed to start sandbox: %v", err)
|
||||
|
||||
contConfig := newTestContainerConfigNoop(contID)
|
||||
_, err = s.CreateContainer(contConfig)
|
||||
_, err = s.CreateContainer(context.Background(), contConfig)
|
||||
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
|
||||
|
||||
_, err = s.StartContainer(contID)
|
||||
_, err = s.StartContainer(context.Background(), contID)
|
||||
assert.Nil(t, err, "Start container failed: %v", err)
|
||||
}
|
||||
|
||||
@ -933,7 +933,7 @@ func TestStatusContainer(t *testing.T) {
|
||||
assert.NotNil(t, err, "Status non-existing container should fail")
|
||||
|
||||
contConfig := newTestContainerConfigNoop(contID)
|
||||
_, err = s.CreateContainer(contConfig)
|
||||
_, err = s.CreateContainer(context.Background(), contConfig)
|
||||
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
|
||||
|
||||
_, err = s.StatusContainer(contID)
|
||||
@ -962,7 +962,7 @@ func TestEnterContainer(t *testing.T) {
|
||||
assert.NotNil(t, err, "Entering non-existing container should fail")
|
||||
|
||||
contConfig := newTestContainerConfigNoop(contID)
|
||||
_, err = s.CreateContainer(contConfig)
|
||||
_, err = s.CreateContainer(context.Background(), contConfig)
|
||||
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)
|
||||
|
||||
_, _, err = s.EnterContainer(contID, cmd)
|
||||
@ -987,7 +987,7 @@ func TestDeleteStoreWhenCreateContainerFail(t *testing.T) {
|
||||
contConfig := newTestContainerConfigNoop(contID)
|
||||
contConfig.RootFs = RootFs{Target: "", Mounted: true}
|
||||
s.state.CgroupPath = filepath.Join(testDir, "bad-cgroup")
|
||||
_, err = s.CreateContainer(contConfig)
|
||||
_, err = s.CreateContainer(context.Background(), contConfig)
|
||||
assert.NotNil(t, err, "Should fail to create container due to wrong cgroup")
|
||||
}
|
||||
|
||||
@ -1051,13 +1051,13 @@ func TestWaitProcess(t *testing.T) {
assert.NotNil(t, err, "Wait process in non-existing container should fail")

contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig)
_, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)

_, err = s.WaitProcess(contID, execID)
assert.Nil(t, err, "Wait process in ready container failed: %v", err)

_, err = s.StartContainer(contID)
_, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err)

_, err = s.WaitProcess(contID, execID)

@ -1081,13 +1081,13 @@ func TestSignalProcess(t *testing.T) {
assert.NotNil(t, err, "Wait process in non-existing container should fail")

contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig)
_, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)

err = s.SignalProcess(contID, execID, syscall.SIGKILL, true)
assert.Nil(t, err, "Wait process in ready container failed: %v", err)

_, err = s.StartContainer(contID)
_, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err)

err = s.SignalProcess(contID, execID, syscall.SIGKILL, false)

@ -1111,13 +1111,13 @@ func TestWinsizeProcess(t *testing.T) {
assert.NotNil(t, err, "Winsize process in non-existing container should fail")

contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig)
_, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)

err = s.WinsizeProcess(contID, execID, 100, 200)
assert.Nil(t, err, "Winsize process in ready container failed: %v", err)

_, err = s.StartContainer(contID)
_, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err)

err = s.WinsizeProcess(contID, execID, 100, 200)

@ -1141,13 +1141,13 @@ func TestContainerProcessIOStream(t *testing.T) {
assert.NotNil(t, err, "Winsize process in non-existing container should fail")

contConfig := newTestContainerConfigNoop(contID)
_, err = s.CreateContainer(contConfig)
_, err = s.CreateContainer(context.Background(), contConfig)
assert.Nil(t, err, "Failed to create container %+v in sandbox %+v: %v", contConfig, s, err)

_, _, _, err = s.IOStream(contID, execID)
assert.Nil(t, err, "Winsize process in ready container failed: %v", err)

_, err = s.StartContainer(contID)
_, err = s.StartContainer(context.Background(), contID)
assert.Nil(t, err, "Start container failed: %v", err)

_, _, _, err = s.IOStream(contID, execID)

@ -1204,37 +1204,37 @@ func TestAttachBlockDevice(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, index, 0)

err = device.Attach(sandbox)
err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err)
index, err = sandbox.getAndSetSandboxBlockIndex()
assert.Nil(t, err)
assert.Equal(t, index, 2)

err = device.Detach(sandbox)
err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err)
index, err = sandbox.getAndSetSandboxBlockIndex()
assert.Nil(t, err)
assert.Equal(t, index, 1)

container.state.State = types.StateReady
err = device.Attach(sandbox)
err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err)

err = device.Detach(sandbox)
err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err)

container.sandbox.config.HypervisorConfig.BlockDeviceDriver = config.VirtioSCSI
err = device.Attach(sandbox)
err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err)

err = device.Detach(sandbox)
err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err)

container.state.State = types.StateReady
err = device.Attach(sandbox)
err = device.Attach(context.Background(), sandbox)
assert.Nil(t, err)

err = device.Detach(sandbox)
err = device.Detach(context.Background(), sandbox)
assert.Nil(t, err)
}

@ -1283,7 +1283,7 @@ func TestPreAddDevice(t *testing.T) {
}

// Add a mount device for a mountpoint before container's creation
dev, err := sandbox.AddDevice(deviceInfo)
dev, err := sandbox.AddDevice(context.Background(), deviceInfo)
assert.Nil(t, err)

// in Frakti use case, here we will create and start the container

@ -1419,7 +1419,7 @@ func TestSandboxUpdateResources(t *testing.T) {
nil)

assert.NoError(t, err)
err = s.updateResources()
err = s.updateResources(context.Background())
assert.NoError(t, err)

containerMemLimit := int64(1000)

@ -1437,7 +1437,7 @@ func TestSandboxUpdateResources(t *testing.T) {
c.Resources.CPU.Period = &containerCPUPeriod
c.Resources.CPU.Quota = &containerCPUQouta
}
err = s.updateResources()
err = s.updateResources(context.Background())
assert.NoError(t, err)
}

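The test hunks above all apply one mechanical change: virtcontainers entry points such as CreateContainer, StartContainer, device Attach/Detach and updateResources now take a context.Context as their first argument, and the tests pass context.Background() because there is no caller span to inherit. Below is a minimal sketch of the same calling convention from a non-test caller, where a request-scoped context is threaded through instead; sandboxOps, startOne and the reduced method signatures are illustrative, not the full virtcontainers API.

package example

import "context"

// sandboxOps is an illustrative cut-down view of the sandbox calls
// exercised by the tests above, after the context parameter was added.
type sandboxOps interface {
	CreateContainer(ctx context.Context, id string) error
	StartContainer(ctx context.Context, id string) error
}

// startOne threads the caller's context into every call, so any trace
// spans opened inside virtcontainers nest under the caller's span
// instead of hanging off a fresh context.Background().
func startOne(ctx context.Context, s sandboxOps, id string) error {
	if err := s.CreateContainer(ctx, id); err != nil {
		return err
	}
	return s.StartContainer(ctx, id)
}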
@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"fmt"

"github.com/containernetworking/plugins/pkg/ns"

@ -67,12 +68,12 @@ func (endpoint *TapEndpoint) SetProperties(properties NetworkInfo) {
}

// Attach for tap endpoint adds the tap interface to the hypervisor.
func (endpoint *TapEndpoint) Attach(s *Sandbox) error {
func (endpoint *TapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
return fmt.Errorf("TapEndpoint does not support Attach, if you're using docker please use --net none")
}

// Detach for the tap endpoint tears down the tap
func (endpoint *TapEndpoint) Detach(netNsCreated bool, netNsPath string) error {
func (endpoint *TapEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
if !netNsCreated && netNsPath != "" {
return nil
}

@ -84,14 +85,14 @@ func (endpoint *TapEndpoint) Detach(netNsCreated bool, netNsPath string) error {
}

// HotAttach for the tap endpoint uses hot plug device
func (endpoint *TapEndpoint) HotAttach(h hypervisor) error {
func (endpoint *TapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
networkLogger().Info("Hot attaching tap endpoint")
if err := tapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tap ep")
return err
}

if _, err := h.hotplugAddDevice(endpoint, netDev); err != nil {
if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error attach tap ep")
return err
}

@ -99,7 +100,7 @@ func (endpoint *TapEndpoint) HotAttach(h hypervisor) error {
}

// HotDetach for the tap endpoint uses hot pull device
func (endpoint *TapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *TapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
networkLogger().Info("Hot detaching tap endpoint")
if err := doNetNS(netNsPath, func(_ ns.NetNS) error {
return unTapNetwork(endpoint.TapInterface.TAPIface.Name)

@ -107,7 +108,7 @@ func (endpoint *TapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPat
networkLogger().WithError(err).Warn("Error un-bridging tap ep")
}

if _, err := h.hotplugRemoveDevice(endpoint, netDev); err != nil {
if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error detach tap ep")
return err
}

@ -7,6 +7,7 @@
package virtcontainers

import (
"context"
"fmt"
"net"

@ -69,17 +70,17 @@ func (endpoint *TuntapEndpoint) SetProperties(properties NetworkInfo) {
}

// Attach for tap endpoint adds the tap interface to the hypervisor.
func (endpoint *TuntapEndpoint) Attach(s *Sandbox) error {
func (endpoint *TuntapEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor
if err := xConnectVMNetwork(endpoint, h); err != nil {
if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual endpoint")
return err
}
return h.addDevice(endpoint, netDev)
return h.addDevice(ctx, endpoint, netDev)
}

// Detach for the tap endpoint tears down the tap
func (endpoint *TuntapEndpoint) Detach(netNsCreated bool, netNsPath string) error {
func (endpoint *TuntapEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
if !netNsCreated && netNsPath != "" {
return nil
}

@ -91,14 +92,14 @@ func (endpoint *TuntapEndpoint) Detach(netNsCreated bool, netNsPath string) erro
}

// HotAttach for the tap endpoint uses hot plug device
func (endpoint *TuntapEndpoint) HotAttach(h hypervisor) error {
func (endpoint *TuntapEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
networkLogger().Info("Hot attaching tap endpoint")
if err := tuntapNetwork(endpoint, h.hypervisorConfig().NumVCPUs, h.hypervisorConfig().DisableVhostNet); err != nil {
networkLogger().WithError(err).Error("Error bridging tap ep")
return err
}

if _, err := h.hotplugAddDevice(endpoint, netDev); err != nil {
if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error attach tap ep")
return err
}

@ -106,7 +107,7 @@ func (endpoint *TuntapEndpoint) HotAttach(h hypervisor) error {
}

// HotDetach for the tap endpoint uses hot pull device
func (endpoint *TuntapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *TuntapEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
networkLogger().Info("Hot detaching tap endpoint")
if err := doNetNS(netNsPath, func(_ ns.NetNS) error {
return unTuntapNetwork(endpoint.TuntapInterface.TAPIface.Name)

@ -114,7 +115,7 @@ func (endpoint *TuntapEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNs
networkLogger().WithError(err).Warn("Error un-bridging tap ep")
}

if _, err := h.hotplugRemoveDevice(endpoint, netDev); err != nil {
if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error detach tap ep")
return err
}

@ -5,7 +5,10 @@

package types

import "fmt"
import (
"context"
"fmt"
)

// Type represents a type of bus and bridge.
type Type string

@ -64,7 +67,7 @@ func NewBridge(bt Type, id string, devices map[uint32]string, addr int) Bridge {
}
}

func (b *Bridge) AddDevice(ID string) (uint32, error) {
func (b *Bridge) AddDevice(ctx context.Context, ID string) (uint32, error) {
var addr uint32

// looking for the first available address

@ -6,6 +6,7 @@
package types

import (
"context"
"fmt"
"testing"

@ -18,7 +19,7 @@ func testAddRemoveDevice(t *testing.T, b *Bridge) {
// add device
devID := "abc123"

addr, err := b.AddDevice(devID)
addr, err := b.AddDevice(context.Background(), devID)
assert.NoError(err)
if addr < 1 {
assert.Fail("address cannot be less than 1")

@ -36,7 +37,7 @@ func testAddRemoveDevice(t *testing.T, b *Bridge) {
for i := uint32(1); i <= b.MaxCapacity; i++ {
b.Devices[i] = fmt.Sprintf("%d", i)
}
addr, err = b.AddDevice(devID)
addr, err = b.AddDevice(context.Background(), devID)
assert.Error(err)
if addr != 0 {
assert.Fail("address should be 0")

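Bridge.AddDevice itself does not trace yet, but giving it a context now means callers (and any tracing added to the method later) keep a single span lineage across device plug operations. Below is a rough, self-contained sketch of the allocation logic the hunks above exercise, with the Bridge type reduced to the fields visible in the diff; the names bridge, maxCapacity and addDevice are illustrative, not the real types package API.

package example

import (
	"context"
	"fmt"
)

// bridge is a stand-in for types.Bridge: a fixed-capacity map from
// guest bus address to the ID of the device plugged at that address.
type bridge struct {
	devices     map[uint32]string
	maxCapacity uint32
}

// addDevice mirrors the post-change signature: the context comes first
// so tracing can be threaded in later without another API break. It
// returns the first free address, or an error when the bridge is full,
// which is what the capacity test above asserts.
func (b *bridge) addDevice(_ context.Context, id string) (uint32, error) {
	for addr := uint32(1); addr <= b.maxCapacity; addr++ {
		if _, taken := b.devices[addr]; !taken {
			b.devices[addr] = id
			return addr, nil
		}
	}
	return 0, fmt.Errorf("no free address on bridge for device %q", id)
}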
@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"fmt"

"github.com/containernetworking/plugins/pkg/ns"

@ -90,19 +91,19 @@ func (endpoint *VethEndpoint) SetProperties(properties NetworkInfo) {

// Attach for veth endpoint bridges the network pair and adds the
// tap interface of the network pair to the hypervisor.
func (endpoint *VethEndpoint) Attach(s *Sandbox) error {
func (endpoint *VethEndpoint) Attach(ctx context.Context, s *Sandbox) error {
h := s.hypervisor
if err := xConnectVMNetwork(endpoint, h); err != nil {
if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual endpoint")
return err
}

return h.addDevice(endpoint, netDev)
return h.addDevice(ctx, endpoint, netDev)
}

// Detach for the veth endpoint tears down the tap and bridge
// created for the veth interface.
func (endpoint *VethEndpoint) Detach(netNsCreated bool, netNsPath string) error {
func (endpoint *VethEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
// The network namespace would have been deleted at this point
// if it has not been created by virtcontainers.
if !netNsCreated {

@ -115,13 +116,13 @@ func (endpoint *VethEndpoint) Detach(netNsCreated bool, netNsPath string) error
}

// HotAttach for the veth endpoint uses hot plug device
func (endpoint *VethEndpoint) HotAttach(h hypervisor) error {
if err := xConnectVMNetwork(endpoint, h); err != nil {
func (endpoint *VethEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
if err := xConnectVMNetwork(ctx, endpoint, h); err != nil {
networkLogger().WithError(err).Error("Error bridging virtual ep")
return err
}

if _, err := h.hotplugAddDevice(endpoint, netDev); err != nil {
if _, err := h.hotplugAddDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error attach virtual ep")
return err
}

@ -129,7 +130,7 @@ func (endpoint *VethEndpoint) HotAttach(h hypervisor) error {
}

// HotDetach for the veth endpoint uses hot pull device
func (endpoint *VethEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *VethEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
if !netNsCreated {
return nil
}

@ -140,7 +141,7 @@ func (endpoint *VethEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPa
networkLogger().WithError(err).Warn("Error un-bridging virtual ep")
}

if _, err := h.hotplugRemoveDevice(endpoint, netDev); err != nil {
if _, err := h.hotplugRemoveDevice(ctx, endpoint, netDev); err != nil {
networkLogger().WithError(err).Error("Error detach virtual ep")
return err
}

@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"encoding/hex"
"fmt"
"os"

@ -75,7 +76,7 @@ func (endpoint *VhostUserEndpoint) NetworkPair() *NetworkInterfacePair {
}

// Attach for vhostuser endpoint
func (endpoint *VhostUserEndpoint) Attach(s *Sandbox) error {
func (endpoint *VhostUserEndpoint) Attach(ctx context.Context, s *Sandbox) error {
// Generate a unique ID to be used for hypervisor commandline fields
randBytes, err := utils.GenerateRandomBytes(8)
if err != nil {

@ -90,21 +91,21 @@ func (endpoint *VhostUserEndpoint) Attach(s *Sandbox) error {
Type: config.VhostUserNet,
}

return s.hypervisor.addDevice(d, vhostuserDev)
return s.hypervisor.addDevice(ctx, d, vhostuserDev)
}

// Detach for vhostuser endpoint
func (endpoint *VhostUserEndpoint) Detach(netNsCreated bool, netNsPath string) error {
func (endpoint *VhostUserEndpoint) Detach(ctx context.Context, netNsCreated bool, netNsPath string) error {
return nil
}

// HotAttach for vhostuser endpoint not supported yet
func (endpoint *VhostUserEndpoint) HotAttach(h hypervisor) error {
func (endpoint *VhostUserEndpoint) HotAttach(ctx context.Context, h hypervisor) error {
return fmt.Errorf("VhostUserEndpoint does not support Hot attach")
}

// HotDetach for vhostuser endpoint not supported yet
func (endpoint *VhostUserEndpoint) HotDetach(h hypervisor, netNsCreated bool, netNsPath string) error {
func (endpoint *VhostUserEndpoint) HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error {
return fmt.Errorf("VhostUserEndpoint does not support Hot detach")
}

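The tap, tuntap, veth and vhost-user endpoints change together because they implement a common endpoint interface: once one method gains a context parameter, every implementation must follow. Below is a hedged sketch of the context-aware method set, reduced to the four operations shown in these hunks; the Sandbox and hypervisor types are stubbed so the snippet stands alone, and the real interface in virtcontainers has more methods.

package example

import "context"

// sandbox and hypervisor are minimal stand-ins so the interface compiles.
type sandbox struct{}
type hypervisor interface{}

// endpoint captures the shape shared by TapEndpoint, TuntapEndpoint,
// VethEndpoint and VhostUserEndpoint after this change: every attach
// and detach path receives the caller's context so hotplug calls and
// their trace spans stay parented to the original request.
type endpoint interface {
	Attach(ctx context.Context, s *sandbox) error
	Detach(ctx context.Context, netNsCreated bool, netNsPath string) error
	HotAttach(ctx context.Context, h hypervisor) error
	HotDetach(ctx context.Context, h hypervisor, netNsCreated bool, netNsPath string) error
}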
@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"fmt"
"net"
"os"

@ -82,7 +83,7 @@ func TestVhostUserEndpointAttach(t *testing.T) {
hypervisor: &mockHypervisor{},
}

err := v.Attach(s)
err := v.Attach(context.Background(), s)
assert.NoError(err)
}

@ -96,7 +97,7 @@ func TestVhostUserEndpoint_HotAttach(t *testing.T) {

h := &mockHypervisor{}

err := v.HotAttach(h)
err := v.HotAttach(context.Background(), h)
assert.Error(err)
}

@ -110,7 +111,7 @@ func TestVhostUserEndpoint_HotDetach(t *testing.T) {

h := &mockHypervisor{}

err := v.HotDetach(h, true, "")
err := v.HotDetach(context.Background(), h, true, "")
assert.Error(err)
}

@ -35,7 +35,7 @@ type Virtiofsd interface {
// Start virtiofsd, return pid of virtiofsd process
Start(context.Context) (pid int, err error)
// Stop virtiofsd process
Stop() error
Stop(context.Context) error
}

// Helper function to check virtiofsd is serving

@ -84,7 +84,7 @@ func (v *virtiofsd) getSocketFD() (*os.File, error) {

// Start the virtiofsd daemon
func (v *virtiofsd) Start(ctx context.Context) (int, error) {
span, _ := v.trace("Start")
span, _ := v.trace(ctx, "Start")
defer span.End()
pid := 0

@ -131,8 +131,8 @@ func (v *virtiofsd) Start(ctx context.Context) (int, error) {
return pid, socketFD.Close()
}

func (v *virtiofsd) Stop() error {
if err := v.kill(); err != nil {
func (v *virtiofsd) Stop(ctx context.Context) error {
if err := v.kill(ctx); err != nil {
return nil
}

@ -204,13 +204,13 @@ func (v *virtiofsd) Logger() *log.Entry {
return virtLog.WithField("subsystem", "virtiofsd")
}

func (v *virtiofsd) trace(name string) (otelTrace.Span, context.Context) {
if v.ctx == nil {
v.ctx = context.Background()
func (v *virtiofsd) trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
if parent == nil {
parent = context.Background()
}

tracer := otel.Tracer("kata")
ctx, span := tracer.Start(v.ctx, name)
ctx, span := tracer.Start(parent, name)
span.SetAttributes(label.Key("subsystem").String("virtiofds"))

return span, ctx

@ -259,8 +259,8 @@ func waitVirtiofsReady(cmd *exec.Cmd, stderr io.ReadCloser, debug bool) error {
return err
}

func (v *virtiofsd) kill() (err error) {
span, _ := v.trace("kill")
func (v *virtiofsd) kill(ctx context.Context) (err error) {
span, _ := v.trace(ctx, "kill")
defer span.End()

if v.PID == 0 {

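The rewritten trace helper above is the pattern the whole commit applies: a span is started from the parent context handed in by the caller, not from a context stored on the struct, so each new span nests under the operation that triggered it. Below is a compilable sketch of the same idea using the OpenTelemetry API the file already imports; the doWork function and the "outer"/"inner" span names are illustrative.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	otelTrace "go.opentelemetry.io/otel/trace"
)

// trace starts a span as a child of whatever parent context the caller
// supplies, falling back to context.Background() only when there is none.
func trace(parent context.Context, name string) (otelTrace.Span, context.Context) {
	if parent == nil {
		parent = context.Background()
	}
	tracer := otel.Tracer("kata")
	ctx, span := tracer.Start(parent, name)
	return span, ctx
}

// doWork shows why the returned context matters: passing it to the next
// trace call makes the "inner" span a child of "outer" in the trace output.
func doWork(ctx context.Context) {
	outerSpan, ctx := trace(ctx, "outer")
	defer outerSpan.End()

	innerSpan, _ := trace(ctx, "inner")
	defer innerSpan.End()
}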
@ -120,7 +120,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
agent := newAagentFunc()

vmSharePath := buildVMSharePath(id, store.RunVMStoragePath())
err = agent.configure(hypervisor, id, vmSharePath, config.AgentConfig)
err = agent.configure(ctx, hypervisor, id, vmSharePath, config.AgentConfig)
if err != nil {
return nil, err
}

@ -130,14 +130,14 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
}

// 3. boot up guest vm
if err = hypervisor.startSandbox(vmStartTimeout); err != nil {
if err = hypervisor.startSandbox(ctx, vmStartTimeout); err != nil {
return nil, err
}

defer func() {
if err != nil {
virtLog.WithField("vm", id).WithError(err).Info("clean up vm")
hypervisor.stopSandbox()
hypervisor.stopSandbox(ctx)
}
}()

@ -145,7 +145,7 @@ func NewVM(ctx context.Context, config VMConfig) (*VM, error) {
// VMs booted from template are paused, do not check
if !config.HypervisorConfig.BootFromTemplate {
virtLog.WithField("vm", id).Info("check agent status")
err = agent.check()
err = agent.check(ctx)
if err != nil {
return nil, err
}

@ -213,9 +213,9 @@ func (v *VM) logger() logrus.FieldLogger {
}

// Pause pauses a VM.
func (v *VM) Pause() error {
func (v *VM) Pause(ctx context.Context) error {
v.logger().Info("pause vm")
return v.hypervisor.pauseSandbox()
return v.hypervisor.pauseSandbox(ctx)
}

// Save saves a VM to persistent disk.

@ -225,22 +225,22 @@ func (v *VM) Save() error {
}

// Resume resumes a paused VM.
func (v *VM) Resume() error {
func (v *VM) Resume(ctx context.Context) error {
v.logger().Info("resume vm")
return v.hypervisor.resumeSandbox()
return v.hypervisor.resumeSandbox(ctx)
}

// Start kicks off a configured VM.
func (v *VM) Start() error {
func (v *VM) Start(ctx context.Context) error {
v.logger().Info("start vm")
return v.hypervisor.startSandbox(vmStartTimeout)
return v.hypervisor.startSandbox(ctx, vmStartTimeout)
}

// Disconnect agent connections to a VM
func (v *VM) Disconnect() error {
func (v *VM) Disconnect(ctx context.Context) error {
v.logger().Info("kill vm")

if err := v.agent.disconnect(); err != nil {
if err := v.agent.disconnect(ctx); err != nil {
v.logger().WithError(err).Error("failed to disconnect agent")
}

@ -248,10 +248,10 @@ func (v *VM) Disconnect() error {
}

// Stop stops a VM process.
func (v *VM) Stop() error {
func (v *VM) Stop(ctx context.Context) error {
v.logger().Info("stop vm")

if err := v.hypervisor.stopSandbox(); err != nil {
if err := v.hypervisor.stopSandbox(ctx); err != nil {
return err
}

@ -259,10 +259,10 @@ func (v *VM) Stop() error {
}

// AddCPUs adds num of CPUs to the VM.
func (v *VM) AddCPUs(num uint32) error {
func (v *VM) AddCPUs(ctx context.Context, num uint32) error {
if num > 0 {
v.logger().Infof("hot adding %d vCPUs", num)
if _, err := v.hypervisor.hotplugAddDevice(num, cpuDev); err != nil {
if _, err := v.hypervisor.hotplugAddDevice(ctx, num, cpuDev); err != nil {
return err
}
v.cpuDelta += num

@ -273,11 +273,11 @@ func (v *VM) AddCPUs(num uint32) error {
}

// AddMemory adds numMB of memory to the VM.
func (v *VM) AddMemory(numMB uint32) error {
func (v *VM) AddMemory(ctx context.Context, numMB uint32) error {
if numMB > 0 {
v.logger().Infof("hot adding %d MB memory", numMB)
dev := &memoryDevice{1, int(numMB), 0, false}
if _, err := v.hypervisor.hotplugAddDevice(dev, memoryDev); err != nil {
if _, err := v.hypervisor.hotplugAddDevice(ctx, dev, memoryDev); err != nil {
return err
}
}

@ -286,9 +286,9 @@ func (v *VM) AddMemory(numMB uint32) error {
}

// OnlineCPUMemory puts the hotplugged CPU and memory online.
func (v *VM) OnlineCPUMemory() error {
func (v *VM) OnlineCPUMemory(ctx context.Context) error {
v.logger().Infof("online CPU %d and memory", v.cpuDelta)
err := v.agent.onlineCPUMem(v.cpuDelta, false)
err := v.agent.onlineCPUMem(ctx, v.cpuDelta, false)
if err == nil {
v.cpuDelta = 0
}

@ -298,7 +298,7 @@ func (v *VM) OnlineCPUMemory() error {

// ReseedRNG adds random entropy to guest random number generator
// and reseeds it.
func (v *VM) ReseedRNG() error {
func (v *VM) ReseedRNG(ctx context.Context) error {
v.logger().Infof("reseed guest random number generator")
urandomDev := "/dev/urandom"
data := make([]byte, 512)

@ -313,14 +313,14 @@ func (v *VM) ReseedRNG() error {
return err
}

return v.agent.reseedRNG(data)
return v.agent.reseedRNG(ctx, data)
}

// SyncTime syncs guest time with host time.
func (v *VM) SyncTime() error {
func (v *VM) SyncTime(ctx context.Context) error {
now := time.Now()
v.logger().WithField("time", now).Infof("sync guest time")
return v.agent.setGuestDateTime(now)
return v.agent.setGuestDateTime(ctx, now)
}

func (v *VM) assignSandbox(s *Sandbox) error {

@ -364,8 +364,8 @@ func (v *VM) assignSandbox(s *Sandbox) error {
}

// ToGrpc convert VM struct to Grpc format pb.GrpcVM.
func (v *VM) ToGrpc(config VMConfig) (*pb.GrpcVM, error) {
hJSON, err := v.hypervisor.toGrpc()
func (v *VM) ToGrpc(ctx context.Context, config VMConfig) (*pb.GrpcVM, error) {
hJSON, err := v.hypervisor.toGrpc(ctx)
if err != nil {
return nil, err
}
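The VM methods follow the same rule as the rest of the patch: each exported operation takes a context and forwards it to the hypervisor or agent call it wraps instead of minting its own background context. Below is a hedged caller-side sketch; vmOps and recycle are illustrative reductions, since the real *VM type in virtcontainers has many more methods.

package example

import "context"

// vmOps narrows the VM methods shown above to the ones a factory or
// cache server would call back to back.
type vmOps interface {
	Pause(ctx context.Context) error
	Resume(ctx context.Context) error
	Stop(ctx context.Context) error
}

// recycle threads one request-scoped context through every VM call, so
// the pause, resume and stop spans all end up under a single parent span.
func recycle(ctx context.Context, vm vmOps) error {
	if err := vm.Pause(ctx); err != nil {
		return err
	}
	if err := vm.Resume(ctx); err != nil {
		return err
	}
	return vm.Stop(ctx)
}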