diff --git a/src/runtime/containerd-shim-v2/service.go b/src/runtime/containerd-shim-v2/service.go index c3375bb915..40a9ecb85f 100644 --- a/src/runtime/containerd-shim-v2/service.go +++ b/src/runtime/containerd-shim-v2/service.go @@ -181,8 +181,8 @@ func newCommand(ctx context.Context, id, containerdBinary, containerdAddress str return cmd, nil } -// StartShim willl start a kata shimv2 daemon which will implemented the -// ShimV2 APIs such as create/start/update etc containers. +// StartShim is a binary call that starts a kata shimv2 service which will +// implement the ShimV2 APIs such as create/start/update etc containers. func (s *service) StartShim(ctx context.Context, opts cdshim.StartOpts) (_ string, retErr error) { bundlePath, err := os.Getwd() if err != nil { @@ -310,6 +310,7 @@ func getTopic(e interface{}) string { return cdruntime.TaskUnknownTopic } +// Cleanup is a binary call that cleans up resources used by the shim func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err error) { span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "Cleanup", shimTracingTags) defer span.End() @@ -369,6 +370,8 @@ func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err e // Create a new sandbox or container with the underlying OCI runtime func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) { + shimLog.WithField("container", r.ID).Debug("Create() start") + defer shimLog.WithField("container", r.ID).Debug("Create() end") start := time.Now() defer func() { err = toGRPC(err) @@ -426,6 +429,8 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * // Start a process func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAPI.StartResponse, err error) { + shimLog.WithField("container", r.ID).Debug("Start() start") + defer shimLog.WithField("container", r.ID).Debug("Start() end") span, spanCtx := 
katatrace.Trace(s.rootCtx, shimLog, "Start", shimTracingTags) defer span.End() @@ -477,6 +482,8 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP // Delete the initial process and container func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *taskAPI.DeleteResponse, err error) { + shimLog.WithField("container", r.ID).Debug("Delete() start") + defer shimLog.WithField("container", r.ID).Debug("Delete() end") span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "Delete", shimTracingTags) defer span.End() @@ -529,6 +536,8 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task // Exec an additional process inside the container func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("Exec() start") + defer shimLog.WithField("container", r.ID).Debug("Exec() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "Exec", shimTracingTags) defer span.End() @@ -567,6 +576,8 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *p // ResizePty of a process func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("ResizePty() start") + defer shimLog.WithField("container", r.ID).Debug("ResizePty() end") span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "ResizePty", shimTracingTags) defer span.End() @@ -606,6 +617,8 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ // State returns runtime state information for a process func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAPI.StateResponse, err error) { + shimLog.WithField("container", r.ID).Debug("State() start") + defer shimLog.WithField("container", r.ID).Debug("State() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "State", shimTracingTags) defer span.End() @@ 
-660,6 +673,8 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP // Pause the container func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("Pause() start") + defer shimLog.WithField("container", r.ID).Debug("Pause() end") span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "Pause", shimTracingTags) defer span.End() @@ -699,6 +714,8 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes // Resume the container func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("Resume() start") + defer shimLog.WithField("container", r.ID).Debug("Resume() end") span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "Resume", shimTracingTags) defer span.End() @@ -736,6 +753,8 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp // Kill a process with the provided signal func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("Kill() start") + defer shimLog.WithField("container", r.ID).Debug("Kill() end") span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "Kill", shimTracingTags) defer span.End() @@ -797,6 +816,8 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E // Since for kata, it cannot get the process's pid from VM, // thus only return the Shim's pid directly. func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.PidsResponse, err error) { + shimLog.WithField("container", r.ID).Debug("Pids() start") + defer shimLog.WithField("container", r.ID).Debug("Pids() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "Pids", shimTracingTags) defer span.End() @@ -820,6 +841,8 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI. 
// CloseIO of a process func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("CloseIO() start") + defer shimLog.WithField("container", r.ID).Debug("CloseIO() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "CloseIO", shimTracingTags) defer span.End() @@ -861,6 +884,8 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *pt // Checkpoint the container func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("Checkpoint() start") + defer shimLog.WithField("container", r.ID).Debug("Checkpoint() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "Checkpoint", shimTracingTags) defer span.End() @@ -875,6 +900,8 @@ func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskReque // Connect returns shim information such as the shim's pid func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *taskAPI.ConnectResponse, err error) { + shimLog.WithField("container", r.ID).Debug("Connect() start") + defer shimLog.WithField("container", r.ID).Debug("Connect() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "Connect", shimTracingTags) defer span.End() @@ -895,6 +922,8 @@ func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *ta } func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("Shutdown() start") + defer shimLog.WithField("container", r.ID).Debug("Shutdown() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "Shutdown", shimTracingTags) start := time.Now() @@ -928,6 +957,8 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ * } func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAPI.StatsResponse, err error) { + 
shimLog.WithField("container", r.ID).Debug("Stats() start") + defer shimLog.WithField("container", r.ID).Debug("Stats() end") span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "Stats", shimTracingTags) defer span.End() @@ -957,6 +988,8 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP // Update a running container func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *ptypes.Empty, err error) { + shimLog.WithField("container", r.ID).Debug("Update() start") + defer shimLog.WithField("container", r.ID).Debug("Update() end") span, spanCtx := katatrace.Trace(s.rootCtx, shimLog, "Update", shimTracingTags) defer span.End() @@ -989,6 +1022,8 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ * // Wait for a process to exit func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (_ *taskAPI.WaitResponse, err error) { + shimLog.WithField("container", r.ID).Debug("Wait() start") + defer shimLog.WithField("container", r.ID).Debug("Wait() end") span, _ := katatrace.Trace(s.rootCtx, shimLog, "Wait", shimTracingTags) defer span.End() diff --git a/src/runtime/virtcontainers/sandbox.go b/src/runtime/virtcontainers/sandbox.go index 592efb64bf..81034dedaf 100644 --- a/src/runtime/virtcontainers/sandbox.go +++ b/src/runtime/virtcontainers/sandbox.go @@ -700,7 +700,7 @@ func (s *Sandbox) Delete(ctx context.Context) error { for _, c := range s.containers { if err := c.delete(ctx); err != nil { - s.Logger().WithError(err).WithField("cid", c.id).Debug("failed to delete container") + s.Logger().WithError(err).WithField("container", c.id).Debug("failed to delete container") } } @@ -1264,7 +1264,7 @@ func (s *Sandbox) CreateContainer(ctx context.Context, contConfig ContainerConfi defer func() { // Rollback if error happens.
if err != nil { - logger := s.Logger().WithFields(logrus.Fields{"container-id": c.id, "sandox-id": s.id, "rollback": true}) + logger := s.Logger().WithFields(logrus.Fields{"container": c.id, "sandbox": s.id, "rollback": true}) logger.WithError(err).Error("Cleaning up partially created container") if errStop := c.stop(ctx, true); errStop != nil { @@ -1311,7 +1311,7 @@ func (s *Sandbox) StartContainer(ctx context.Context, containerID string) (VCCon return nil, err } - s.Logger().Info("Container is started") + s.Logger().WithField("container", containerID).Info("Container is started") // Update sandbox resources in case a stopped container // is started @@ -1998,7 +1998,7 @@ func (s *Sandbox) calculateSandboxMemory() (int64, bool, int64) { for _, c := range s.config.Containers { // Do not hot add again non-running containers resources if cont, ok := s.containers[c.ID]; ok && cont.state.State == types.StateStopped { - s.Logger().WithField("container-id", c.ID).Debug("Do not taking into account memory resources of not running containers") + s.Logger().WithField("container", c.ID).Debug("Not taking into account memory resources of non-running containers") continue } @@ -2035,7 +2035,7 @@ func (s *Sandbox) calculateSandboxCPUs() (uint32, error) { for _, c := range s.config.Containers { // Do not hot add again non-running containers resources if cont, ok := s.containers[c.ID]; ok && cont.state.State == types.StateStopped { - s.Logger().WithField("container-id", c.ID).Debug("Do not taking into account CPU resources of not running containers") + s.Logger().WithField("container", c.ID).Debug("Not taking into account CPU resources of non-running containers") continue }