From 74192d179d8d33bf1753f13048093d3cb4978691 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Wed, 24 Mar 2021 16:36:00 +0800 Subject: [PATCH] runtime: fix static check errors It turns out we have managed to break the static checker in many difference places with the absence of static checker in github action. Let's fix them while enabling static checker in github actions... Signed-off-by: Peng Tao --- src/runtime/cli/kata-check.go | 3 +- src/runtime/cli/kata-monitor/main.go | 4 +- src/runtime/cli/main.go | 4 - src/runtime/cli/main_test.go | 46 +--------- src/runtime/cli/release_test.go | 2 +- src/runtime/cli/utils.go | 18 ---- src/runtime/cli/utils_arch_base.go | 10 --- src/runtime/cli/utils_s390x.go | 10 --- src/runtime/containerd-shim-v2/service.go | 34 +++---- .../containerd-shim-v2/shim_management.go | 5 +- .../containerd-shim-v2/shim_metrics.go | 2 +- .../containerd-shim-v2/shim_metrics_test.go | 2 +- src/runtime/pkg/kata-monitor/metrics_test.go | 4 +- src/runtime/pkg/kata-monitor/monitor.go | 4 +- src/runtime/pkg/kata-monitor/pprof.go | 2 +- src/runtime/pkg/kata-monitor/sandbox_cache.go | 2 +- src/runtime/pkg/kata-monitor/shim_client.go | 2 +- .../pkg/katautils/config-settings.go.in | 1 - src/runtime/pkg/katautils/config.go | 88 +++++++----------- src/runtime/virtcontainers/acrn.go | 6 +- src/runtime/virtcontainers/api.go | 6 +- src/runtime/virtcontainers/api_test.go | 31 ------- src/runtime/virtcontainers/clh.go | 12 +-- src/runtime/virtcontainers/container.go | 2 +- src/runtime/virtcontainers/factory/factory.go | 2 +- src/runtime/virtcontainers/fc.go | 23 +++-- src/runtime/virtcontainers/fc_metrics.go | 90 +++++++++---------- src/runtime/virtcontainers/kata_agent.go | 31 +++---- src/runtime/virtcontainers/kata_agent_test.go | 1 + src/runtime/virtcontainers/mock_agent.go | 3 +- src/runtime/virtcontainers/mount.go | 4 +- src/runtime/virtcontainers/network.go | 4 +- .../pkg/agent/protocols/client/client.go | 4 +- src/runtime/virtcontainers/pkg/oci/utils.go | 24 ++--- src/runtime/virtcontainers/qemu.go | 62 +++++-------- src/runtime/virtcontainers/sandbox.go | 29 +++--- src/runtime/virtcontainers/sandbox_metrics.go | 4 +- 37 files changed, 203 insertions(+), 378 deletions(-) delete mode 100644 src/runtime/cli/utils_arch_base.go delete mode 100644 src/runtime/cli/utils_s390x.go diff --git a/src/runtime/cli/kata-check.go b/src/runtime/cli/kata-check.go index e2008c1bec..1a0b130d6b 100644 --- a/src/runtime/cli/kata-check.go +++ b/src/runtime/cli/kata-check.go @@ -63,7 +63,6 @@ const ( moduleParamDir = "parameters" successMessageCapable = "System is capable of running " + project successMessageCreate = "System can currently create " + project - successMessageVersion = "Version consistency of " + project + " is verified" failMessage = "System is not capable of running " + project kernelPropertyCorrect = "Kernel property value correct" @@ -398,7 +397,7 @@ EXAMPLES: span, _ := katautils.Trace(ctx, "check") defer span.End() - if context.Bool("no-network-checks") == false && os.Getenv(noNetworkEnvVar) == "" { + if !context.Bool("no-network-checks") && os.Getenv(noNetworkEnvVar) == "" { cmd := RelCmdCheck if context.Bool("only-list-releases") { diff --git a/src/runtime/cli/kata-monitor/main.go b/src/runtime/cli/kata-monitor/main.go index accd211945..c3565bcaf4 100644 --- a/src/runtime/cli/kata-monitor/main.go +++ b/src/runtime/cli/kata-monitor/main.go @@ -48,9 +48,9 @@ var versionTemplate = `{{.AppName}} ` func printVersion(ver versionInfo) { - t, err := 
template.New("version").Parse(versionTemplate) + t, _ := template.New("version").Parse(versionTemplate) - if err = t.Execute(os.Stdout, ver); err != nil { + if err := t.Execute(os.Stdout, ver); err != nil { panic(err) } } diff --git a/src/runtime/cli/main.go b/src/runtime/cli/main.go index c2f0ac5f12..0c454f6608 100644 --- a/src/runtime/cli/main.go +++ b/src/runtime/cli/main.go @@ -64,9 +64,6 @@ var originalLoggerLevel = logrus.WarnLevel var debug = false -// if true, coredump when an internal error occurs or a fatal signal is received -var crashOnError = false - // concrete virtcontainer implementation var virtcontainersImpl = &vc.VCImpl{} @@ -328,7 +325,6 @@ func beforeSubcommands(c *cli.Context) error { } if !subCmdIsCheckCmd { debug = runtimeConfig.Debug - crashOnError = runtimeConfig.Debug if traceRootSpan != "" { // Create the tracer. diff --git a/src/runtime/cli/main_test.go b/src/runtime/cli/main_test.go index d27052ae73..d47a016560 100644 --- a/src/runtime/cli/main_test.go +++ b/src/runtime/cli/main_test.go @@ -8,7 +8,6 @@ package main import ( "bytes" "context" - "encoding/json" "errors" "flag" "fmt" @@ -29,7 +28,6 @@ import ( "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock" - "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/stretchr/testify/assert" @@ -44,10 +42,8 @@ const ( // small docker image used to create root filesystems from testDockerImage = "busybox" - testSandboxID = "99999999-9999-9999-99999999999999999" - testContainerID = "1" - testBundle = "bundle" - testConsole = "/dev/pts/999" + testBundle = "bundle" + testConsole = "/dev/pts/999" ) var ( @@ -387,44 +383,6 @@ func makeOCIBundle(bundleDir string) error { return nil } -func writeOCIConfigFile(spec specs.Spec, configPath string) error { - if configPath == "" { - return errors.New("BUG: need config file path") - } - - bytes, err := json.MarshalIndent(spec, "", "\t") - if err != nil { - return err - } - - return ioutil.WriteFile(configPath, bytes, testFileMode) -} - -func newSingleContainerStatus(containerID string, containerState types.ContainerState, annotations map[string]string, spec *specs.Spec) vc.ContainerStatus { - return vc.ContainerStatus{ - ID: containerID, - State: containerState, - Annotations: annotations, - Spec: spec, - } -} - -func execCLICommandFunc(assertHandler *assert.Assertions, cliCommand cli.Command, set *flag.FlagSet, expectedErr bool) { - ctx := createCLIContext(set) - ctx.App.Name = "foo" - - fn, ok := cliCommand.Action.(func(context *cli.Context) error) - assertHandler.True(ok) - - err := fn(ctx) - - if expectedErr { - assertHandler.Error(err) - } else { - assertHandler.Nil(err) - } -} - func createCLIContextWithApp(flagSet *flag.FlagSet, app *cli.App) *cli.Context { ctx := cli.NewContext(app, flagSet, nil) diff --git a/src/runtime/cli/release_test.go b/src/runtime/cli/release_test.go index b6d94ac0f5..fc5b46cbad 100644 --- a/src/runtime/cli/release_test.go +++ b/src/runtime/cli/release_test.go @@ -458,7 +458,7 @@ func TestGetNewReleaseType(t *testing.T) { } data := []testData{ - // Check build metadata (ignored for version comparisions) + // Check build metadata (ignored for version comparisons) {"2.0.0+build", "2.0.0", true, ""}, {"2.0.0+build-1", "2.0.0+build-2", true, ""}, {"1.12.0+build", 
"1.12.0", true, ""}, diff --git a/src/runtime/cli/utils.go b/src/runtime/cli/utils.go index 4d982e8cb6..be2b9e5d7b 100644 --- a/src/runtime/cli/utils.go +++ b/src/runtime/cli/utils.go @@ -189,21 +189,3 @@ func constructVersionInfo(version string) VersionInfo { } } - -func versionEqual(a VersionInfo, b VersionInfo) bool { - av, err := semver.Make(a.Semver) - if err != nil { - return false - } - - bv, err := semver.Make(b.Semver) - if err != nil { - return false - } - - if av.Major == bv.Major && av.Minor == bv.Minor && av.Patch == bv.Patch { - return true - } - - return false -} diff --git a/src/runtime/cli/utils_arch_base.go b/src/runtime/cli/utils_arch_base.go deleted file mode 100644 index a0c6a5e71f..0000000000 --- a/src/runtime/cli/utils_arch_base.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !s390x -// -// SPDX-License-Identifier: Apache-2.0 -// - -package main - -func archConvertStatFs(cgroupFsType int) int64 { - return int64(cgroupFsType) -} diff --git a/src/runtime/cli/utils_s390x.go b/src/runtime/cli/utils_s390x.go deleted file mode 100644 index 6acd5627ef..0000000000 --- a/src/runtime/cli/utils_s390x.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2018 IBM -// -// SPDX-License-Identifier: Apache-2.0 -// - -package main - -func archConvertStatFs(cgroupFsType int) uint32 { - return uint32(cgroupFsType) -} diff --git a/src/runtime/containerd-shim-v2/service.go b/src/runtime/containerd-shim-v2/service.go index bcecebe3a2..b33b2c7e7a 100644 --- a/src/runtime/containerd-shim-v2/service.go +++ b/src/runtime/containerd-shim-v2/service.go @@ -301,7 +301,7 @@ func trace(ctx context.Context, name string) (otelTrace.Span, context.Context) { } func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err error) { - span, ctx := trace(s.rootCtx, "Cleanup") + span, _ := trace(s.rootCtx, "Cleanup") defer span.End() //Since the binary cleanup will return the DeleteResponse from stdout to @@ -412,7 +412,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * // Start a process func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAPI.StartResponse, err error) { - span, ctx := trace(s.rootCtx, "Start") + span, _ := trace(s.rootCtx, "Start") defer span.End() start := time.Now() @@ -463,7 +463,7 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP // Delete the initial process and container func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *taskAPI.DeleteResponse, err error) { - span, ctx := trace(s.rootCtx, "Delete") + span, _ := trace(s.rootCtx, "Delete") defer span.End() start := time.Now() @@ -515,7 +515,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task // Exec an additional process inside the container func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "Exec") + span, _ := trace(s.rootCtx, "Exec") defer span.End() start := time.Now() @@ -553,7 +553,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *p // ResizePty of a process func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "ResizePty") + span, _ := trace(s.rootCtx, "ResizePty") defer span.End() start := time.Now() @@ -592,7 +592,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ // State returns runtime state information for a process func 
(s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAPI.StateResponse, err error) { - span, ctx := trace(s.rootCtx, "State") + span, _ := trace(s.rootCtx, "State") defer span.End() start := time.Now() @@ -644,7 +644,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP // Pause the container func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "Pause") + span, _ := trace(s.rootCtx, "Pause") defer span.End() start := time.Now() @@ -683,7 +683,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes // Resume the container func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "Resume") + span, _ := trace(s.rootCtx, "Resume") defer span.End() start := time.Now() @@ -720,7 +720,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp // Kill a process with the provided signal func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "Kill") + span, _ := trace(s.rootCtx, "Kill") defer span.End() start := time.Now() @@ -781,7 +781,7 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E // Since for kata, it cannot get the process's pid from VM, // thus only return the Shim's pid directly. func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.PidsResponse, err error) { - span, ctx := trace(s.rootCtx, "Pids") + span, _ := trace(s.rootCtx, "Pids") defer span.End() var processes []*task.ProcessInfo @@ -804,7 +804,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI. 
// CloseIO of a process func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "CloseIO") + span, _ := trace(s.rootCtx, "CloseIO") defer span.End() start := time.Now() @@ -845,7 +845,7 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *pt // Checkpoint the container func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "Checkpoint") + span, _ := trace(s.rootCtx, "Checkpoint") defer span.End() start := time.Now() @@ -859,7 +859,7 @@ func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskReque // Connect returns shim information such as the shim's pid func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *taskAPI.ConnectResponse, err error) { - span, ctx := trace(s.rootCtx, "Connect") + span, _ := trace(s.rootCtx, "Connect") defer span.End() start := time.Now() @@ -879,7 +879,7 @@ func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *ta } func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "Shutdown") + span, _ := trace(s.rootCtx, "Shutdown") start := time.Now() defer func() { @@ -907,7 +907,7 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ * } func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAPI.StatsResponse, err error) { - span, ctx := trace(s.rootCtx, "Stats") + span, _ := trace(s.rootCtx, "Stats") defer span.End() start := time.Now() @@ -936,7 +936,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP // Update a running container func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *ptypes.Empty, err error) { - span, ctx := trace(s.rootCtx, "Update") + span, _ := trace(s.rootCtx, "Update") defer span.End() start := time.Now() @@ -968,7 +968,7 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ * // Wait for a process to exit func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (_ *taskAPI.WaitResponse, err error) { - span, ctx := trace(s.rootCtx, "Wait") + span, _ := trace(s.rootCtx, "Wait") defer span.End() var ret uint32 diff --git a/src/runtime/containerd-shim-v2/shim_management.go b/src/runtime/containerd-shim-v2/shim_management.go index 2f08a88fcc..e03224f072 100644 --- a/src/runtime/containerd-shim-v2/shim_management.go +++ b/src/runtime/containerd-shim-v2/shim_management.go @@ -65,8 +65,7 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) { // encode the metrics encoder := expfmt.NewEncoder(w, expfmt.FmtText) for _, mf := range mfs { - if err := encoder.Encode(mf); err != nil { - } + encoder.Encode(mf) } // if using an old agent, only collect shim/sandbox metrics. 
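
Most of the changes in service.go and throughout the virtcontainers code swap span, ctx := trace(...) for span, _ := trace(...) wherever the returned context is never read again. A minimal, self-contained sketch of the warning and the fix follows; the helper and type names are illustrative stand-ins, not the real kata-containers APIs:

package main

import "context"

// fakeSpan stands in for an OpenTelemetry span; only End() matters here.
type fakeSpan struct{}

func (s fakeSpan) End() {}

// trace mimics the shape of the helper used in this patch: start a span and
// return it together with a derived context.
func trace(ctx context.Context, name string) (fakeSpan, context.Context) {
	return fakeSpan{}, ctx
}

func doWork(ctx context.Context) {
	// Before: `span, ctx := trace(ctx, "doWork")` reassigned the ctx
	// parameter without ever reading it again, which tools such as
	// ineffassign and staticcheck flag as an ineffectual assignment.
	// After: discard the returned context when it is not needed.
	span, _ := trace(ctx, "doWork")
	defer span.End()
}

func main() {
	doWork(context.Background())
}
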
@@ -150,7 +149,7 @@ func (s *service) startManagementServer(ctx context.Context, ociSpec *specs.Spec shimMgtLog.Info("kata management inited") - // bind hanlder + // bind handler m := http.NewServeMux() m.Handle("/metrics", http.HandlerFunc(s.serveMetrics)) m.Handle("/agent-url", http.HandlerFunc(s.agentURL)) diff --git a/src/runtime/containerd-shim-v2/shim_metrics.go b/src/runtime/containerd-shim-v2/shim_metrics.go index e9f3655ae1..d946665e18 100644 --- a/src/runtime/containerd-shim-v2/shim_metrics.go +++ b/src/runtime/containerd-shim-v2/shim_metrics.go @@ -177,7 +177,7 @@ func calcOverhead(initialSandboxStats, finishSandboxStats vc.SandboxStats, initi cpuUsageGuest := float64(guestFinalCPU-guestInitCPU) / deltaTime * 100 cpuUsageHost := float64(hostFinalCPU-hostInitCPU) / deltaTime * 100 - return float64(hostMemoryUsage - guestMemoryUsage), float64(cpuUsageHost - cpuUsageGuest) + return float64(hostMemoryUsage - guestMemoryUsage), cpuUsageHost - cpuUsageGuest } func (s *service) getPodOverhead(ctx context.Context) (float64, float64, error) { diff --git a/src/runtime/containerd-shim-v2/shim_metrics_test.go b/src/runtime/containerd-shim-v2/shim_metrics_test.go index 1808f66e37..255c0ccfed 100644 --- a/src/runtime/containerd-shim-v2/shim_metrics_test.go +++ b/src/runtime/containerd-shim-v2/shim_metrics_test.go @@ -97,7 +97,7 @@ func TestStatsSandbox(t *testing.T) { sandbox.StatsFunc = getSandboxCPUFunc(2000, 110000) sandbox.StatsContainerFunc = getStatsContainerCPUFunc(200, 400, 20000, 40000) - finishSandboxStats, finishContainersStats, err := s.statsSandbox(context.Background()) + finishSandboxStats, finishContainersStats, _ := s.statsSandbox(context.Background()) // calc overhead mem, cpu := calcOverhead(initialSandboxStats, finishSandboxStats, initialContainerStats, finishContainersStats, 1e9) diff --git a/src/runtime/pkg/kata-monitor/metrics_test.go b/src/runtime/pkg/kata-monitor/metrics_test.go index 8ea63b1d72..5263d2a932 100644 --- a/src/runtime/pkg/kata-monitor/metrics_test.go +++ b/src/runtime/pkg/kata-monitor/metrics_test.go @@ -107,14 +107,14 @@ func TestEncodeMetricFamily(t *testing.T) { scrapeCount.Inc() scrapeCount.Inc() - mfs, err := prometheus.DefaultGatherer.Gather() + mfs, _ := prometheus.DefaultGatherer.Gather() // create encoder buf := bytes.NewBufferString("") encoder := expfmt.NewEncoder(buf, expfmt.FmtText) // encode metrics to text format - err = encodeMetricFamily(mfs, encoder) + err := encodeMetricFamily(mfs, encoder) assert.Nil(err, "encodeMetricFamily should not return error") // here will be to many metrics, diff --git a/src/runtime/pkg/kata-monitor/monitor.go b/src/runtime/pkg/kata-monitor/monitor.go index 3254b52021..64266fdb7d 100644 --- a/src/runtime/pkg/kata-monitor/monitor.go +++ b/src/runtime/pkg/kata-monitor/monitor.go @@ -38,7 +38,7 @@ type KataMonitor struct { // NewKataMonitor create and return a new KataMonitor instance func NewKataMonitor(containerdAddr, containerdConfigFile string) (*KataMonitor, error) { if containerdAddr == "" { - return nil, fmt.Errorf("Containerd serve address missing.") + return nil, fmt.Errorf("containerd serve address missing") } containerdConf := &srvconfig.Config{ @@ -82,7 +82,7 @@ func (km *KataMonitor) initSandboxCache() error { // GetAgentURL returns agent URL func (km *KataMonitor) GetAgentURL(w http.ResponseWriter, r *http.Request) { - sandboxID, err := getSandboxIdFromReq(r) + sandboxID, err := getSandboxIDFromReq(r) if err != nil { commonServeError(w, http.StatusBadRequest, err) return diff --git 
a/src/runtime/pkg/kata-monitor/pprof.go b/src/runtime/pkg/kata-monitor/pprof.go index 9e54315a42..86f39c4667 100644 --- a/src/runtime/pkg/kata-monitor/pprof.go +++ b/src/runtime/pkg/kata-monitor/pprof.go @@ -21,7 +21,7 @@ func serveError(w http.ResponseWriter, status int, txt string) { } func (km *KataMonitor) composeSocketAddress(r *http.Request) (string, error) { - sandbox, err := getSandboxIdFromReq(r) + sandbox, err := getSandboxIDFromReq(r) if err != nil { return "", err } diff --git a/src/runtime/pkg/kata-monitor/sandbox_cache.go b/src/runtime/pkg/kata-monitor/sandbox_cache.go index 8d3b579751..f749ef4641 100644 --- a/src/runtime/pkg/kata-monitor/sandbox_cache.go +++ b/src/runtime/pkg/kata-monitor/sandbox_cache.go @@ -157,7 +157,7 @@ func (sc *sandboxCache) startEventsListener(addr string) error { // if the container is a sandbox container, // means the VM is started, and can start to collect metrics from the VM. if isSandboxContainer(&c) { - // we can simply put the contaienrid in sandboxes list if the conatiner is a sandbox container + // we can simply put the contaienrid in sandboxes list if the container is a sandbox container sc.putIfNotExists(cc.ID, e.Namespace) monitorLog.WithField("container", cc.ID).Info("add sandbox to cache") } diff --git a/src/runtime/pkg/kata-monitor/shim_client.go b/src/runtime/pkg/kata-monitor/shim_client.go index f711f88f99..b2c462e8fa 100644 --- a/src/runtime/pkg/kata-monitor/shim_client.go +++ b/src/runtime/pkg/kata-monitor/shim_client.go @@ -25,7 +25,7 @@ func commonServeError(w http.ResponseWriter, status int, err error) { } } -func getSandboxIdFromReq(r *http.Request) (string, error) { +func getSandboxIDFromReq(r *http.Request) (string, error) { sandbox := r.URL.Query().Get("sandbox") if sandbox != "" { return sandbox, nil diff --git a/src/runtime/pkg/katautils/config-settings.go.in b/src/runtime/pkg/katautils/config-settings.go.in index 730b0c8bb8..7cd9138baa 100644 --- a/src/runtime/pkg/katautils/config-settings.go.in +++ b/src/runtime/pkg/katautils/config-settings.go.in @@ -17,7 +17,6 @@ var defaultInitrdPath = "/usr/share/kata-containers/kata-containers-initrd.img" var defaultFirmwarePath = "" var defaultMachineAccelerators = "" var defaultCPUFeatures = "" -var defaultShimPath = "/usr/libexec/kata-containers/kata-shim" var systemdUnitName = "kata-containers.target" const defaultKernelParams = "" diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go index d10037501f..d3dce569dc 100644 --- a/src/runtime/pkg/katautils/config.go +++ b/src/runtime/pkg/katautils/config.go @@ -72,12 +72,9 @@ type factory struct { type hypervisor struct { Path string `toml:"path"` - HypervisorPathList []string `toml:"valid_hypervisor_paths"` JailerPath string `toml:"jailer_path"` - JailerPathList []string `toml:"valid_jailer_paths"` Kernel string `toml:"kernel"` CtlPath string `toml:"ctlpath"` - CtlPathList []string `toml:"valid_ctlpaths"` Initrd string `toml:"initrd"` Image string `toml:"image"` Firmware string `toml:"firmware"` @@ -89,17 +86,23 @@ type hypervisor struct { EntropySource string `toml:"entropy_source"` SharedFS string `toml:"shared_fs"` VirtioFSDaemon string `toml:"virtio_fs_daemon"` - VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"` VirtioFSCache string `toml:"virtio_fs_cache"` + VhostUserStorePath string `toml:"vhost_user_store_path"` + FileBackedMemRootDir string `toml:"file_mem_backend"` + GuestHookPath string `toml:"guest_hook_path"` + GuestMemoryDumpPath string `toml:"guest_memory_dump_path"` + 
HypervisorPathList []string `toml:"valid_hypervisor_paths"` + JailerPathList []string `toml:"valid_jailer_paths"` + CtlPathList []string `toml:"valid_ctlpaths"` + VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"` VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"` PFlashList []string `toml:"pflashes"` - VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"` - BlockDeviceCacheSet bool `toml:"block_device_cache_set"` - BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"` - BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"` - EnableVhostUserStore bool `toml:"enable_vhost_user_store"` - VhostUserStorePath string `toml:"vhost_user_store_path"` VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"` + FileBackedMemRootList []string `toml:"valid_file_mem_backends"` + EnableAnnotations []string `toml:"enable_annotations"` + RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"` + TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"` + VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"` NumVCPUs int32 `toml:"default_vcpus"` DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"` MemorySize uint32 `toml:"default_memory"` @@ -108,14 +111,16 @@ type hypervisor struct { DefaultBridges uint32 `toml:"default_bridges"` Msize9p uint32 `toml:"msize_9p"` PCIeRootPort uint32 `toml:"pcie_root_port"` + BlockDeviceCacheSet bool `toml:"block_device_cache_set"` + BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"` + BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"` + EnableVhostUserStore bool `toml:"enable_vhost_user_store"` DisableBlockDeviceUse bool `toml:"disable_block_device_use"` MemPrealloc bool `toml:"enable_mem_prealloc"` HugePages bool `toml:"enable_hugepages"` VirtioMem bool `toml:"enable_virtio_mem"` IOMMU bool `toml:"enable_iommu"` IOMMUPlatform bool `toml:"enable_iommu_platform"` - FileBackedMemRootDir string `toml:"file_mem_backend"` - FileBackedMemRootList []string `toml:"valid_file_mem_backends"` Swap bool `toml:"enable_swap"` Debug bool `toml:"enable_debug"` DisableNestingChecks bool `toml:"disable_nesting_checks"` @@ -123,35 +128,30 @@ type hypervisor struct { DisableImageNvdimm bool `toml:"disable_image_nvdimm"` HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"` DisableVhostNet bool `toml:"disable_vhost_net"` - GuestHookPath string `toml:"guest_hook_path"` - RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"` - TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"` - EnableAnnotations []string `toml:"enable_annotations"` - GuestMemoryDumpPath string `toml:"guest_memory_dump_path"` GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"` } type runtime struct { + InterNetworkModel string `toml:"internetworking_model"` + JaegerEndpoint string `toml:"jaeger_endpoint"` + JaegerUser string `toml:"jaeger_user"` + JaegerPassword string `toml:"jaeger_password"` + SandboxBindMounts []string `toml:"sandbox_bind_mounts"` + Experimental []string `toml:"experimental"` Debug bool `toml:"enable_debug"` Tracing bool `toml:"enable_tracing"` DisableNewNetNs bool `toml:"disable_new_netns"` DisableGuestSeccomp bool `toml:"disable_guest_seccomp"` SandboxCgroupOnly bool `toml:"sandbox_cgroup_only"` - SandboxBindMounts []string `toml:"sandbox_bind_mounts"` - Experimental []string `toml:"experimental"` - InterNetworkModel string `toml:"internetworking_model"` EnablePprof bool `toml:"enable_pprof"` - JaegerEndpoint string `toml:"jaeger_endpoint"` - JaegerUser string `toml:"jaeger_user"` - 
JaegerPassword string `toml:"jaeger_password"` } type agent struct { - Debug bool `toml:"enable_debug"` - Tracing bool `toml:"enable_tracing"` TraceMode string `toml:"trace_mode"` TraceType string `toml:"trace_type"` KernelModules []string `toml:"kernel_modules"` + Debug bool `toml:"enable_debug"` + Tracing bool `toml:"enable_tracing"` DebugConsoleEnabled bool `toml:"debug_console_enabled"` } @@ -449,20 +449,12 @@ func (h hypervisor) getInitrdAndImage() (initrd string, image string, err error) return } -func (h hypervisor) getRxRateLimiterCfg() (uint64, error) { - if h.RxRateLimiterMaxRate < 0 { - return 0, fmt.Errorf("rx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.RxRateLimiterMaxRate) - } - - return h.RxRateLimiterMaxRate, nil +func (h hypervisor) getRxRateLimiterCfg() uint64 { + return h.RxRateLimiterMaxRate } -func (h hypervisor) getTxRateLimiterCfg() (uint64, error) { - if h.TxRateLimiterMaxRate < 0 { - return 0, fmt.Errorf("tx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.TxRateLimiterMaxRate) - } - - return h.TxRateLimiterMaxRate, nil +func (h hypervisor) getTxRateLimiterCfg() uint64 { + return h.TxRateLimiterMaxRate } func (h hypervisor) getIOMMUPlatform() bool { @@ -547,15 +539,8 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { return vc.HypervisorConfig{}, err } - rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg() - if err != nil { - return vc.HypervisorConfig{}, err - } - - txRateLimiterMaxRate, err := h.getTxRateLimiterCfg() - if err != nil { - return vc.HypervisorConfig{}, err - } + rxRateLimiterMaxRate := h.getRxRateLimiterCfg() + txRateLimiterMaxRate := h.getTxRateLimiterCfg() return vc.HypervisorConfig{ HypervisorPath: hypervisor, @@ -656,15 +641,8 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) { return vc.HypervisorConfig{}, err } - rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg() - if err != nil { - return vc.HypervisorConfig{}, err - } - - txRateLimiterMaxRate, err := h.getTxRateLimiterCfg() - if err != nil { - return vc.HypervisorConfig{}, err - } + rxRateLimiterMaxRate := h.getRxRateLimiterCfg() + txRateLimiterMaxRate := h.getTxRateLimiterCfg() return vc.HypervisorConfig{ HypervisorPath: hypervisor, diff --git a/src/runtime/virtcontainers/acrn.go b/src/runtime/virtcontainers/acrn.go index e9005f183d..f04ebd6e53 100644 --- a/src/runtime/virtcontainers/acrn.go +++ b/src/runtime/virtcontainers/acrn.go @@ -420,7 +420,7 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa // startSandbox will start the Sandbox's VM. func (a *Acrn) startSandbox(ctx context.Context, timeoutSecs int) error { - span, ctx := a.trace(ctx, "startSandbox") + span, _ := a.trace(ctx, "startSandbox") defer span.End() if a.config.Debug { @@ -570,7 +570,7 @@ func (a *Acrn) updateBlockDevice(drive *config.BlockDrive) error { } func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) { - span, ctx := a.trace(ctx, "hotplugAddDevice") + span, _ := a.trace(ctx, "hotplugAddDevice") defer span.End() switch devType { @@ -584,7 +584,7 @@ func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devTyp } func (a *Acrn) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) { - span, ctx := a.trace(ctx, "hotplugRemoveDevice") + span, _ := a.trace(ctx, "hotplugRemoveDevice") defer span.End() // Not supported. 
return success diff --git a/src/runtime/virtcontainers/api.go b/src/runtime/virtcontainers/api.go index 3cb6fe66c3..22e0634730 100644 --- a/src/runtime/virtcontainers/api.go +++ b/src/runtime/virtcontainers/api.go @@ -53,7 +53,7 @@ func SetLogger(ctx context.Context, logger *logrus.Entry) { // CreateSandbox is the virtcontainers sandbox creation entry point. // CreateSandbox creates a sandbox and its containers. It does not start them. func CreateSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (VCSandbox, error) { - span, ctx := trace(ctx, "CreateSandbox") + span, _ := trace(ctx, "CreateSandbox") defer span.End() s, err := createSandboxFromConfig(ctx, sandboxConfig, factory) @@ -62,7 +62,7 @@ func CreateSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac } func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (_ *Sandbox, err error) { - span, ctx := trace(ctx, "createSandboxFromConfig") + span, _ := trace(ctx, "createSandboxFromConfig") defer span.End() // Create the sandbox. @@ -136,7 +136,7 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f // in the sandbox left, do stop the sandbox and delete it. Those serial operations will be done exclusively by // locking the sandbox. func CleanupContainer(ctx context.Context, sandboxID, containerID string, force bool) error { - span, ctx := trace(ctx, "CleanupContainer") + span, _ := trace(ctx, "CleanupContainer") defer span.End() if sandboxID == "" { diff --git a/src/runtime/virtcontainers/api_test.go b/src/runtime/virtcontainers/api_test.go index 97bc1c4041..b2aa32833f 100644 --- a/src/runtime/virtcontainers/api_test.go +++ b/src/runtime/virtcontainers/api_test.go @@ -15,7 +15,6 @@ import ( "testing" ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils" - "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations" vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock" @@ -74,16 +73,6 @@ func newBasicTestCmd() types.Cmd { return cmd } -func rmSandboxDir(sid string) error { - store, err := persist.GetDriver() - if err != nil { - return fmt.Errorf("failed to get fs persist driver: %v", err) - } - - store.Destroy(sid) - return nil -} - func newTestSandboxConfigNoop() SandboxConfig { bundlePath := filepath.Join(testDir, testBundle) containerAnnotations[annotations.BundlePathKey] = bundlePath @@ -207,26 +196,6 @@ func TestCreateSandboxFailing(t *testing.T) { * Benchmarks */ -func createNewSandboxConfig(hType HypervisorType) SandboxConfig { - hypervisorConfig := HypervisorConfig{ - KernelPath: "/usr/share/kata-containers/vmlinux.container", - ImagePath: "/usr/share/kata-containers/kata-containers.img", - HypervisorPath: "/usr/bin/qemu-system-x86_64", - } - - netConfig := NetworkConfig{} - - return SandboxConfig{ - ID: testSandboxID, - HypervisorType: hType, - HypervisorConfig: hypervisorConfig, - - AgentConfig: KataAgentConfig{}, - - NetworkConfig: netConfig, - } -} - func newTestContainerConfigNoop(contID string) ContainerConfig { // Define the container command and bundle. 
container := ContainerConfig{ diff --git a/src/runtime/virtcontainers/clh.go b/src/runtime/virtcontainers/clh.go index f321765304..fcf346808f 100644 --- a/src/runtime/virtcontainers/clh.go +++ b/src/runtime/virtcontainers/clh.go @@ -339,7 +339,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ // startSandbox will start the VMM and boot the virtual machine for the given sandbox. func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error { - span, ctx := clh.trace(ctx, "startSandbox") + span, _ := clh.trace(ctx, "startSandbox") defer span.End() ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second) @@ -492,7 +492,7 @@ func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interf } func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) { - span, ctx := clh.trace(ctx, "hotplugRemoveDevice") + span, _ := clh.trace(ctx, "hotplugRemoveDevice") defer span.End() var deviceID string @@ -576,7 +576,7 @@ func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, m } cl := clh.client() - ctx, cancelResize := context.WithTimeout(context.Background(), clhAPITimeout*time.Second) + ctx, cancelResize := context.WithTimeout(ctx, clhAPITimeout*time.Second) defer cancelResize() // OpenApi does not support uint64, convert to int64 @@ -620,7 +620,7 @@ func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (c } // Resize (hot-plug) vCPUs via HTTP API - ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second) + ctx, cancel := context.WithTimeout(ctx, clhAPITimeout*time.Second) defer cancel() if _, err = cl.VmResizePut(ctx, chclient.VmResize{DesiredVcpus: int32(reqVCPUs)}); err != nil { return currentVCPUs, newVCPUs, errors.Wrap(err, "[clh] VmResizePut failed") @@ -653,7 +653,7 @@ func (clh *cloudHypervisor) resumeSandbox(ctx context.Context) error { // stopSandbox will stop the Sandbox's VM. func (clh *cloudHypervisor) stopSandbox(ctx context.Context) (err error) { - span, ctx := clh.trace(ctx, "stopSandbox") + span, _ := clh.trace(ctx, "stopSandbox") defer span.End() clh.Logger().WithField("function", "stopSandbox").Info("Stop Sandbox") return clh.terminate(ctx) @@ -757,7 +757,7 @@ func (clh *cloudHypervisor) trace(parent context.Context, name string) (otelTrac } func (clh *cloudHypervisor) terminate(ctx context.Context) (err error) { - span, ctx := clh.trace(ctx, "terminate") + span, _ := clh.trace(ctx, "terminate") defer span.End() pid := clh.state.PID diff --git a/src/runtime/virtcontainers/container.go b/src/runtime/virtcontainers/container.go index 18a5f7f8e0..13f2f8f6cd 100644 --- a/src/runtime/virtcontainers/container.go +++ b/src/runtime/virtcontainers/container.go @@ -700,7 +700,7 @@ func (c *Container) createBlockDevices(ctx context.Context) error { // newContainer creates a Container structure from a sandbox and a container configuration. 
func newContainer(ctx context.Context, sandbox *Sandbox, contConfig *ContainerConfig) (*Container, error) { - span, ctx := sandbox.trace(ctx, "newContainer") + span, _ := sandbox.trace(ctx, "newContainer") defer span.End() if !contConfig.valid() { diff --git a/src/runtime/virtcontainers/factory/factory.go b/src/runtime/virtcontainers/factory/factory.go index 6b6e99d2d2..34dabdc536 100644 --- a/src/runtime/virtcontainers/factory/factory.go +++ b/src/runtime/virtcontainers/factory/factory.go @@ -141,7 +141,7 @@ func (f *factory) checkConfig(config vc.VMConfig) error { // GetVM returns a working blank VM created by the factory. func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) { - span, ctx := trace(ctx, "GetVM") + span, _ := trace(ctx, "GetVM") defer span.End() hypervisorConfig := config.HypervisorConfig diff --git a/src/runtime/virtcontainers/fc.go b/src/runtime/virtcontainers/fc.go index 25aebd1d8f..3d2ae97f2d 100644 --- a/src/runtime/virtcontainers/fc.go +++ b/src/runtime/virtcontainers/fc.go @@ -200,7 +200,7 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N fc.ctx = ctx var span otelTrace.Span - span, ctx = fc.trace(ctx, "createSandbox") + span, _ = fc.trace(ctx, "createSandbox") defer span.End() //TODO: check validity of the hypervisor config provided @@ -325,7 +325,7 @@ func (fc *firecracker) checkVersion(version string) error { // waitVMMRunning will wait for timeout seconds for the VMM to be up and running. func (fc *firecracker) waitVMMRunning(ctx context.Context, timeout int) error { - span, ctx := fc.trace(ctx, "wait VMM to be running") + span, _ := fc.trace(ctx, "wait VMM to be running") defer span.End() if timeout < 0 { @@ -347,7 +347,7 @@ func (fc *firecracker) waitVMMRunning(ctx context.Context, timeout int) error { } func (fc *firecracker) fcInit(ctx context.Context, timeout int) error { - span, ctx := fc.trace(ctx, "fcInit") + span, _ := fc.trace(ctx, "fcInit") defer span.End() var err error @@ -467,7 +467,7 @@ func (fc *firecracker) fcEnd(ctx context.Context) (err error) { } func (fc *firecracker) client(ctx context.Context) *client.Firecracker { - span, ctx := fc.trace(ctx, "client") + span, _ := fc.trace(ctx, "client") defer span.End() if fc.connection == nil { @@ -762,7 +762,7 @@ func (fc *firecracker) fcInitConfiguration(ctx context.Context) error { // In the context of firecracker, this will start the hypervisor, // for configuration, but not yet start the actual virtual machine func (fc *firecracker) startSandbox(ctx context.Context, timeout int) error { - span, ctx := fc.trace(ctx, "startSandbox") + span, _ := fc.trace(ctx, "startSandbox") defer span.End() if err := fc.fcInitConfiguration(ctx); err != nil { @@ -875,7 +875,7 @@ func (fc *firecracker) cleanupJail(ctx context.Context) { // stopSandbox will stop the Sandbox's VM. 
func (fc *firecracker) stopSandbox(ctx context.Context) (err error) { - span, ctx := fc.trace(ctx, "stopSandbox") + span, _ := fc.trace(ctx, "stopSandbox") defer span.End() return fc.fcEnd(ctx) @@ -996,7 +996,7 @@ func (fc *firecracker) fcAddBlockDrive(ctx context.Context, drive config.BlockDr // Firecracker supports replacing the host drive used once the VM has booted up func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string) error { - span, ctx := fc.trace(ctx, "fcUpdateBlockDrive") + span, _ := fc.trace(ctx, "fcUpdateBlockDrive") defer span.End() // Use the global block index as an index into the pool of the devices @@ -1020,7 +1020,7 @@ func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string) // addDevice will add extra devices to firecracker. Limited to configure before the // virtual machine starts. Devices include drivers and network interfaces only. func (fc *firecracker) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error { - span, ctx := fc.trace(ctx, "addDevice") + span, _ := fc.trace(ctx, "addDevice") defer span.End() fc.state.RLock() @@ -1081,7 +1081,7 @@ func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.Bloc // hotplugAddDevice supported in Firecracker VMM func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) { - span, ctx := fc.trace(ctx, "hotplugAddDevice") + span, _ := fc.trace(ctx, "hotplugAddDevice") defer span.End() switch devType { @@ -1097,7 +1097,7 @@ func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{} // hotplugRemoveDevice supported in Firecracker VMM func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) { - span, ctx := fc.trace(ctx, "hotplugRemoveDevice") + span, _ := fc.trace(ctx, "hotplugRemoveDevice") defer span.End() switch devType { @@ -1245,9 +1245,8 @@ func revertBytes(num uint64) uint64 { b := num % 1000 if a == 0 { return num - } else { - return 1024*revertBytes(a) + b } + return 1024*revertBytes(a) + b } func (fc *firecracker) setSandbox(sandbox *Sandbox) { diff --git a/src/runtime/virtcontainers/fc_metrics.go b/src/runtime/virtcontainers/fc_metrics.go index 2a608508bb..bb0d423e58 100644 --- a/src/runtime/virtcontainers/fc_metrics.go +++ b/src/runtime/virtcontainers/fc_metrics.go @@ -174,11 +174,11 @@ func registerFirecrackerMetrics() { // updateFirecrackerMetrics update all metrics to the latest values. 
func updateFirecrackerMetrics(fm *FirecrackerMetrics) { - // set metrics for ApiServerMetrics - apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.ApiServer.ProcessStartupTimeUs)) - apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.ApiServer.ProcessStartupTimeCpuUs)) - apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.ApiServer.SyncResponseFails)) - apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.ApiServer.SyncVmmSendTimeoutCount)) + // set metrics for APIServerMetrics + apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.APIServer.ProcessStartupTimeUs)) + apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.APIServer.ProcessStartupTimeCPUUs)) + apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.APIServer.SyncResponseFails)) + apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.APIServer.SyncVmmSendTimeoutCount)) // set metrics for BlockDeviceMetrics blockDeviceMetrics.WithLabelValues("activate_fails").Set(float64(fm.Block.ActivateFails)) @@ -199,10 +199,10 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) { blockDeviceMetrics.WithLabelValues("rate_limiter_throttled_events").Set(float64(fm.Block.RateLimiterThrottledEvents)) // set metrics for GetRequestsMetrics - getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetApiRequests.InstanceInfoCount)) - getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetApiRequests.InstanceInfoFails)) - getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetApiRequests.MachineCfgCount)) - getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetApiRequests.MachineCfgFails)) + getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetAPIRequests.InstanceInfoCount)) + getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetAPIRequests.InstanceInfoFails)) + getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetAPIRequests.MachineCfgCount)) + getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetAPIRequests.MachineCfgFails)) // set metrics for I8042DeviceMetrics i8042DeviceMetrics.WithLabelValues("error_count").Set(float64(fm.I8042.ErrorCount)) @@ -216,13 +216,13 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) { performanceMetrics.WithLabelValues("full_create_snapshot").Set(float64(fm.LatenciesUs.FullCreateSnapshot)) performanceMetrics.WithLabelValues("diff_create_snapshot").Set(float64(fm.LatenciesUs.DiffCreateSnapshot)) performanceMetrics.WithLabelValues("load_snapshot").Set(float64(fm.LatenciesUs.LoadSnapshot)) - performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVm)) - performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVm)) + performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVM)) + performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVM)) performanceMetrics.WithLabelValues("vmm_full_create_snapshot").Set(float64(fm.LatenciesUs.VmmFullCreateSnapshot)) performanceMetrics.WithLabelValues("vmm_diff_create_snapshot").Set(float64(fm.LatenciesUs.VmmDiffCreateSnapshot)) performanceMetrics.WithLabelValues("vmm_load_snapshot").Set(float64(fm.LatenciesUs.VmmLoadSnapshot)) - performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVm)) - 
performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVm)) + performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVM)) + performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVM)) // set metrics for LoggerSystemMetrics loggerSystemMetrics.WithLabelValues("missed_metrics_count").Set(float64(fm.Logger.MissedMetricsCount)) @@ -273,28 +273,28 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) { netDeviceMetrics.WithLabelValues("tx_spoofed_mac_count").Set(float64(fm.Net.TxSpoofedMacCount)) // set metrics for PatchRequestsMetrics - patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchApiRequests.DriveCount)) - patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchApiRequests.DriveFails)) - patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchApiRequests.NetworkCount)) - patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchApiRequests.NetworkFails)) - patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchApiRequests.MachineCfgCount)) - patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchApiRequests.MachineCfgFails)) + patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchAPIRequests.DriveCount)) + patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchAPIRequests.DriveFails)) + patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchAPIRequests.NetworkCount)) + patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchAPIRequests.NetworkFails)) + patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchAPIRequests.MachineCfgCount)) + patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchAPIRequests.MachineCfgFails)) // set metrics for PutRequestsMetrics - putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutApiRequests.ActionsCount)) - putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutApiRequests.ActionsFails)) - putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutApiRequests.BootSourceCount)) - putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutApiRequests.BootSourceFails)) - putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutApiRequests.DriveCount)) - putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutApiRequests.DriveFails)) - putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutApiRequests.LoggerCount)) - putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutApiRequests.LoggerFails)) - putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutApiRequests.MachineCfgCount)) - putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutApiRequests.MachineCfgFails)) - putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutApiRequests.MetricsCount)) - putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutApiRequests.MetricsFails)) - putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutApiRequests.NetworkCount)) - putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutApiRequests.NetworkFails)) + putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutAPIRequests.ActionsCount)) + putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutAPIRequests.ActionsFails)) + 
putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutAPIRequests.BootSourceCount)) + putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutAPIRequests.BootSourceFails)) + putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutAPIRequests.DriveCount)) + putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutAPIRequests.DriveFails)) + putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutAPIRequests.LoggerCount)) + putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutAPIRequests.LoggerFails)) + putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutAPIRequests.MachineCfgCount)) + putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutAPIRequests.MachineCfgFails)) + putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutAPIRequests.MetricsCount)) + putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutAPIRequests.MetricsFails)) + putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutAPIRequests.NetworkCount)) + putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutAPIRequests.NetworkFails)) // set metrics for RTCDeviceMetrics rTCDeviceMetrics.WithLabelValues("error_count").Set(float64(fm.Rtc.ErrorCount)) @@ -310,7 +310,7 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) { vcpuMetrics.WithLabelValues("exit_mmio_read").Set(float64(fm.Vcpu.ExitMmioRead)) vcpuMetrics.WithLabelValues("exit_mmio_write").Set(float64(fm.Vcpu.ExitMmioWrite)) vcpuMetrics.WithLabelValues("failures").Set(float64(fm.Vcpu.Failures)) - vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCpuid)) + vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCPUid)) // set metrics for VmmMetrics vmmMetrics.WithLabelValues("device_events").Set(float64(fm.Vmm.DeviceEvents)) @@ -355,11 +355,11 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) { // Structure storing all metrics while enforcing serialization support on them. type FirecrackerMetrics struct { // API Server related metrics. - ApiServer ApiServerMetrics `json:"api_server"` + APIServer APIServerMetrics `json:"api_server"` // A block device's related metrics. Block BlockDeviceMetrics `json:"block"` // Metrics related to API GET requests. - GetApiRequests GetRequestsMetrics `json:"get_api_requests"` + GetAPIRequests GetRequestsMetrics `json:"get_api_requests"` // Metrics related to the i8042 device. I8042 I8042DeviceMetrics `json:"i8042"` // Metrics related to performance measurements. @@ -371,9 +371,9 @@ type FirecrackerMetrics struct { // A network device's related metrics. Net NetDeviceMetrics `json:"net"` // Metrics related to API PATCH requests. - PatchApiRequests PatchRequestsMetrics `json:"patch_api_requests"` + PatchAPIRequests PatchRequestsMetrics `json:"patch_api_requests"` // Metrics related to API PUT requests. - PutApiRequests PutRequestsMetrics `json:"put_api_requests"` + PutAPIRequests PutRequestsMetrics `json:"put_api_requests"` // Metrics related to the RTC device. Rtc RTCDeviceMetrics `json:"rtc"` // Metrics related to seccomp filtering. @@ -391,11 +391,11 @@ type FirecrackerMetrics struct { } // API Server related metrics. -type ApiServerMetrics struct { +type APIServerMetrics struct { // Measures the process's startup time in microseconds. ProcessStartupTimeUs uint64 `json:"process_startup_time_us"` // Measures the cpu's startup time in microseconds. 
- ProcessStartupTimeCpuUs uint64 `json:"process_startup_time_cpu_us"` + ProcessStartupTimeCPUUs uint64 `json:"process_startup_time_cpu_us"` // Number of failures on API requests triggered by internal errors. SyncResponseFails uint64 `json:"sync_response_fails"` // Number of timeouts during communication with the VMM. @@ -475,9 +475,9 @@ type PerformanceMetrics struct { // Measures the snapshot load time, at the API (user) level, in microseconds. LoadSnapshot uint64 `json:"load_snapshot"` // Measures the microVM pausing duration, at the API (user) level, in microseconds. - PauseVm uint64 `json:"pause_vm"` + PauseVM uint64 `json:"pause_vm"` // Measures the microVM resuming duration, at the API (user) level, in microseconds. - ResumeVm uint64 `json:"resume_vm"` + ResumeVM uint64 `json:"resume_vm"` // Measures the snapshot full create time, at the VMM level, in microseconds. VmmFullCreateSnapshot uint64 `json:"vmm_full_create_snapshot"` // Measures the snapshot diff create time, at the VMM level, in microseconds. @@ -485,9 +485,9 @@ type PerformanceMetrics struct { // Measures the snapshot load time, at the VMM level, in microseconds. VmmLoadSnapshot uint64 `json:"vmm_load_snapshot"` // Measures the microVM pausing duration, at the VMM level, in microseconds. - VmmPauseVm uint64 `json:"vmm_pause_vm"` + VmmPauseVM uint64 `json:"vmm_pause_vm"` // Measures the microVM resuming duration, at the VMM level, in microseconds. - VmmResumeVm uint64 `json:"vmm_resume_vm"` + VmmResumeVM uint64 `json:"vmm_resume_vm"` } // Logging related metrics. @@ -662,7 +662,7 @@ type VcpuMetrics struct { // Number of errors during this VCPU's run. Failures uint64 `json:"failures"` // Failures in configuring the CPUID. - FilterCpuid uint64 `json:"filter_cpuid"` + FilterCPUid uint64 `json:"filter_cpuid"` } // Metrics related to the virtual machine manager. 
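
The wholesale renames in fc_metrics.go above (ApiServer to APIServer, PauseVm to PauseVM, FilterCpuid to FilterCPUid, plus getSandboxIdFromReq to getSandboxIDFromReq earlier in the patch) follow Go's initialism convention, the kind of thing golint/revive's var-naming check reports: acronyms keep a consistent case. A tiny standalone illustration, with names mirroring the patch:

package main

import "fmt"

// Flagged by the naming check: Api, Id and Cpu should be API, ID and CPU.
//   type ApiServerMetrics struct{ ProcessStartupTimeCpuUs uint64 }
//   func getSandboxIdFromReq(id string) string { return id }

// Preferred spellings, matching the renames in this patch.
type APIServerMetrics struct {
	ProcessStartupTimeCPUUs uint64
}

func getSandboxIDFromReq(id string) string { return id }

func main() {
	m := APIServerMetrics{ProcessStartupTimeCPUUs: 42}
	fmt.Println(m.ProcessStartupTimeCPUUs, getSandboxIDFromReq("sandbox-1"))
}
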
diff --git a/src/runtime/virtcontainers/kata_agent.go b/src/runtime/virtcontainers/kata_agent.go
index 2e1bd72761..a288288ee0 100644
--- a/src/runtime/virtcontainers/kata_agent.go
+++ b/src/runtime/virtcontainers/kata_agent.go
@@ -26,7 +26,6 @@ import (
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
     vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
     vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
-    ns "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/nsenter"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
     vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/uuid"
@@ -496,7 +495,7 @@
 }

 func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
-    span, ctx := k.trace(ctx, "createSandbox")
+    span, _ := k.trace(ctx, "createSandbox")
     defer span.End()

     if err := k.setupSharedPath(ctx, sandbox); err != nil {
@@ -583,7 +582,7 @@ func cmdEnvsToStringSlice(ev []types.EnvVar) []string {
 }

 func (k *kataAgent) exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
-    span, ctx := k.trace(ctx, "exec")
+    span, _ := k.trace(ctx, "exec")
     defer span.End()

     var kataProcess *grpc.Process
@@ -755,7 +754,7 @@ func (k *kataAgent) getDNS(sandbox *Sandbox) ([]string, error) {
 }

 func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
-    span, ctx := k.trace(ctx, "startSandbox")
+    span, _ := k.trace(ctx, "startSandbox")
     defer span.End()

     if err := k.setAgentURL(); err != nil {
@@ -910,7 +909,7 @@ func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage {
 }

 func (k *kataAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error {
-    span, ctx := k.trace(ctx, "stopSandbox")
+    span, _ := k.trace(ctx, "stopSandbox")
     defer span.End()

     req := &grpc.DestroySandboxRequest{}
@@ -1272,7 +1271,7 @@ func (k *kataAgent) buildContainerRootfs(ctx context.Context, sandbox *Sandbox,
 }

 func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (p *Process, err error) {
-    span, ctx := k.trace(ctx, "createContainer")
+    span, _ := k.trace(ctx, "createContainer")
     defer span.End()

     var ctrStorages []*grpc.Storage
@@ -1384,14 +1383,6 @@ func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Co
         return nil, err
     }

-    enterNSList := []ns.Namespace{}
-    if sandbox.networkNS.NetNsPath != "" {
-        enterNSList = append(enterNSList, ns.Namespace{
-            Path: sandbox.networkNS.NetNsPath,
-            Type: ns.NSTypeNet,
-        })
-    }
-
     return buildProcessFromExecID(req.ExecId)
 }
@@ -1602,7 +1593,7 @@ func (k *kataAgent) handlePidNamespace(grpcSpec *grpc.Spec, sandbox *Sandbox) bo
 }

 func (k *kataAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error {
-    span, ctx := k.trace(ctx, "startContainer")
+    span, _ := k.trace(ctx, "startContainer")
     defer span.End()

     req := &grpc.StartContainerRequest{
@@ -1614,7 +1605,7 @@ func (k *kataAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Con
 }

 func (k *kataAgent) stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
-    span, ctx := k.trace(ctx, "stopContainer")
+    span, _ := k.trace(ctx, "stopContainer")
     defer span.End()

     _, err := k.sendReq(ctx, &grpc.RemoveContainerRequest{ContainerId: c.id})
@@ -1778,7 +1769,7 @@ func (k *kataAgent) connect(ctx context.Context) error {
         return nil
     }

-    span, ctx := k.trace(ctx, "connect")
+    span, _ := k.trace(ctx, "connect")
     defer span.End()

     // This is for the first connection only, to prevent race
@@ -1824,7 +1815,7 @@ func (k *kataAgent) disconnect(ctx context.Context) error {

 // check grpc server is serving
 func (k *kataAgent) check(ctx context.Context) error {
-    span, ctx := k.trace(ctx, "check")
+    span, _ := k.trace(ctx, "check")
     defer span.End()

     _, err := k.sendReq(ctx, &grpc.CheckRequest{})
@@ -1835,7 +1826,7 @@ func (k *kataAgent) check(ctx context.Context) error {
 }

 func (k *kataAgent) waitProcess(ctx context.Context, c *Container, processID string) (int32, error) {
-    span, ctx := k.trace(ctx, "waitProcess")
+    span, _ := k.trace(ctx, "waitProcess")
     defer span.End()

     resp, err := k.sendReq(ctx, &grpc.WaitProcessRequest{
@@ -2022,7 +2013,7 @@ func (k *kataAgent) sendReq(spanCtx context.Context, request interface{}) (inter
     k.Logger().WithField("name", msgName).WithField("req", message.String()).Debug("sending request")

     defer func() {
-        agentRpcDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
+        agentRPCDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
     }()
     return handler(ctx, request)
 }
diff --git a/src/runtime/virtcontainers/kata_agent_test.go b/src/runtime/virtcontainers/kata_agent_test.go
index bf3acf7205..cafc1c45a8 100644
--- a/src/runtime/virtcontainers/kata_agent_test.go
+++ b/src/runtime/virtcontainers/kata_agent_test.go
@@ -360,6 +360,7 @@ func TestHandleBlockVolume(t *testing.T) {
     bPCIPath, err := vcTypes.PciPathFromString("03/04")
     assert.NoError(t, err)
     dPCIPath, err := vcTypes.PciPathFromString("04/05")
+    assert.NoError(t, err)

     vDev := drivers.NewVhostUserBlkDevice(&config.DeviceInfo{ID: vDevID})
     bDev := drivers.NewBlockDevice(&config.DeviceInfo{ID: bDevID})
diff --git a/src/runtime/virtcontainers/mock_agent.go b/src/runtime/virtcontainers/mock_agent.go
index 8aac06d872..6807ac72a5 100644
--- a/src/runtime/virtcontainers/mock_agent.go
+++ b/src/runtime/virtcontainers/mock_agent.go
@@ -22,6 +22,7 @@ import (
 type mockAgent struct {
 }

+// nolint:golint
 func NewMockAgent() agent {
     return &mockAgent{}
 }
@@ -237,6 +238,6 @@ func (n *mockAgent) getOOMEvent(ctx context.Context) (string, error) {
     return "", nil
 }

-func (k *mockAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
+func (n *mockAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
     return nil, nil
 }
diff --git a/src/runtime/virtcontainers/mount.go b/src/runtime/virtcontainers/mount.go
index a562ee3a6f..16234ac1e8 100644
--- a/src/runtime/virtcontainers/mount.go
+++ b/src/runtime/virtcontainers/mount.go
@@ -219,7 +219,7 @@ const mountPerm = os.FileMode(0755)
 // * recursively create the destination
 // pgtypes stands for propagation types, which are shared, private, slave, and ubind.
 func bindMount(ctx context.Context, source, destination string, readonly bool, pgtypes string) error {
-    span, ctx := trace(ctx, "bindMount")
+    span, _ := trace(ctx, "bindMount")
     defer span.End()

     if source == "" {
@@ -347,7 +347,7 @@ func bindUnmountContainerRootfs(ctx context.Context, sharedDir, cID string) erro
 }

 func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbox) error {
-    span, ctx := trace(ctx, "bindUnmountAllRootfs")
+    span, _ := trace(ctx, "bindUnmountAllRootfs")
     defer span.End()

     var errors *merr.Error
diff --git a/src/runtime/virtcontainers/network.go b/src/runtime/virtcontainers/network.go
index 42739118d8..3c51c0fb58 100644
--- a/src/runtime/virtcontainers/network.go
+++ b/src/runtime/virtcontainers/network.go
@@ -1273,7 +1273,7 @@ func (n *Network) Run(ctx context.Context, networkNSPath string, cb func() error

 // Add adds all needed interfaces inside the network namespace.
 func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, hotplug bool) ([]Endpoint, error) {
-    span, ctx := n.trace(ctx, "Add")
+    span, _ := n.trace(ctx, "Add")
     defer span.End()

     endpoints, err := createEndpointsFromScan(config.NetNSPath, config)
@@ -1354,7 +1354,7 @@ func (n *Network) PostAdd(ctx context.Context, ns *NetworkNamespace, hotplug boo
 // Remove network endpoints in the network namespace. It also deletes the network
 // namespace in case the namespace has been created by us.
 func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor hypervisor) error {
-    span, ctx := n.trace(ctx, "Remove")
+    span, _ := n.trace(ctx, "Remove")
     defer span.End()

     for _, endpoint := range ns.Endpoints {
diff --git a/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go b/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go
index 961fc959c7..2e21779334 100644
--- a/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go
+++ b/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go
@@ -34,7 +34,6 @@ const (
 )

 var defaultDialTimeout = 15 * time.Second
-var defaultCloseTimeout = 5 * time.Second

 var hybridVSockPort uint32
@@ -70,8 +69,7 @@ func NewAgentClient(ctx context.Context, sock string) (*AgentClient, error) {
     }

     var conn net.Conn
-    var d dialer
-    d = agentDialer(parsedAddr)
+    var d = agentDialer(parsedAddr)
     conn, err = d(grpcAddr, defaultDialTimeout)
     if err != nil {
         return nil, err
diff --git a/src/runtime/virtcontainers/pkg/oci/utils.go b/src/runtime/virtcontainers/pkg/oci/utils.go
index 1fa885bb80..0663318046 100644
--- a/src/runtime/virtcontainers/pkg/oci/utils.go
+++ b/src/runtime/virtcontainers/pkg/oci/utils.go
@@ -100,19 +100,25 @@ type RuntimeConfig struct {
     AgentConfig vc.KataAgentConfig

-    Console string
-
     //Determines how the VM should be connected to the
     //the container network interface
     InterNetworkModel vc.NetInterworkingModel
     FactoryConfig FactoryConfig
-    Debug bool
-    Trace bool

+    Console        string
     JaegerEndpoint string
     JaegerUser     string
     JaegerPassword string

+    //Paths to be bindmounted RO into the guest.
+    SandboxBindMounts []string
+
+    //Experimental features enabled
+    Experimental []exp.Feature
+
+    Debug bool
+    Trace bool
+
     //Determines if seccomp should be applied inside guest
     DisableGuestSeccomp bool
@@ -122,12 +128,6 @@ type RuntimeConfig struct {
     //Determines kata processes are managed only in sandbox cgroup
     SandboxCgroupOnly bool

-    //Paths to be bindmounted RO into the guest.
-    SandboxBindMounts []string
-
-    //Experimental features enabled
-    Experimental []exp.Feature
-
     // Determines if enable pprof
     EnablePprof bool
 }
@@ -819,7 +819,7 @@ func addHypervisporNetworkOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConf
     if value, ok := ocispec.Annotations[vcAnnotations.RxRateLimiterMaxRate]; ok {
         rxRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64)
-        if err != nil || rxRateLimiterMaxRate < 0 {
+        if err != nil {
             return fmt.Errorf("Error parsing annotation for rx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err)
         }
         sbConfig.HypervisorConfig.RxRateLimiterMaxRate = rxRateLimiterMaxRate
@@ -827,7 +827,7 @@ func addHypervisporNetworkOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConf
     if value, ok := ocispec.Annotations[vcAnnotations.TxRateLimiterMaxRate]; ok {
         txRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64)
-        if err != nil || txRateLimiterMaxRate < 0 {
+        if err != nil {
             return fmt.Errorf("Error parsing annotation for tx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err)
         }
         sbConfig.HypervisorConfig.TxRateLimiterMaxRate = txRateLimiterMaxRate
diff --git a/src/runtime/virtcontainers/qemu.go b/src/runtime/virtcontainers/qemu.go
index 192ce53d91..0c070bf99c 100644
--- a/src/runtime/virtcontainers/qemu.go
+++ b/src/runtime/virtcontainers/qemu.go
@@ -121,13 +121,9 @@ const (
     scsiControllerID = "scsi0"
     rngID            = "rng0"
-    vsockKernelOption = "agent.use_vsock"
     fallbackFileBackedMemDir = "/dev/shm"
 )

-var qemuMajorVersion int
-var qemuMinorVersion int
-
 // agnostic list of kernel parameters
 var defaultKernelParameters = []Param{
     {"panic", "1"},
@@ -472,7 +468,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
     // Save the tracing context
     q.ctx = ctx

-    span, ctx := q.trace(ctx, "createSandbox")
+    span, _ := q.trace(ctx, "createSandbox")
     defer span.End()

     if err := q.setup(ctx, id, hypervisorConfig); err != nil {
@@ -776,7 +772,7 @@ func (q *qemu) setupVirtioMem() error {

 // startSandbox will start the Sandbox's VM.
 func (q *qemu) startSandbox(ctx context.Context, timeout int) error {
-    span, ctx := q.trace(ctx, "startSandbox")
+    span, _ := q.trace(ctx, "startSandbox")
     defer span.End()

     if q.config.Debug {
@@ -921,9 +917,6 @@ func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
     q.qmpMonitorCh.disconn = disconnectCh
     defer q.qmpShutdown()

-    qemuMajorVersion = ver.Major
-    qemuMinorVersion = ver.Minor
-
     q.Logger().WithFields(logrus.Fields{
         "qmp-major-version": ver.Major,
         "qmp-minor-version": ver.Minor,
@@ -1024,9 +1017,8 @@ func (q *qemu) togglePauseSandbox(ctx context.Context, pause bool) error {

     if pause {
         return q.qmpMonitorCh.qmp.ExecuteStop(q.qmpMonitorCh.ctx)
-    } else {
-        return q.qmpMonitorCh.qmp.ExecuteCont(q.qmpMonitorCh.ctx)
     }
+    return q.qmpMonitorCh.qmp.ExecuteCont(q.qmpMonitorCh.ctx)
 }

 func (q *qemu) qmpSetup() error {
@@ -1067,19 +1059,13 @@
 }

 func (q *qemu) loopQMPEvent(event chan govmmQemu.QMPEvent) {
-    for {
-        select {
-        case e, open := <-event:
-            if !open {
-                q.Logger().Infof("QMP event channel closed")
-                return
-            }
-            q.Logger().WithField("event", e).Debug("got QMP event")
-            if e.Name == "GUEST_PANICKED" {
-                go q.handleGuestPanic()
-            }
+    for e := range event {
+        q.Logger().WithField("event", e).Debug("got QMP event")
+        if e.Name == "GUEST_PANICKED" {
+            go q.handleGuestPanic()
         }
     }
+    q.Logger().Infof("QMP event channel closed")
 }

 func (q *qemu) handleGuestPanic() {
@@ -1116,13 +1102,12 @@ func (q *qemu) canDumpGuestMemory(dumpSavePath string) error {
     exceptMemorySize := guestMemorySizeInBytes * 2
     if availSpaceInBytes >= exceptMemorySize {
         return nil
-    } else {
-        return fmt.Errorf("there are not enough free space to store memory dump file. Except %d bytes, but only %d bytes available", exceptMemorySize, availSpaceInBytes)
     }
+    return fmt.Errorf("there are not enough free space to store memory dump file. Except %d bytes, but only %d bytes available", exceptMemorySize, availSpaceInBytes)
 }

 // dumpSandboxMetaInfo save meta information for debug purpose, includes:
-// hypervisor verison, sandbox/container state, hypervisor config
+// hypervisor version, sandbox/container state, hypervisor config
 func (q *qemu) dumpSandboxMetaInfo(dumpSavePath string) {
     dumpStatePath := filepath.Join(dumpSavePath, "state")
@@ -1377,19 +1362,18 @@ func (q *qemu) hotplugBlockDevice(ctx context.Context, drive *config.BlockDrive,

     if op == addDevice {
         return q.hotplugAddBlockDevice(ctx, drive, op, devID)
-    } else {
-        if q.config.BlockDeviceDriver == config.VirtioBlock {
-            if err := q.arch.removeDeviceFromBridge(drive.ID); err != nil {
-                return err
-            }
-        }
-
-        if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, devID); err != nil {
+    }
+    if q.config.BlockDeviceDriver == config.VirtioBlock {
+        if err := q.arch.removeDeviceFromBridge(drive.ID); err != nil {
             return err
         }
-
-        return q.qmpMonitorCh.qmp.ExecuteBlockdevDel(q.qmpMonitorCh.ctx, drive.ID)
     }
+
+    if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, devID); err != nil {
+        return err
+    }
+
+    return q.qmpMonitorCh.qmp.ExecuteBlockdevDel(q.qmpMonitorCh.ctx, drive.ID)
 }

 func (q *qemu) hotplugVhostUserDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation) error {
@@ -1625,7 +1609,7 @@ func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType d
 }

 func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-    span, ctx := q.trace(ctx, "hotplugAddDevice")
+    span, _ := q.trace(ctx, "hotplugAddDevice")
     defer span.End()

     data, err := q.hotplugDevice(ctx, devInfo, devType, addDevice)
@@ -1637,7 +1621,7 @@ func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devTyp
 }

 func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-    span, ctx := q.trace(ctx, "hotplugRemoveDevice")
+    span, _ := q.trace(ctx, "hotplugRemoveDevice")
     defer span.End()

     data, err := q.hotplugDevice(ctx, devInfo, devType, removeDevice)
@@ -1849,14 +1833,14 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
 }

 func (q *qemu) pauseSandbox(ctx context.Context) error {
-    span, ctx := q.trace(ctx, "pauseSandbox")
+    span, _ := q.trace(ctx, "pauseSandbox")
     defer span.End()

     return q.togglePauseSandbox(ctx, true)
 }

 func (q *qemu) resumeSandbox(ctx context.Context) error {
-    span, ctx := q.trace(ctx, "resumeSandbox")
+    span, _ := q.trace(ctx, "resumeSandbox")
     defer span.End()

     return q.togglePauseSandbox(ctx, false)
diff --git a/src/runtime/virtcontainers/sandbox.go b/src/runtime/virtcontainers/sandbox.go
index ec831913a1..66a77a31a5 100644
--- a/src/runtime/virtcontainers/sandbox.go
+++ b/src/runtime/virtcontainers/sandbox.go
@@ -445,7 +445,7 @@ func (s *Sandbox) getAndStoreGuestDetails(ctx context.Context) error {
 // to physically create that sandbox i.e. starts a VM for that sandbox to eventually
 // be started.
 func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
-    span, ctx := trace(ctx, "createSandbox")
+    span, _ := trace(ctx, "createSandbox")
     defer span.End()

     if err := createAssets(ctx, &sandboxConfig); err != nil {
@@ -483,7 +483,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
 }

 func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (sb *Sandbox, retErr error) {
-    span, ctx := trace(ctx, "newSandbox")
+    span, _ := trace(ctx, "newSandbox")
     defer span.End()

     if !sandboxConfig.valid() {
@@ -618,7 +618,7 @@ func (s *Sandbox) createCgroupManager() error {

 // storeSandbox stores a sandbox config.
 func (s *Sandbox) storeSandbox(ctx context.Context) error {
-    span, ctx := s.trace(ctx, "storeSandbox")
+    span, _ := s.trace(ctx, "storeSandbox")
     defer span.End()

     // flush data to storage
@@ -628,15 +628,6 @@ func (s *Sandbox) storeSandbox(ctx context.Context) error {
     return nil
 }

-func rLockSandbox(sandboxID string) (func() error, error) {
-    store, err := persist.GetDriver()
-    if err != nil {
-        return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
-    }
-
-    return store.Lock(sandboxID, false)
-}
-
 func rwLockSandbox(sandboxID string) (func() error, error) {
     store, err := persist.GetDriver()
     if err != nil {
@@ -761,7 +752,7 @@ func (s *Sandbox) createNetwork(ctx context.Context) error {
         return nil
     }

-    span, ctx := s.trace(ctx, "createNetwork")
+    span, _ := s.trace(ctx, "createNetwork")
     defer span.End()

     s.networkNS = NetworkNamespace{
@@ -951,7 +942,7 @@ func (cw *consoleWatcher) start(s *Sandbox) (err error) {
         scanner = bufio.NewScanner(cw.conn)
     case consoleProtoPty:
         // read-only
-        cw.ptyConsole, err = os.Open(cw.consoleURL)
+        cw.ptyConsole, _ = os.Open(cw.consoleURL)
         scanner = bufio.NewScanner(cw.ptyConsole)
     default:
         return fmt.Errorf("unknown console proto %s", cw.proto)
@@ -1003,7 +994,7 @@ func (cw *consoleWatcher) stop() {

 // startVM starts the VM.
 func (s *Sandbox) startVM(ctx context.Context) (err error) {
-    span, ctx := s.trace(ctx, "startVM")
+    span, _ := s.trace(ctx, "startVM")
     defer span.End()

     s.Logger().Info("Starting VM")
@@ -1084,7 +1075,7 @@ func (s *Sandbox) startVM(ctx context.Context) (err error) {

 // stopVM: stop the sandbox's VM
 func (s *Sandbox) stopVM(ctx context.Context) error {
-    span, ctx := s.trace(ctx, "stopVM")
+    span, _ := s.trace(ctx, "stopVM")
     defer span.End()

     s.Logger().Info("Stopping sandbox in the VM")
@@ -1460,7 +1451,7 @@ func (s *Sandbox) ResumeContainer(ctx context.Context, containerID string) error
 // createContainers registers all containers, create the
 // containers in the guest and starts one shim per container.
 func (s *Sandbox) createContainers(ctx context.Context) error {
-    span, ctx := s.trace(ctx, "createContainers")
+    span, _ := s.trace(ctx, "createContainers")
     defer span.End()

     for _, contConfig := range s.config.Containers {
@@ -1532,7 +1523,7 @@ func (s *Sandbox) Start(ctx context.Context) error {
 // will be destroyed.
 // When force is true, ignore guest related stop failures.
 func (s *Sandbox) Stop(ctx context.Context, force bool) error {
-    span, ctx := s.trace(ctx, "Stop")
+    span, _ := s.trace(ctx, "Stop")
     defer span.End()

     if s.state.State == types.StateStopped {
@@ -1643,7 +1634,7 @@ func (s *Sandbox) unsetSandboxBlockIndex(index int) error {
 // HotplugAddDevice is used for add a device to sandbox
 // Sandbox implement DeviceReceiver interface from device/api/interface.go
 func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
-    span, ctx := s.trace(ctx, "HotplugAddDevice")
+    span, _ := s.trace(ctx, "HotplugAddDevice")
     defer span.End()

     if s.config.SandboxCgroupOnly {
diff --git a/src/runtime/virtcontainers/sandbox_metrics.go b/src/runtime/virtcontainers/sandbox_metrics.go
index 51e94c0ca4..49e115cfcb 100644
--- a/src/runtime/virtcontainers/sandbox_metrics.go
+++ b/src/runtime/virtcontainers/sandbox_metrics.go
@@ -62,7 +62,7 @@ var (
         Help: "Open FDs for hypervisor.",
     })

-    agentRpcDurationsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+    agentRPCDurationsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
         Namespace: namespaceKatashim,
         Name:      "agent_rpc_durations_histogram_milliseconds",
         Help:      "RPC latency distributions.",
@@ -79,7 +79,7 @@ func RegisterMetrics() {
     prometheus.MustRegister(hypervisorNetdev)
     prometheus.MustRegister(hypervisorIOStat)
     prometheus.MustRegister(hypervisorOpenFDs)
-    prometheus.MustRegister(agentRpcDurationsHistogram)
+    prometheus.MustRegister(agentRPCDurationsHistogram)
 }

 // UpdateRuntimeMetrics update shim/hypervisor's metrics