runtime: fix static check errors

It turns out we have managed to break the static checker in many
different places while it was absent from GitHub Actions.
Let's fix them while enabling the static checker in GitHub Actions...

Signed-off-by: Peng Tao <bergwolf@hyper.sh>
Peng Tao 2021-03-24 16:36:00 +08:00
parent a2dee1f6a0
commit 74192d179d
37 changed files with 203 additions and 378 deletions
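Most of the findings fixed below fall into a few recurring classes: dead assignments (trace contexts assigned but never read), comparisons against boolean constants, capitalized error strings, always-false comparisons on unsigned values, unused declarations, misspelled identifiers, and else branches after a return. A minimal sketch of three of these classes and their fixes, using simplified stand-ins (hypothetical function names) for the real call sites:

    package main

    import (
        "errors"
        "fmt"
    )

    // gosimple S1002: omit comparison with the boolean constant false.
    // before: if noChecks == false { ... }
    func needsNetworkChecks(noChecks bool) bool {
        return !noChecks
    }

    // stylecheck ST1005: error strings should not be capitalized or end
    // with punctuation.
    // before: fmt.Errorf("Containerd serve address missing.")
    func validateAddr(addr string) error {
        if addr == "" {
            return errors.New("containerd serve address missing")
        }
        return nil
    }

    // golint: drop the else when the if branch ends in a return
    // (see the revertBytes hunk below).
    func revertBytes(num uint64) uint64 {
        a := num / 1000
        b := num % 1000
        if a == 0 {
            return num
        }
        return 1024*revertBytes(a) + b
    }

    func main() {
        fmt.Println(needsNetworkChecks(false), validateAddr(""), revertBytes(1024000))
    }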

View File

@@ -63,7 +63,6 @@ const (
 	moduleParamDir = "parameters"
 	successMessageCapable = "System is capable of running " + project
 	successMessageCreate = "System can currently create " + project
-	successMessageVersion = "Version consistency of " + project + " is verified"
 	failMessage = "System is not capable of running " + project
 	kernelPropertyCorrect = "Kernel property value correct"
@@ -398,7 +397,7 @@ EXAMPLES:
 	span, _ := katautils.Trace(ctx, "check")
 	defer span.End()
-	if context.Bool("no-network-checks") == false && os.Getenv(noNetworkEnvVar) == "" {
+	if !context.Bool("no-network-checks") && os.Getenv(noNetworkEnvVar) == "" {
 		cmd := RelCmdCheck
 		if context.Bool("only-list-releases") {

View File

@@ -48,9 +48,9 @@ var versionTemplate = `{{.AppName}}
 `
 func printVersion(ver versionInfo) {
-	t, err := template.New("version").Parse(versionTemplate)
-	if err = t.Execute(os.Stdout, ver); err != nil {
+	t, _ := template.New("version").Parse(versionTemplate)
+	if err := t.Execute(os.Stdout, ver); err != nil {
 		panic(err)
 	}
 }

View File

@@ -64,9 +64,6 @@ var originalLoggerLevel = logrus.WarnLevel
 var debug = false
-// if true, coredump when an internal error occurs or a fatal signal is received
-var crashOnError = false
 // concrete virtcontainer implementation
 var virtcontainersImpl = &vc.VCImpl{}
@@ -328,7 +325,6 @@ func beforeSubcommands(c *cli.Context) error {
 	}
 	if !subCmdIsCheckCmd {
 		debug = runtimeConfig.Debug
-		crashOnError = runtimeConfig.Debug
 		if traceRootSpan != "" {
 			// Create the tracer.

View File

@@ -8,7 +8,6 @@ package main
 import (
 	"bytes"
 	"context"
-	"encoding/json"
 	"errors"
 	"flag"
 	"fmt"
@@ -29,7 +28,6 @@ import (
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/stretchr/testify/assert"
@@ -44,10 +42,8 @@ const (
 	// small docker image used to create root filesystems from
 	testDockerImage = "busybox"
-	testSandboxID = "99999999-9999-9999-99999999999999999"
-	testContainerID = "1"
 	testBundle = "bundle"
 	testConsole = "/dev/pts/999"
 )
 var (
@@ -387,44 +383,6 @@ func makeOCIBundle(bundleDir string) error {
 	return nil
 }
-func writeOCIConfigFile(spec specs.Spec, configPath string) error {
-	if configPath == "" {
-		return errors.New("BUG: need config file path")
-	}
-	bytes, err := json.MarshalIndent(spec, "", "\t")
-	if err != nil {
-		return err
-	}
-	return ioutil.WriteFile(configPath, bytes, testFileMode)
-}
-func newSingleContainerStatus(containerID string, containerState types.ContainerState, annotations map[string]string, spec *specs.Spec) vc.ContainerStatus {
-	return vc.ContainerStatus{
-		ID: containerID,
-		State: containerState,
-		Annotations: annotations,
-		Spec: spec,
-	}
-}
-func execCLICommandFunc(assertHandler *assert.Assertions, cliCommand cli.Command, set *flag.FlagSet, expectedErr bool) {
-	ctx := createCLIContext(set)
-	ctx.App.Name = "foo"
-	fn, ok := cliCommand.Action.(func(context *cli.Context) error)
-	assertHandler.True(ok)
-	err := fn(ctx)
-	if expectedErr {
-		assertHandler.Error(err)
-	} else {
-		assertHandler.Nil(err)
-	}
-}
 func createCLIContextWithApp(flagSet *flag.FlagSet, app *cli.App) *cli.Context {
 	ctx := cli.NewContext(app, flagSet, nil)

View File

@@ -458,7 +458,7 @@ func TestGetNewReleaseType(t *testing.T) {
 	}
 	data := []testData{
-		// Check build metadata (ignored for version comparisions)
+		// Check build metadata (ignored for version comparisons)
 		{"2.0.0+build", "2.0.0", true, ""},
 		{"2.0.0+build-1", "2.0.0+build-2", true, ""},
 		{"1.12.0+build", "1.12.0", true, ""},

View File

@@ -189,21 +189,3 @@ func constructVersionInfo(version string) VersionInfo {
 	}
 }
-func versionEqual(a VersionInfo, b VersionInfo) bool {
-	av, err := semver.Make(a.Semver)
-	if err != nil {
-		return false
-	}
-	bv, err := semver.Make(b.Semver)
-	if err != nil {
-		return false
-	}
-	if av.Major == bv.Major && av.Minor == bv.Minor && av.Patch == bv.Patch {
-		return true
-	}
-	return false
-}

View File

@ -1,10 +0,0 @@
// +build !s390x
//
// SPDX-License-Identifier: Apache-2.0
//
package main
func archConvertStatFs(cgroupFsType int) int64 {
return int64(cgroupFsType)
}

View File

@ -1,10 +0,0 @@
// Copyright (c) 2018 IBM
//
// SPDX-License-Identifier: Apache-2.0
//
package main
func archConvertStatFs(cgroupFsType int) uint32 {
return uint32(cgroupFsType)
}

View File

@@ -301,7 +301,7 @@ func trace(ctx context.Context, name string) (otelTrace.Span, context.Context) {
 }
 func (s *service) Cleanup(ctx context.Context) (_ *taskAPI.DeleteResponse, err error) {
-	span, ctx := trace(s.rootCtx, "Cleanup")
+	span, _ := trace(s.rootCtx, "Cleanup")
 	defer span.End()
 	//Since the binary cleanup will return the DeleteResponse from stdout to
@@ -412,7 +412,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *
 // Start a process
 func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAPI.StartResponse, err error) {
-	span, ctx := trace(s.rootCtx, "Start")
+	span, _ := trace(s.rootCtx, "Start")
 	defer span.End()
 	start := time.Now()
@@ -463,7 +463,7 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (_ *taskAP
 // Delete the initial process and container
 func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *taskAPI.DeleteResponse, err error) {
-	span, ctx := trace(s.rootCtx, "Delete")
+	span, _ := trace(s.rootCtx, "Delete")
 	defer span.End()
 	start := time.Now()
@@ -515,7 +515,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (_ *task
 // Exec an additional process inside the container
 func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "Exec")
+	span, _ := trace(s.rootCtx, "Exec")
 	defer span.End()
 	start := time.Now()
@@ -553,7 +553,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (_ *p
 // ResizePty of a process
 func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "ResizePty")
+	span, _ := trace(s.rootCtx, "ResizePty")
 	defer span.End()
 	start := time.Now()
@@ -592,7 +592,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (_
 // State returns runtime state information for a process
 func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAPI.StateResponse, err error) {
-	span, ctx := trace(s.rootCtx, "State")
+	span, _ := trace(s.rootCtx, "State")
 	defer span.End()
 	start := time.Now()
@@ -644,7 +644,7 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (_ *taskAP
 // Pause the container
 func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "Pause")
+	span, _ := trace(s.rootCtx, "Pause")
 	defer span.End()
 	start := time.Now()
@@ -683,7 +683,7 @@ func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (_ *ptypes
 // Resume the container
 func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "Resume")
+	span, _ := trace(s.rootCtx, "Resume")
 	defer span.End()
 	start := time.Now()
@@ -720,7 +720,7 @@ func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (_ *ptyp
 // Kill a process with the provided signal
 func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "Kill")
+	span, _ := trace(s.rootCtx, "Kill")
 	defer span.End()
 	start := time.Now()
@@ -781,7 +781,7 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (_ *ptypes.E
 // Since for kata, it cannot get the process's pid from VM,
 // thus only return the Shim's pid directly.
 func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.PidsResponse, err error) {
-	span, ctx := trace(s.rootCtx, "Pids")
+	span, _ := trace(s.rootCtx, "Pids")
 	defer span.End()
 	var processes []*task.ProcessInfo
@@ -804,7 +804,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (_ *taskAPI.
 // CloseIO of a process
 func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "CloseIO")
+	span, _ := trace(s.rootCtx, "CloseIO")
 	defer span.End()
 	start := time.Now()
@@ -845,7 +845,7 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (_ *pt
 // Checkpoint the container
 func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "Checkpoint")
+	span, _ := trace(s.rootCtx, "Checkpoint")
 	defer span.End()
 	start := time.Now()
@@ -859,7 +859,7 @@ func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskReque
 // Connect returns shim information such as the shim's pid
 func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *taskAPI.ConnectResponse, err error) {
-	span, ctx := trace(s.rootCtx, "Connect")
+	span, _ := trace(s.rootCtx, "Connect")
 	defer span.End()
 	start := time.Now()
@@ -879,7 +879,7 @@ func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (_ *ta
 }
 func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "Shutdown")
+	span, _ := trace(s.rootCtx, "Shutdown")
 	start := time.Now()
 	defer func() {
@@ -907,7 +907,7 @@ func (s *service) Shutdown(ctx context.Context, r *taskAPI.ShutdownRequest) (_ *
 }
 func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAPI.StatsResponse, err error) {
-	span, ctx := trace(s.rootCtx, "Stats")
+	span, _ := trace(s.rootCtx, "Stats")
 	defer span.End()
 	start := time.Now()
@@ -936,7 +936,7 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (_ *taskAP
 // Update a running container
 func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *ptypes.Empty, err error) {
-	span, ctx := trace(s.rootCtx, "Update")
+	span, _ := trace(s.rootCtx, "Update")
 	defer span.End()
 	start := time.Now()
@@ -968,7 +968,7 @@ func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (_ *
 // Wait for a process to exit
 func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (_ *taskAPI.WaitResponse, err error) {
-	span, ctx := trace(s.rootCtx, "Wait")
+	span, _ := trace(s.rootCtx, "Wait")
 	defer span.End()
 	var ret uint32

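The recurring `span, ctx := trace(...)` → `span, _ := trace(...)` changes in this file silence the checker's dead-assignment warnings: the context returned by the trace helper was assigned but never read again before the handler returned. A minimal sketch of the pattern (the `span` type and `trace` helper here are simplified stand-ins for the OpenTelemetry ones):

    package main

    import (
        "context"
        "fmt"
    )

    // span is a stand-in for an OpenTelemetry trace span.
    type span struct{ name string }

    func (s span) End() { fmt.Println("span end:", s.name) }

    // trace mimics the shim helper: it returns a span and a derived context.
    func trace(ctx context.Context, name string) (span, context.Context) {
        return span{name: name}, ctx
    }

    func start(ctx context.Context) {
        // before: span, ctx := trace(ctx, "Start") -- ctx is reassigned but
        // never read again, the kind of assignment the static checker flags.
        sp, _ := trace(ctx, "Start")
        defer sp.End()
        // ... handler body continues to use the original ctx ...
    }

    func main() { start(context.Background()) }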
View File

@@ -65,8 +65,7 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) {
 	// encode the metrics
 	encoder := expfmt.NewEncoder(w, expfmt.FmtText)
 	for _, mf := range mfs {
-		if err := encoder.Encode(mf); err != nil {
-		}
+		encoder.Encode(mf)
 	}
 	// if using an old agent, only collect shim/sandbox metrics.
@@ -150,7 +149,7 @@ func (s *service) startManagementServer(ctx context.Context, ociSpec *specs.Spec
 	shimMgtLog.Info("kata management inited")
-	// bind hanlder
+	// bind handler
 	m := http.NewServeMux()
 	m.Handle("/metrics", http.HandlerFunc(s.serveMetrics))
 	m.Handle("/agent-url", http.HandlerFunc(s.agentURL))

View File

@@ -177,7 +177,7 @@ func calcOverhead(initialSandboxStats, finishSandboxStats vc.SandboxStats, initi
 	cpuUsageGuest := float64(guestFinalCPU-guestInitCPU) / deltaTime * 100
 	cpuUsageHost := float64(hostFinalCPU-hostInitCPU) / deltaTime * 100
-	return float64(hostMemoryUsage - guestMemoryUsage), float64(cpuUsageHost - cpuUsageGuest)
+	return float64(hostMemoryUsage - guestMemoryUsage), cpuUsageHost - cpuUsageGuest
 }
 func (s *service) getPodOverhead(ctx context.Context) (float64, float64, error) {

View File

@@ -97,7 +97,7 @@ func TestStatsSandbox(t *testing.T) {
 	sandbox.StatsFunc = getSandboxCPUFunc(2000, 110000)
 	sandbox.StatsContainerFunc = getStatsContainerCPUFunc(200, 400, 20000, 40000)
-	finishSandboxStats, finishContainersStats, err := s.statsSandbox(context.Background())
+	finishSandboxStats, finishContainersStats, _ := s.statsSandbox(context.Background())
 	// calc overhead
 	mem, cpu := calcOverhead(initialSandboxStats, finishSandboxStats, initialContainerStats, finishContainersStats, 1e9)

View File

@@ -107,14 +107,14 @@ func TestEncodeMetricFamily(t *testing.T) {
 	scrapeCount.Inc()
 	scrapeCount.Inc()
-	mfs, err := prometheus.DefaultGatherer.Gather()
+	mfs, _ := prometheus.DefaultGatherer.Gather()
 	// create encoder
 	buf := bytes.NewBufferString("")
 	encoder := expfmt.NewEncoder(buf, expfmt.FmtText)
 	// encode metrics to text format
-	err = encodeMetricFamily(mfs, encoder)
+	err := encodeMetricFamily(mfs, encoder)
 	assert.Nil(err, "encodeMetricFamily should not return error")
 	// here will be to many metrics,

View File

@@ -38,7 +38,7 @@ type KataMonitor struct {
 // NewKataMonitor create and return a new KataMonitor instance
 func NewKataMonitor(containerdAddr, containerdConfigFile string) (*KataMonitor, error) {
 	if containerdAddr == "" {
-		return nil, fmt.Errorf("Containerd serve address missing.")
+		return nil, fmt.Errorf("containerd serve address missing")
 	}
 	containerdConf := &srvconfig.Config{
@@ -82,7 +82,7 @@ func (km *KataMonitor) initSandboxCache() error {
 // GetAgentURL returns agent URL
 func (km *KataMonitor) GetAgentURL(w http.ResponseWriter, r *http.Request) {
-	sandboxID, err := getSandboxIdFromReq(r)
+	sandboxID, err := getSandboxIDFromReq(r)
 	if err != nil {
 		commonServeError(w, http.StatusBadRequest, err)
 		return

View File

@@ -21,7 +21,7 @@ func serveError(w http.ResponseWriter, status int, txt string) {
 }
 func (km *KataMonitor) composeSocketAddress(r *http.Request) (string, error) {
-	sandbox, err := getSandboxIdFromReq(r)
+	sandbox, err := getSandboxIDFromReq(r)
 	if err != nil {
 		return "", err
 	}

View File

@@ -157,7 +157,7 @@ func (sc *sandboxCache) startEventsListener(addr string) error {
 	// if the container is a sandbox container,
 	// means the VM is started, and can start to collect metrics from the VM.
 	if isSandboxContainer(&c) {
-		// we can simply put the contaienrid in sandboxes list if the conatiner is a sandbox container
+		// we can simply put the contaienrid in sandboxes list if the container is a sandbox container
 		sc.putIfNotExists(cc.ID, e.Namespace)
 		monitorLog.WithField("container", cc.ID).Info("add sandbox to cache")
 	}

View File

@@ -25,7 +25,7 @@ func commonServeError(w http.ResponseWriter, status int, err error) {
 	}
 }
-func getSandboxIdFromReq(r *http.Request) (string, error) {
+func getSandboxIDFromReq(r *http.Request) (string, error) {
 	sandbox := r.URL.Query().Get("sandbox")
 	if sandbox != "" {
 		return sandbox, nil

View File

@@ -17,7 +17,6 @@ var defaultInitrdPath = "/usr/share/kata-containers/kata-containers-initrd.img"
 var defaultFirmwarePath = ""
 var defaultMachineAccelerators = ""
 var defaultCPUFeatures = ""
-var defaultShimPath = "/usr/libexec/kata-containers/kata-shim"
 var systemdUnitName = "kata-containers.target"
 const defaultKernelParams = ""

View File

@@ -72,12 +72,9 @@ type factory struct {
 type hypervisor struct {
 	Path string `toml:"path"`
-	HypervisorPathList []string `toml:"valid_hypervisor_paths"`
 	JailerPath string `toml:"jailer_path"`
-	JailerPathList []string `toml:"valid_jailer_paths"`
 	Kernel string `toml:"kernel"`
 	CtlPath string `toml:"ctlpath"`
-	CtlPathList []string `toml:"valid_ctlpaths"`
 	Initrd string `toml:"initrd"`
 	Image string `toml:"image"`
 	Firmware string `toml:"firmware"`
@@ -89,17 +86,23 @@ type hypervisor struct {
 	EntropySource string `toml:"entropy_source"`
 	SharedFS string `toml:"shared_fs"`
 	VirtioFSDaemon string `toml:"virtio_fs_daemon"`
-	VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"`
 	VirtioFSCache string `toml:"virtio_fs_cache"`
+	VhostUserStorePath string `toml:"vhost_user_store_path"`
+	FileBackedMemRootDir string `toml:"file_mem_backend"`
+	GuestHookPath string `toml:"guest_hook_path"`
+	GuestMemoryDumpPath string `toml:"guest_memory_dump_path"`
+	HypervisorPathList []string `toml:"valid_hypervisor_paths"`
+	JailerPathList []string `toml:"valid_jailer_paths"`
+	CtlPathList []string `toml:"valid_ctlpaths"`
+	VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"`
 	VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"`
 	PFlashList []string `toml:"pflashes"`
-	VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"`
-	BlockDeviceCacheSet bool `toml:"block_device_cache_set"`
-	BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
-	BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
-	EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
-	VhostUserStorePath string `toml:"vhost_user_store_path"`
 	VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"`
+	FileBackedMemRootList []string `toml:"valid_file_mem_backends"`
+	EnableAnnotations []string `toml:"enable_annotations"`
+	RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"`
+	TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"`
+	VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"`
 	NumVCPUs int32 `toml:"default_vcpus"`
 	DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"`
 	MemorySize uint32 `toml:"default_memory"`
@@ -108,14 +111,16 @@ type hypervisor struct {
 	DefaultBridges uint32 `toml:"default_bridges"`
 	Msize9p uint32 `toml:"msize_9p"`
 	PCIeRootPort uint32 `toml:"pcie_root_port"`
+	BlockDeviceCacheSet bool `toml:"block_device_cache_set"`
+	BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
+	BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
+	EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
 	DisableBlockDeviceUse bool `toml:"disable_block_device_use"`
 	MemPrealloc bool `toml:"enable_mem_prealloc"`
 	HugePages bool `toml:"enable_hugepages"`
 	VirtioMem bool `toml:"enable_virtio_mem"`
 	IOMMU bool `toml:"enable_iommu"`
 	IOMMUPlatform bool `toml:"enable_iommu_platform"`
-	FileBackedMemRootDir string `toml:"file_mem_backend"`
-	FileBackedMemRootList []string `toml:"valid_file_mem_backends"`
 	Swap bool `toml:"enable_swap"`
 	Debug bool `toml:"enable_debug"`
 	DisableNestingChecks bool `toml:"disable_nesting_checks"`
@@ -123,35 +128,30 @@ type hypervisor struct {
 	DisableImageNvdimm bool `toml:"disable_image_nvdimm"`
 	HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"`
 	DisableVhostNet bool `toml:"disable_vhost_net"`
-	GuestHookPath string `toml:"guest_hook_path"`
-	RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"`
-	TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"`
-	EnableAnnotations []string `toml:"enable_annotations"`
-	GuestMemoryDumpPath string `toml:"guest_memory_dump_path"`
 	GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"`
 }
 type runtime struct {
+	InterNetworkModel string `toml:"internetworking_model"`
+	JaegerEndpoint string `toml:"jaeger_endpoint"`
+	JaegerUser string `toml:"jaeger_user"`
+	JaegerPassword string `toml:"jaeger_password"`
+	SandboxBindMounts []string `toml:"sandbox_bind_mounts"`
+	Experimental []string `toml:"experimental"`
 	Debug bool `toml:"enable_debug"`
 	Tracing bool `toml:"enable_tracing"`
 	DisableNewNetNs bool `toml:"disable_new_netns"`
 	DisableGuestSeccomp bool `toml:"disable_guest_seccomp"`
 	SandboxCgroupOnly bool `toml:"sandbox_cgroup_only"`
-	SandboxBindMounts []string `toml:"sandbox_bind_mounts"`
-	Experimental []string `toml:"experimental"`
-	InterNetworkModel string `toml:"internetworking_model"`
 	EnablePprof bool `toml:"enable_pprof"`
-	JaegerEndpoint string `toml:"jaeger_endpoint"`
-	JaegerUser string `toml:"jaeger_user"`
-	JaegerPassword string `toml:"jaeger_password"`
 }
 type agent struct {
-	Debug bool `toml:"enable_debug"`
-	Tracing bool `toml:"enable_tracing"`
 	TraceMode string `toml:"trace_mode"`
 	TraceType string `toml:"trace_type"`
 	KernelModules []string `toml:"kernel_modules"`
+	Debug bool `toml:"enable_debug"`
+	Tracing bool `toml:"enable_tracing"`
 	DebugConsoleEnabled bool `toml:"debug_console_enabled"`
 }
@@ -449,20 +449,12 @@ func (h hypervisor) getInitrdAndImage() (initrd string, image string, err error)
 	return
 }
-func (h hypervisor) getRxRateLimiterCfg() (uint64, error) {
-	if h.RxRateLimiterMaxRate < 0 {
-		return 0, fmt.Errorf("rx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.RxRateLimiterMaxRate)
-	}
-	return h.RxRateLimiterMaxRate, nil
+func (h hypervisor) getRxRateLimiterCfg() uint64 {
+	return h.RxRateLimiterMaxRate
 }
-func (h hypervisor) getTxRateLimiterCfg() (uint64, error) {
-	if h.TxRateLimiterMaxRate < 0 {
-		return 0, fmt.Errorf("tx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.TxRateLimiterMaxRate)
-	}
-	return h.TxRateLimiterMaxRate, nil
+func (h hypervisor) getTxRateLimiterCfg() uint64 {
+	return h.TxRateLimiterMaxRate
 }
 func (h hypervisor) getIOMMUPlatform() bool {
@@ -547,15 +539,8 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		return vc.HypervisorConfig{}, err
 	}
-	rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg()
-	if err != nil {
-		return vc.HypervisorConfig{}, err
-	}
-	txRateLimiterMaxRate, err := h.getTxRateLimiterCfg()
-	if err != nil {
-		return vc.HypervisorConfig{}, err
-	}
+	rxRateLimiterMaxRate := h.getRxRateLimiterCfg()
+	txRateLimiterMaxRate := h.getTxRateLimiterCfg()
 	return vc.HypervisorConfig{
 		HypervisorPath: hypervisor,
@@ -656,15 +641,8 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
 		return vc.HypervisorConfig{}, err
 	}
-	rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg()
-	if err != nil {
-		return vc.HypervisorConfig{}, err
-	}
-	txRateLimiterMaxRate, err := h.getTxRateLimiterCfg()
-	if err != nil {
-		return vc.HypervisorConfig{}, err
-	}
+	rxRateLimiterMaxRate := h.getRxRateLimiterCfg()
+	txRateLimiterMaxRate := h.getTxRateLimiterCfg()
 	return vc.HypervisorConfig{
 		HypervisorPath: hypervisor,

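The `< 0` checks dropped from the rate limiter getters above were flagged because `RxRateLimiterMaxRate` and `TxRateLimiterMaxRate` are `uint64` fields: an unsigned value can never be negative, so the comparison is always false and the error path is dead code (staticcheck reports this class as SA4003). A minimal reproduction of the warning:

    package main

    import "fmt"

    func main() {
        var rate uint64
        // staticcheck SA4003: no value of type uint64 is less than 0;
        // this branch can never be taken.
        if rate < 0 {
            fmt.Println("unreachable")
        }
        fmt.Println("rate:", rate)
    }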
View File

@@ -420,7 +420,7 @@ func (a *Acrn) createSandbox(ctx context.Context, id string, networkNS NetworkNa
 // startSandbox will start the Sandbox's VM.
 func (a *Acrn) startSandbox(ctx context.Context, timeoutSecs int) error {
-	span, ctx := a.trace(ctx, "startSandbox")
+	span, _ := a.trace(ctx, "startSandbox")
 	defer span.End()
 	if a.config.Debug {
@@ -570,7 +570,7 @@ func (a *Acrn) updateBlockDevice(drive *config.BlockDrive) error {
 }
 func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-	span, ctx := a.trace(ctx, "hotplugAddDevice")
+	span, _ := a.trace(ctx, "hotplugAddDevice")
 	defer span.End()
 	switch devType {
@@ -584,7 +584,7 @@ func (a *Acrn) hotplugAddDevice(ctx context.Context, devInfo interface{}, devTyp
 }
 func (a *Acrn) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-	span, ctx := a.trace(ctx, "hotplugRemoveDevice")
+	span, _ := a.trace(ctx, "hotplugRemoveDevice")
 	defer span.End()
 	// Not supported. return success

View File

@@ -53,7 +53,7 @@ func SetLogger(ctx context.Context, logger *logrus.Entry) {
 // CreateSandbox is the virtcontainers sandbox creation entry point.
 // CreateSandbox creates a sandbox and its containers. It does not start them.
 func CreateSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (VCSandbox, error) {
-	span, ctx := trace(ctx, "CreateSandbox")
+	span, _ := trace(ctx, "CreateSandbox")
 	defer span.End()
 	s, err := createSandboxFromConfig(ctx, sandboxConfig, factory)
@@ -62,7 +62,7 @@ func CreateSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
 }
 func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (_ *Sandbox, err error) {
-	span, ctx := trace(ctx, "createSandboxFromConfig")
+	span, _ := trace(ctx, "createSandboxFromConfig")
 	defer span.End()
 	// Create the sandbox.
@@ -136,7 +136,7 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
 // in the sandbox left, do stop the sandbox and delete it. Those serial operations will be done exclusively by
 // locking the sandbox.
 func CleanupContainer(ctx context.Context, sandboxID, containerID string, force bool) error {
-	span, ctx := trace(ctx, "CleanupContainer")
+	span, _ := trace(ctx, "CleanupContainer")
 	defer span.End()
 	if sandboxID == "" {

View File

@@ -15,7 +15,6 @@ import (
 	"testing"
 	ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
-	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
 	vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
 	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
@@ -74,16 +73,6 @@ func newBasicTestCmd() types.Cmd {
 	return cmd
 }
-func rmSandboxDir(sid string) error {
-	store, err := persist.GetDriver()
-	if err != nil {
-		return fmt.Errorf("failed to get fs persist driver: %v", err)
-	}
-	store.Destroy(sid)
-	return nil
-}
 func newTestSandboxConfigNoop() SandboxConfig {
 	bundlePath := filepath.Join(testDir, testBundle)
 	containerAnnotations[annotations.BundlePathKey] = bundlePath
@@ -207,26 +196,6 @@ func TestCreateSandboxFailing(t *testing.T) {
 * Benchmarks
 */
-func createNewSandboxConfig(hType HypervisorType) SandboxConfig {
-	hypervisorConfig := HypervisorConfig{
-		KernelPath: "/usr/share/kata-containers/vmlinux.container",
-		ImagePath: "/usr/share/kata-containers/kata-containers.img",
-		HypervisorPath: "/usr/bin/qemu-system-x86_64",
-	}
-	netConfig := NetworkConfig{}
-	return SandboxConfig{
-		ID: testSandboxID,
-		HypervisorType: hType,
-		HypervisorConfig: hypervisorConfig,
-		AgentConfig: KataAgentConfig{},
-		NetworkConfig: netConfig,
-	}
-}
 func newTestContainerConfigNoop(contID string) ContainerConfig {
 	// Define the container command and bundle.
 	container := ContainerConfig{

View File

@@ -339,7 +339,7 @@ func (clh *cloudHypervisor) createSandbox(ctx context.Context, id string, networ
 // startSandbox will start the VMM and boot the virtual machine for the given sandbox.
 func (clh *cloudHypervisor) startSandbox(ctx context.Context, timeout int) error {
-	span, ctx := clh.trace(ctx, "startSandbox")
+	span, _ := clh.trace(ctx, "startSandbox")
 	defer span.End()
 	ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
@@ -492,7 +492,7 @@ func (clh *cloudHypervisor) hotplugAddDevice(ctx context.Context, devInfo interf
 }
 func (clh *cloudHypervisor) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-	span, ctx := clh.trace(ctx, "hotplugRemoveDevice")
+	span, _ := clh.trace(ctx, "hotplugRemoveDevice")
 	defer span.End()
 	var deviceID string
@@ -576,7 +576,7 @@ func (clh *cloudHypervisor) resizeMemory(ctx context.Context, reqMemMB uint32, m
 	}
 	cl := clh.client()
-	ctx, cancelResize := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
+	ctx, cancelResize := context.WithTimeout(ctx, clhAPITimeout*time.Second)
 	defer cancelResize()
 	// OpenApi does not support uint64, convert to int64
@@ -620,7 +620,7 @@ func (clh *cloudHypervisor) resizeVCPUs(ctx context.Context, reqVCPUs uint32) (c
 	}
 	// Resize (hot-plug) vCPUs via HTTP API
-	ctx, cancel := context.WithTimeout(context.Background(), clhAPITimeout*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, clhAPITimeout*time.Second)
 	defer cancel()
 	if _, err = cl.VmResizePut(ctx, chclient.VmResize{DesiredVcpus: int32(reqVCPUs)}); err != nil {
 		return currentVCPUs, newVCPUs, errors.Wrap(err, "[clh] VmResizePut failed")
@@ -653,7 +653,7 @@ func (clh *cloudHypervisor) resumeSandbox(ctx context.Context) error {
 // stopSandbox will stop the Sandbox's VM.
 func (clh *cloudHypervisor) stopSandbox(ctx context.Context) (err error) {
-	span, ctx := clh.trace(ctx, "stopSandbox")
+	span, _ := clh.trace(ctx, "stopSandbox")
 	defer span.End()
 	clh.Logger().WithField("function", "stopSandbox").Info("Stop Sandbox")
 	return clh.terminate(ctx)
@@ -757,7 +757,7 @@ func (clh *cloudHypervisor) trace(parent context.Context, name string) (otelTrac
 }
 func (clh *cloudHypervisor) terminate(ctx context.Context) (err error) {
-	span, ctx := clh.trace(ctx, "terminate")
+	span, _ := clh.trace(ctx, "terminate")
 	defer span.End()
 	pid := clh.state.PID

View File

@@ -700,7 +700,7 @@ func (c *Container) createBlockDevices(ctx context.Context) error {
 // newContainer creates a Container structure from a sandbox and a container configuration.
 func newContainer(ctx context.Context, sandbox *Sandbox, contConfig *ContainerConfig) (*Container, error) {
-	span, ctx := sandbox.trace(ctx, "newContainer")
+	span, _ := sandbox.trace(ctx, "newContainer")
 	defer span.End()
 	if !contConfig.valid() {

View File

@@ -141,7 +141,7 @@ func (f *factory) checkConfig(config vc.VMConfig) error {
 // GetVM returns a working blank VM created by the factory.
 func (f *factory) GetVM(ctx context.Context, config vc.VMConfig) (*vc.VM, error) {
-	span, ctx := trace(ctx, "GetVM")
+	span, _ := trace(ctx, "GetVM")
 	defer span.End()
 	hypervisorConfig := config.HypervisorConfig

View File

@@ -200,7 +200,7 @@ func (fc *firecracker) createSandbox(ctx context.Context, id string, networkNS N
 	fc.ctx = ctx
 	var span otelTrace.Span
-	span, ctx = fc.trace(ctx, "createSandbox")
+	span, _ = fc.trace(ctx, "createSandbox")
 	defer span.End()
 	//TODO: check validity of the hypervisor config provided
@@ -325,7 +325,7 @@ func (fc *firecracker) checkVersion(version string) error {
 // waitVMMRunning will wait for timeout seconds for the VMM to be up and running.
 func (fc *firecracker) waitVMMRunning(ctx context.Context, timeout int) error {
-	span, ctx := fc.trace(ctx, "wait VMM to be running")
+	span, _ := fc.trace(ctx, "wait VMM to be running")
 	defer span.End()
 	if timeout < 0 {
@@ -347,7 +347,7 @@ func (fc *firecracker) waitVMMRunning(ctx context.Context, timeout int) error {
 }
 func (fc *firecracker) fcInit(ctx context.Context, timeout int) error {
-	span, ctx := fc.trace(ctx, "fcInit")
+	span, _ := fc.trace(ctx, "fcInit")
 	defer span.End()
 	var err error
@@ -467,7 +467,7 @@ func (fc *firecracker) fcEnd(ctx context.Context) (err error) {
 }
 func (fc *firecracker) client(ctx context.Context) *client.Firecracker {
-	span, ctx := fc.trace(ctx, "client")
+	span, _ := fc.trace(ctx, "client")
 	defer span.End()
 	if fc.connection == nil {
@@ -762,7 +762,7 @@ func (fc *firecracker) fcInitConfiguration(ctx context.Context) error {
 // In the context of firecracker, this will start the hypervisor,
 // for configuration, but not yet start the actual virtual machine
 func (fc *firecracker) startSandbox(ctx context.Context, timeout int) error {
-	span, ctx := fc.trace(ctx, "startSandbox")
+	span, _ := fc.trace(ctx, "startSandbox")
 	defer span.End()
 	if err := fc.fcInitConfiguration(ctx); err != nil {
@@ -875,7 +875,7 @@ func (fc *firecracker) cleanupJail(ctx context.Context) {
 // stopSandbox will stop the Sandbox's VM.
 func (fc *firecracker) stopSandbox(ctx context.Context) (err error) {
-	span, ctx := fc.trace(ctx, "stopSandbox")
+	span, _ := fc.trace(ctx, "stopSandbox")
 	defer span.End()
 	return fc.fcEnd(ctx)
@@ -996,7 +996,7 @@ func (fc *firecracker) fcAddBlockDrive(ctx context.Context, drive config.BlockDr
 // Firecracker supports replacing the host drive used once the VM has booted up
 func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string) error {
-	span, ctx := fc.trace(ctx, "fcUpdateBlockDrive")
+	span, _ := fc.trace(ctx, "fcUpdateBlockDrive")
 	defer span.End()
 	// Use the global block index as an index into the pool of the devices
@@ -1020,7 +1020,7 @@ func (fc *firecracker) fcUpdateBlockDrive(ctx context.Context, path, id string)
 // addDevice will add extra devices to firecracker. Limited to configure before the
 // virtual machine starts. Devices include drivers and network interfaces only.
 func (fc *firecracker) addDevice(ctx context.Context, devInfo interface{}, devType deviceType) error {
-	span, ctx := fc.trace(ctx, "addDevice")
+	span, _ := fc.trace(ctx, "addDevice")
 	defer span.End()
 	fc.state.RLock()
@@ -1081,7 +1081,7 @@ func (fc *firecracker) hotplugBlockDevice(ctx context.Context, drive config.Bloc
 // hotplugAddDevice supported in Firecracker VMM
 func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-	span, ctx := fc.trace(ctx, "hotplugAddDevice")
+	span, _ := fc.trace(ctx, "hotplugAddDevice")
 	defer span.End()
 	switch devType {
@@ -1097,7 +1097,7 @@ func (fc *firecracker) hotplugAddDevice(ctx context.Context, devInfo interface{}
 // hotplugRemoveDevice supported in Firecracker VMM
 func (fc *firecracker) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
-	span, ctx := fc.trace(ctx, "hotplugRemoveDevice")
+	span, _ := fc.trace(ctx, "hotplugRemoveDevice")
 	defer span.End()
 	switch devType {
@@ -1245,9 +1245,8 @@ func revertBytes(num uint64) uint64 {
 	b := num % 1000
 	if a == 0 {
 		return num
-	} else {
-		return 1024*revertBytes(a) + b
 	}
+	return 1024*revertBytes(a) + b
 }
 func (fc *firecracker) setSandbox(sandbox *Sandbox) {

View File

@@ -174,11 +174,11 @@ func registerFirecrackerMetrics() {
 // updateFirecrackerMetrics update all metrics to the latest values.
 func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
-	// set metrics for ApiServerMetrics
-	apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.ApiServer.ProcessStartupTimeUs))
-	apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.ApiServer.ProcessStartupTimeCpuUs))
-	apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.ApiServer.SyncResponseFails))
-	apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.ApiServer.SyncVmmSendTimeoutCount))
+	// set metrics for APIServerMetrics
+	apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.APIServer.ProcessStartupTimeUs))
+	apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.APIServer.ProcessStartupTimeCPUUs))
+	apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.APIServer.SyncResponseFails))
+	apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.APIServer.SyncVmmSendTimeoutCount))
 	// set metrics for BlockDeviceMetrics
 	blockDeviceMetrics.WithLabelValues("activate_fails").Set(float64(fm.Block.ActivateFails))
@@ -199,10 +199,10 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
 	blockDeviceMetrics.WithLabelValues("rate_limiter_throttled_events").Set(float64(fm.Block.RateLimiterThrottledEvents))
 	// set metrics for GetRequestsMetrics
-	getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetApiRequests.InstanceInfoCount))
-	getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetApiRequests.InstanceInfoFails))
-	getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetApiRequests.MachineCfgCount))
-	getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetApiRequests.MachineCfgFails))
+	getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetAPIRequests.InstanceInfoCount))
+	getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetAPIRequests.InstanceInfoFails))
+	getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetAPIRequests.MachineCfgCount))
+	getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetAPIRequests.MachineCfgFails))
 	// set metrics for I8042DeviceMetrics
 	i8042DeviceMetrics.WithLabelValues("error_count").Set(float64(fm.I8042.ErrorCount))
@@ -216,13 +216,13 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
 	performanceMetrics.WithLabelValues("full_create_snapshot").Set(float64(fm.LatenciesUs.FullCreateSnapshot))
 	performanceMetrics.WithLabelValues("diff_create_snapshot").Set(float64(fm.LatenciesUs.DiffCreateSnapshot))
 	performanceMetrics.WithLabelValues("load_snapshot").Set(float64(fm.LatenciesUs.LoadSnapshot))
-	performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVm))
-	performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVm))
+	performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVM))
+	performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVM))
 	performanceMetrics.WithLabelValues("vmm_full_create_snapshot").Set(float64(fm.LatenciesUs.VmmFullCreateSnapshot))
 	performanceMetrics.WithLabelValues("vmm_diff_create_snapshot").Set(float64(fm.LatenciesUs.VmmDiffCreateSnapshot))
 	performanceMetrics.WithLabelValues("vmm_load_snapshot").Set(float64(fm.LatenciesUs.VmmLoadSnapshot))
-	performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVm))
-	performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVm))
+	performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVM))
+	performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVM))
 	// set metrics for LoggerSystemMetrics
 	loggerSystemMetrics.WithLabelValues("missed_metrics_count").Set(float64(fm.Logger.MissedMetricsCount))
@@ -273,28 +273,28 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
 	netDeviceMetrics.WithLabelValues("tx_spoofed_mac_count").Set(float64(fm.Net.TxSpoofedMacCount))
 	// set metrics for PatchRequestsMetrics
-	patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchApiRequests.DriveCount))
-	patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchApiRequests.DriveFails))
-	patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchApiRequests.NetworkCount))
-	patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchApiRequests.NetworkFails))
-	patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchApiRequests.MachineCfgCount))
-	patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchApiRequests.MachineCfgFails))
+	patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchAPIRequests.DriveCount))
+	patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchAPIRequests.DriveFails))
+	patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchAPIRequests.NetworkCount))
+	patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchAPIRequests.NetworkFails))
+	patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchAPIRequests.MachineCfgCount))
+	patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchAPIRequests.MachineCfgFails))
 	// set metrics for PutRequestsMetrics
-	putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutApiRequests.ActionsCount))
-	putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutApiRequests.ActionsFails))
-	putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutApiRequests.BootSourceCount))
-	putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutApiRequests.BootSourceFails))
-	putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutApiRequests.DriveCount))
-	putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutApiRequests.DriveFails))
-	putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutApiRequests.LoggerCount))
-	putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutApiRequests.LoggerFails))
-	putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutApiRequests.MachineCfgCount))
-	putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutApiRequests.MachineCfgFails))
-	putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutApiRequests.MetricsCount))
-	putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutApiRequests.MetricsFails))
-	putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutApiRequests.NetworkCount))
-	putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutApiRequests.NetworkFails))
+	putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutAPIRequests.ActionsCount))
+	putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutAPIRequests.ActionsFails))
+	putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutAPIRequests.BootSourceCount))
+	putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutAPIRequests.BootSourceFails))
+	putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutAPIRequests.DriveCount))
+	putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutAPIRequests.DriveFails))
+	putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutAPIRequests.LoggerCount))
+	putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutAPIRequests.LoggerFails))
+	putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutAPIRequests.MachineCfgCount))
+	putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutAPIRequests.MachineCfgFails))
+	putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutAPIRequests.MetricsCount))
+	putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutAPIRequests.MetricsFails))
+	putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutAPIRequests.NetworkCount))
+	putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutAPIRequests.NetworkFails))
 	// set metrics for RTCDeviceMetrics
 	rTCDeviceMetrics.WithLabelValues("error_count").Set(float64(fm.Rtc.ErrorCount))
@@ -310,7 +310,7 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
 	vcpuMetrics.WithLabelValues("exit_mmio_read").Set(float64(fm.Vcpu.ExitMmioRead))
 	vcpuMetrics.WithLabelValues("exit_mmio_write").Set(float64(fm.Vcpu.ExitMmioWrite))
 	vcpuMetrics.WithLabelValues("failures").Set(float64(fm.Vcpu.Failures))
-	vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCpuid))
+	vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCPUid))
 	// set metrics for VmmMetrics
 	vmmMetrics.WithLabelValues("device_events").Set(float64(fm.Vmm.DeviceEvents))
@@ -355,11 +355,11 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
 // Structure storing all metrics while enforcing serialization support on them.
 type FirecrackerMetrics struct {
 	// API Server related metrics.
-	ApiServer ApiServerMetrics `json:"api_server"`
+	APIServer APIServerMetrics `json:"api_server"`
 	// A block device's related metrics.
 	Block BlockDeviceMetrics `json:"block"`
 	// Metrics related to API GET requests.
-	GetApiRequests GetRequestsMetrics `json:"get_api_requests"`
+	GetAPIRequests GetRequestsMetrics `json:"get_api_requests"`
 	// Metrics related to the i8042 device.
 	I8042 I8042DeviceMetrics `json:"i8042"`
 	// Metrics related to performance measurements.
@@ -371,9 +371,9 @@ type FirecrackerMetrics struct {
 	// A network device's related metrics.
 	Net NetDeviceMetrics `json:"net"`
 	// Metrics related to API PATCH requests.
-	PatchApiRequests PatchRequestsMetrics `json:"patch_api_requests"`
+	PatchAPIRequests PatchRequestsMetrics `json:"patch_api_requests"`
 	// Metrics related to API PUT requests.
-	PutApiRequests PutRequestsMetrics `json:"put_api_requests"`
+	PutAPIRequests PutRequestsMetrics `json:"put_api_requests"`
 	// Metrics related to the RTC device.
 	Rtc RTCDeviceMetrics `json:"rtc"`
 	// Metrics related to seccomp filtering.
@@ -391,11 +391,11 @@ type FirecrackerMetrics struct {
 }
 // API Server related metrics.
-type ApiServerMetrics struct {
+type APIServerMetrics struct {
 	// Measures the process's startup time in microseconds.
 	ProcessStartupTimeUs uint64 `json:"process_startup_time_us"`
// Measures the cpu's startup time in microseconds. // Measures the cpu's startup time in microseconds.
ProcessStartupTimeCpuUs uint64 `json:"process_startup_time_cpu_us"` ProcessStartupTimeCPUUs uint64 `json:"process_startup_time_cpu_us"`
// Number of failures on API requests triggered by internal errors. // Number of failures on API requests triggered by internal errors.
SyncResponseFails uint64 `json:"sync_response_fails"` SyncResponseFails uint64 `json:"sync_response_fails"`
// Number of timeouts during communication with the VMM. // Number of timeouts during communication with the VMM.
@ -475,9 +475,9 @@ type PerformanceMetrics struct {
// Measures the snapshot load time, at the API (user) level, in microseconds. // Measures the snapshot load time, at the API (user) level, in microseconds.
LoadSnapshot uint64 `json:"load_snapshot"` LoadSnapshot uint64 `json:"load_snapshot"`
// Measures the microVM pausing duration, at the API (user) level, in microseconds. // Measures the microVM pausing duration, at the API (user) level, in microseconds.
PauseVm uint64 `json:"pause_vm"` PauseVM uint64 `json:"pause_vm"`
// Measures the microVM resuming duration, at the API (user) level, in microseconds. // Measures the microVM resuming duration, at the API (user) level, in microseconds.
ResumeVm uint64 `json:"resume_vm"` ResumeVM uint64 `json:"resume_vm"`
// Measures the snapshot full create time, at the VMM level, in microseconds. // Measures the snapshot full create time, at the VMM level, in microseconds.
VmmFullCreateSnapshot uint64 `json:"vmm_full_create_snapshot"` VmmFullCreateSnapshot uint64 `json:"vmm_full_create_snapshot"`
// Measures the snapshot diff create time, at the VMM level, in microseconds. // Measures the snapshot diff create time, at the VMM level, in microseconds.
@ -485,9 +485,9 @@ type PerformanceMetrics struct {
// Measures the snapshot load time, at the VMM level, in microseconds. // Measures the snapshot load time, at the VMM level, in microseconds.
VmmLoadSnapshot uint64 `json:"vmm_load_snapshot"` VmmLoadSnapshot uint64 `json:"vmm_load_snapshot"`
// Measures the microVM pausing duration, at the VMM level, in microseconds. // Measures the microVM pausing duration, at the VMM level, in microseconds.
VmmPauseVm uint64 `json:"vmm_pause_vm"` VmmPauseVM uint64 `json:"vmm_pause_vm"`
// Measures the microVM resuming duration, at the VMM level, in microseconds. // Measures the microVM resuming duration, at the VMM level, in microseconds.
VmmResumeVm uint64 `json:"vmm_resume_vm"` VmmResumeVM uint64 `json:"vmm_resume_vm"`
} }
// Logging related metrics. // Logging related metrics.
@ -662,7 +662,7 @@ type VcpuMetrics struct {
// Number of errors during this VCPU's run. // Number of errors during this VCPU's run.
Failures uint64 `json:"failures"` Failures uint64 `json:"failures"`
// Failures in configuring the CPUID. // Failures in configuring the CPUID.
FilterCpuid uint64 `json:"filter_cpuid"` FilterCPUid uint64 `json:"filter_cpuid"`
} }
// Metrics related to the virtual machine manager. // Metrics related to the virtual machine manager.
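The renames throughout this file (Api to API, Cpu to CPU, Vm to VM, Rpc to RPC) follow Go's initialism convention, enforced by golint and stylecheck (ST1003): an initialism keeps one consistent case wherever it appears in an identifier. The JSON tags such as json:"api_server" are left untouched, so the serialized metrics format is unchanged. A minimal sketch of the rule, using hypothetical names:

package main

import "fmt"

// stats follows the initialism rule: ApiRequests and CpuTimeUs would be
// flagged by the linter, APIRequests and CPUTimeUs are accepted. The
// JSON tags are independent of the Go names, so the wire format stays
// the same after such a rename.
type stats struct {
	APIRequests uint64 `json:"api_requests"`
	CPUTimeUs   uint64 `json:"cpu_time_us"`
}

func main() {
	fmt.Printf("%+v\n", stats{APIRequests: 42, CPUTimeUs: 1000})
}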

View File

@ -26,7 +26,6 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations" vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups" vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
ns "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/nsenter"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types" vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/uuid" "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/uuid"
@ -496,7 +495,7 @@ func (k *kataAgent) setupSharedPath(ctx context.Context, sandbox *Sandbox) error
} }
func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error { func (k *kataAgent) createSandbox(ctx context.Context, sandbox *Sandbox) error {
span, ctx := k.trace(ctx, "createSandbox") span, _ := k.trace(ctx, "createSandbox")
defer span.End() defer span.End()
if err := k.setupSharedPath(ctx, sandbox); err != nil { if err := k.setupSharedPath(ctx, sandbox); err != nil {
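Each trace call site in this file now discards the derived context with the blank identifier, presumably because the reassignment tripped the static checker; the surrounding calls keep using the caller's ctx. Assigning an intentionally unused return value to _ is the idiomatic way to make that explicit. A reduced sketch, with trace standing in for the runtime's helper:

package main

import (
	"context"
	"fmt"
)

type span struct{ name string }

func (s span) End() { fmt.Println("span ended:", s.name) }

// trace is a stand-in for the runtime's tracing helper; it returns a
// span plus a context derived from ctx.
func trace(ctx context.Context, name string) (span, context.Context) {
	return span{name: name}, ctx
}

func doWork(ctx context.Context) {
	// The derived context is intentionally unused here, so it goes to
	// the blank identifier instead of reassigning the ctx parameter.
	sp, _ := trace(ctx, "doWork")
	defer sp.End()
}

func main() { doWork(context.Background()) }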
@ -583,7 +582,7 @@ func cmdEnvsToStringSlice(ev []types.EnvVar) []string {
} }
func (k *kataAgent) exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) { func (k *kataAgent) exec(ctx context.Context, sandbox *Sandbox, c Container, cmd types.Cmd) (*Process, error) {
span, ctx := k.trace(ctx, "exec") span, _ := k.trace(ctx, "exec")
defer span.End() defer span.End()
var kataProcess *grpc.Process var kataProcess *grpc.Process
@ -755,7 +754,7 @@ func (k *kataAgent) getDNS(sandbox *Sandbox) ([]string, error) {
} }
func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error { func (k *kataAgent) startSandbox(ctx context.Context, sandbox *Sandbox) error {
span, ctx := k.trace(ctx, "startSandbox") span, _ := k.trace(ctx, "startSandbox")
defer span.End() defer span.End()
if err := k.setAgentURL(); err != nil { if err := k.setAgentURL(); err != nil {
@ -910,7 +909,7 @@ func setupStorages(ctx context.Context, sandbox *Sandbox) []*grpc.Storage {
} }
func (k *kataAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error { func (k *kataAgent) stopSandbox(ctx context.Context, sandbox *Sandbox) error {
span, ctx := k.trace(ctx, "stopSandbox") span, _ := k.trace(ctx, "stopSandbox")
defer span.End() defer span.End()
req := &grpc.DestroySandboxRequest{} req := &grpc.DestroySandboxRequest{}
@ -1272,7 +1271,7 @@ func (k *kataAgent) buildContainerRootfs(ctx context.Context, sandbox *Sandbox,
} }
func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (p *Process, err error) { func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Container) (p *Process, err error) {
span, ctx := k.trace(ctx, "createContainer") span, _ := k.trace(ctx, "createContainer")
defer span.End() defer span.End()
var ctrStorages []*grpc.Storage var ctrStorages []*grpc.Storage
@ -1384,14 +1383,6 @@ func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Co
return nil, err return nil, err
} }
enterNSList := []ns.Namespace{}
if sandbox.networkNS.NetNsPath != "" {
enterNSList = append(enterNSList, ns.Namespace{
Path: sandbox.networkNS.NetNsPath,
Type: ns.NSTypeNet,
})
}
return buildProcessFromExecID(req.ExecId) return buildProcessFromExecID(req.ExecId)
} }
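The deleted enterNSList block built a namespace list that nothing consumed any more, which staticcheck reports as SA4010 (the result of append is never used). A reduced reproduction of the warning, with hypothetical names:

package main

func buildList(netNSPath string) {
	list := []string{}
	if netNSPath != "" {
		// staticcheck SA4010: the result of this append is never
		// used, because list is not read after this point. Dead
		// code like this is best deleted outright.
		list = append(list, netNSPath)
	}
}

func main() {
	buildList("/var/run/netns/example")
}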
@ -1602,7 +1593,7 @@ func (k *kataAgent) handlePidNamespace(grpcSpec *grpc.Spec, sandbox *Sandbox) bo
} }
func (k *kataAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error { func (k *kataAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Container) error {
span, ctx := k.trace(ctx, "startContainer") span, _ := k.trace(ctx, "startContainer")
defer span.End() defer span.End()
req := &grpc.StartContainerRequest{ req := &grpc.StartContainerRequest{
@ -1614,7 +1605,7 @@ func (k *kataAgent) startContainer(ctx context.Context, sandbox *Sandbox, c *Con
} }
func (k *kataAgent) stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error { func (k *kataAgent) stopContainer(ctx context.Context, sandbox *Sandbox, c Container) error {
span, ctx := k.trace(ctx, "stopContainer") span, _ := k.trace(ctx, "stopContainer")
defer span.End() defer span.End()
_, err := k.sendReq(ctx, &grpc.RemoveContainerRequest{ContainerId: c.id}) _, err := k.sendReq(ctx, &grpc.RemoveContainerRequest{ContainerId: c.id})
@ -1778,7 +1769,7 @@ func (k *kataAgent) connect(ctx context.Context) error {
return nil return nil
} }
span, ctx := k.trace(ctx, "connect") span, _ := k.trace(ctx, "connect")
defer span.End() defer span.End()
// This is for the first connection only, to prevent race // This is for the first connection only, to prevent race
@ -1824,7 +1815,7 @@ func (k *kataAgent) disconnect(ctx context.Context) error {
// check grpc server is serving // check grpc server is serving
func (k *kataAgent) check(ctx context.Context) error { func (k *kataAgent) check(ctx context.Context) error {
span, ctx := k.trace(ctx, "check") span, _ := k.trace(ctx, "check")
defer span.End() defer span.End()
_, err := k.sendReq(ctx, &grpc.CheckRequest{}) _, err := k.sendReq(ctx, &grpc.CheckRequest{})
@ -1835,7 +1826,7 @@ func (k *kataAgent) check(ctx context.Context) error {
} }
func (k *kataAgent) waitProcess(ctx context.Context, c *Container, processID string) (int32, error) { func (k *kataAgent) waitProcess(ctx context.Context, c *Container, processID string) (int32, error) {
span, ctx := k.trace(ctx, "waitProcess") span, _ := k.trace(ctx, "waitProcess")
defer span.End() defer span.End()
resp, err := k.sendReq(ctx, &grpc.WaitProcessRequest{ resp, err := k.sendReq(ctx, &grpc.WaitProcessRequest{
@ -2022,7 +2013,7 @@ func (k *kataAgent) sendReq(spanCtx context.Context, request interface{}) (inter
k.Logger().WithField("name", msgName).WithField("req", message.String()).Debug("sending request") k.Logger().WithField("name", msgName).WithField("req", message.String()).Debug("sending request")
defer func() { defer func() {
agentRpcDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond))) agentRPCDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
}() }()
return handler(ctx, request) return handler(ctx, request)
} }

View File

@ -360,6 +360,7 @@ func TestHandleBlockVolume(t *testing.T) {
bPCIPath, err := vcTypes.PciPathFromString("03/04") bPCIPath, err := vcTypes.PciPathFromString("03/04")
assert.NoError(t, err) assert.NoError(t, err)
dPCIPath, err := vcTypes.PciPathFromString("04/05") dPCIPath, err := vcTypes.PciPathFromString("04/05")
assert.NoError(t, err)
vDev := drivers.NewVhostUserBlkDevice(&config.DeviceInfo{ID: vDevID}) vDev := drivers.NewVhostUserBlkDevice(&config.DeviceInfo{ID: vDevID})
bDev := drivers.NewBlockDevice(&config.DeviceInfo{ID: bDevID}) bDev := drivers.NewBlockDevice(&config.DeviceInfo{ID: bDevID})
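The added assert.NoError covers the second PciPathFromString call, whose error was previously assigned to err and then overwritten unread (staticcheck SA4006). The same shape in miniature, with strconv.Atoi standing in for the real helper:

package demo

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestParseBoth checks the error from every call; dropping the first
// assert.NoError would leave that err assigned but never read (SA4006).
func TestParseBoth(t *testing.T) {
	a, err := strconv.Atoi("3")
	assert.NoError(t, err)

	b, err := strconv.Atoi("4")
	assert.NoError(t, err)

	assert.Equal(t, 7, a+b)
}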

View File

@ -22,6 +22,7 @@ import (
type mockAgent struct { type mockAgent struct {
} }
// nolint:golint
func NewMockAgent() agent { func NewMockAgent() agent {
return &mockAgent{} return &mockAgent{}
} }
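NewMockAgent returns the package-private agent type, which golint flags as an exported function returning an unexported type. Since the mock is only consumed inside the package, the warning is silenced with a nolint comment rather than by exporting the type; the directive sits directly above the declaration. A hypothetical equivalent:

package widget

type widgetImpl struct{}

// NewWidget intentionally returns the unexported implementation type;
// the golint warning about it is acknowledged and suppressed.
// nolint:golint
func NewWidget() widgetImpl {
	return widgetImpl{}
}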
@ -237,6 +238,6 @@ func (n *mockAgent) getOOMEvent(ctx context.Context) (string, error) {
return "", nil return "", nil
} }
func (k *mockAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) { func (n *mockAgent) getAgentMetrics(ctx context.Context, req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
return nil, nil return nil, nil
} }
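The receiver rename from k to n makes getAgentMetrics match every other mockAgent method, satisfying stylecheck ST1016 (methods on the same type should have the same receiver name). Illustrated on a hypothetical type:

package demo

type counter struct{ n int }

// Every method on counter uses the receiver name c; a stray
// func (k *counter) alongside these would trip ST1016.
func (c *counter) inc()       { c.n++ }
func (c *counter) value() int { return c.n }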

View File

@ -219,7 +219,7 @@ const mountPerm = os.FileMode(0755)
// * recursively create the destination // * recursively create the destination
// pgtypes stands for propagation types, which are shared, private, slave, and unbindable. // pgtypes stands for propagation types, which are shared, private, slave, and unbindable.
func bindMount(ctx context.Context, source, destination string, readonly bool, pgtypes string) error { func bindMount(ctx context.Context, source, destination string, readonly bool, pgtypes string) error {
span, ctx := trace(ctx, "bindMount") span, _ := trace(ctx, "bindMount")
defer span.End() defer span.End()
if source == "" { if source == "" {
@ -347,7 +347,7 @@ func bindUnmountContainerRootfs(ctx context.Context, sharedDir, cID string) erro
} }
func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbox) error { func bindUnmountAllRootfs(ctx context.Context, sharedDir string, sandbox *Sandbox) error {
span, ctx := trace(ctx, "bindUnmountAllRootfs") span, _ := trace(ctx, "bindUnmountAllRootfs")
defer span.End() defer span.End()
var errors *merr.Error var errors *merr.Error

View File

@ -1273,7 +1273,7 @@ func (n *Network) Run(ctx context.Context, networkNSPath string, cb func() error
// Add adds all needed interfaces inside the network namespace. // Add adds all needed interfaces inside the network namespace.
func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, hotplug bool) ([]Endpoint, error) { func (n *Network) Add(ctx context.Context, config *NetworkConfig, s *Sandbox, hotplug bool) ([]Endpoint, error) {
span, ctx := n.trace(ctx, "Add") span, _ := n.trace(ctx, "Add")
defer span.End() defer span.End()
endpoints, err := createEndpointsFromScan(config.NetNSPath, config) endpoints, err := createEndpointsFromScan(config.NetNSPath, config)
@ -1354,7 +1354,7 @@ func (n *Network) PostAdd(ctx context.Context, ns *NetworkNamespace, hotplug boo
// Remove network endpoints in the network namespace. It also deletes the network // Remove network endpoints in the network namespace. It also deletes the network
// namespace in case the namespace has been created by us. // namespace in case the namespace has been created by us.
func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor hypervisor) error { func (n *Network) Remove(ctx context.Context, ns *NetworkNamespace, hypervisor hypervisor) error {
span, ctx := n.trace(ctx, "Remove") span, _ := n.trace(ctx, "Remove")
defer span.End() defer span.End()
for _, endpoint := range ns.Endpoints { for _, endpoint := range ns.Endpoints {

View File

@ -34,7 +34,6 @@ const (
) )
var defaultDialTimeout = 15 * time.Second var defaultDialTimeout = 15 * time.Second
var defaultCloseTimeout = 5 * time.Second
var hybridVSockPort uint32 var hybridVSockPort uint32
@ -70,8 +69,7 @@ func NewAgentClient(ctx context.Context, sock string) (*AgentClient, error) {
} }
var conn net.Conn var conn net.Conn
var d dialer var d = agentDialer(parsedAddr)
d = agentDialer(parsedAddr)
conn, err = d(grpcAddr, defaultDialTimeout) conn, err = d(grpcAddr, defaultDialTimeout)
if err != nil { if err != nil {
return nil, err return nil, err
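Collapsing var d dialer followed by a separate assignment into a single var d = agentDialer(parsedAddr) fixes gosimple S1021 (merge variable declaration with assignment). The general shape:

package main

import "fmt"

func main() {
	// Before (gosimple S1021):
	//   var addr string
	//   addr = fmt.Sprintf("vsock://%d", 1024)
	// After: declaration and assignment merged into one statement.
	var addr = fmt.Sprintf("vsock://%d", 1024)
	fmt.Println(addr)
}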

View File

@ -100,19 +100,25 @@ type RuntimeConfig struct {
AgentConfig vc.KataAgentConfig AgentConfig vc.KataAgentConfig
Console string
//Determines how the VM should be connected to the //Determines how the VM should be connected to the
//container network interface //container network interface
InterNetworkModel vc.NetInterworkingModel InterNetworkModel vc.NetInterworkingModel
FactoryConfig FactoryConfig FactoryConfig FactoryConfig
Debug bool
Trace bool
Console string
JaegerEndpoint string JaegerEndpoint string
JaegerUser string JaegerUser string
JaegerPassword string JaegerPassword string
//Paths to be bindmounted RO into the guest.
SandboxBindMounts []string
//Experimental features enabled
Experimental []exp.Feature
Debug bool
Trace bool
//Determines if seccomp should be applied inside guest //Determines if seccomp should be applied inside guest
DisableGuestSeccomp bool DisableGuestSeccomp bool
@ -122,12 +128,6 @@ type RuntimeConfig struct {
//Determines kata processes are managed only in sandbox cgroup //Determines kata processes are managed only in sandbox cgroup
SandboxCgroupOnly bool SandboxCgroupOnly bool
//Paths to be bindmounted RO into the guest.
SandboxBindMounts []string
//Experimental features enabled
Experimental []exp.Feature
// Determines if enable pprof // Determines if enable pprof
EnablePprof bool EnablePprof bool
} }
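The RuntimeConfig fields are regrouped so the strings and slices sit together and the bool flags cluster at the end. One plausible motivation, beyond readability, is go vet's fieldalignment analyzer: ordering fields from larger to smaller alignment reduces struct padding. A sketch with hypothetical types:

package main

import (
	"fmt"
	"unsafe"
)

// loose interleaves small bool fields between pointer-sized ones,
// which forces padding on 64-bit platforms.
type loose struct {
	Debug  bool
	Name   string
	Trace  bool
	Mounts []string
}

// grouped keeps the same fields ordered large-to-small, so it packs
// more tightly.
type grouped struct {
	Name   string
	Mounts []string
	Debug  bool
	Trace  bool
}

func main() {
	// grouped is smaller than loose on typical 64-bit platforms.
	fmt.Println(unsafe.Sizeof(loose{}), unsafe.Sizeof(grouped{}))
}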
@ -819,7 +819,7 @@ func addHypervisporNetworkOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConf
if value, ok := ocispec.Annotations[vcAnnotations.RxRateLimiterMaxRate]; ok { if value, ok := ocispec.Annotations[vcAnnotations.RxRateLimiterMaxRate]; ok {
rxRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64) rxRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64)
if err != nil || rxRateLimiterMaxRate < 0 { if err != nil {
return fmt.Errorf("Error parsing annotation for rx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err) return fmt.Errorf("Error parsing annotation for rx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err)
} }
sbConfig.HypervisorConfig.RxRateLimiterMaxRate = rxRateLimiterMaxRate sbConfig.HypervisorConfig.RxRateLimiterMaxRate = rxRateLimiterMaxRate
@ -827,7 +827,7 @@ func addHypervisporNetworkOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConf
if value, ok := ocispec.Annotations[vcAnnotations.TxRateLimiterMaxRate]; ok { if value, ok := ocispec.Annotations[vcAnnotations.TxRateLimiterMaxRate]; ok {
txRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64) txRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64)
if err != nil || txRateLimiterMaxRate < 0 { if err != nil {
return fmt.Errorf("Error parsing annotation for tx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err) return fmt.Errorf("Error parsing annotation for tx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err)
} }
sbConfig.HypervisorConfig.TxRateLimiterMaxRate = txRateLimiterMaxRate sbConfig.HypervisorConfig.TxRateLimiterMaxRate = txRateLimiterMaxRate
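Dropping the rxRateLimiterMaxRate < 0 and txRateLimiterMaxRate < 0 clauses fixes staticcheck SA4003: ParseUint returns a uint64, and an unsigned value can never be negative, so the extra comparison was always false. In miniature:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	rate, err := strconv.ParseUint("12500", 10, 64)
	// staticcheck SA4003: rate is a uint64, so a "rate < 0" clause
	// here could never be true and adds nothing to the check.
	if err != nil {
		fmt.Println("bad rate limit:", err)
		return
	}
	fmt.Println("rate limit:", rate)
}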

View File

@ -121,13 +121,9 @@ const (
scsiControllerID = "scsi0" scsiControllerID = "scsi0"
rngID = "rng0" rngID = "rng0"
vsockKernelOption = "agent.use_vsock"
fallbackFileBackedMemDir = "/dev/shm" fallbackFileBackedMemDir = "/dev/shm"
) )
var qemuMajorVersion int
var qemuMinorVersion int
// agnostic list of kernel parameters // agnostic list of kernel parameters
var defaultKernelParameters = []Param{ var defaultKernelParameters = []Param{
{"panic", "1"}, {"panic", "1"},
@ -472,7 +468,7 @@ func (q *qemu) createSandbox(ctx context.Context, id string, networkNS NetworkNa
// Save the tracing context // Save the tracing context
q.ctx = ctx q.ctx = ctx
span, ctx := q.trace(ctx, "createSandbox") span, _ := q.trace(ctx, "createSandbox")
defer span.End() defer span.End()
if err := q.setup(ctx, id, hypervisorConfig); err != nil { if err := q.setup(ctx, id, hypervisorConfig); err != nil {
@ -776,7 +772,7 @@ func (q *qemu) setupVirtioMem() error {
// startSandbox will start the Sandbox's VM. // startSandbox will start the Sandbox's VM.
func (q *qemu) startSandbox(ctx context.Context, timeout int) error { func (q *qemu) startSandbox(ctx context.Context, timeout int) error {
span, ctx := q.trace(ctx, "startSandbox") span, _ := q.trace(ctx, "startSandbox")
defer span.End() defer span.End()
if q.config.Debug { if q.config.Debug {
@ -921,9 +917,6 @@ func (q *qemu) waitSandbox(ctx context.Context, timeout int) error {
q.qmpMonitorCh.disconn = disconnectCh q.qmpMonitorCh.disconn = disconnectCh
defer q.qmpShutdown() defer q.qmpShutdown()
qemuMajorVersion = ver.Major
qemuMinorVersion = ver.Minor
q.Logger().WithFields(logrus.Fields{ q.Logger().WithFields(logrus.Fields{
"qmp-major-version": ver.Major, "qmp-major-version": ver.Major,
"qmp-minor-version": ver.Minor, "qmp-minor-version": ver.Minor,
@ -1024,9 +1017,8 @@ func (q *qemu) togglePauseSandbox(ctx context.Context, pause bool) error {
if pause { if pause {
return q.qmpMonitorCh.qmp.ExecuteStop(q.qmpMonitorCh.ctx) return q.qmpMonitorCh.qmp.ExecuteStop(q.qmpMonitorCh.ctx)
} else {
return q.qmpMonitorCh.qmp.ExecuteCont(q.qmpMonitorCh.ctx)
} }
return q.qmpMonitorCh.qmp.ExecuteCont(q.qmpMonitorCh.ctx)
} }
func (q *qemu) qmpSetup() error { func (q *qemu) qmpSetup() error {
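togglePauseSandbox here, and canDumpGuestMemory and hotplugBlockDevice below, drop an else whose sibling if branch ends in a return; golint reports this as "if block ends with a return statement, so drop this else and outdent its block". The shape of the fix:

package main

import "fmt"

// toggle shows the indent-error-flow fix: the if branch returns, so
// the else is dropped and its body outdented to the main flow.
func toggle(pause bool) string {
	if pause {
		return "paused"
	}
	return "running"
}

func main() { fmt.Println(toggle(true), toggle(false)) }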
@ -1067,19 +1059,13 @@ func (q *qemu) qmpSetup() error {
} }
func (q *qemu) loopQMPEvent(event chan govmmQemu.QMPEvent) { func (q *qemu) loopQMPEvent(event chan govmmQemu.QMPEvent) {
for { for e := range event {
select { q.Logger().WithField("event", e).Debug("got QMP event")
case e, open := <-event: if e.Name == "GUEST_PANICKED" {
if !open { go q.handleGuestPanic()
q.Logger().Infof("QMP event channel closed")
return
}
q.Logger().WithField("event", e).Debug("got QMP event")
if e.Name == "GUEST_PANICKED" {
go q.handleGuestPanic()
}
} }
} }
q.Logger().Infof("QMP event channel closed")
} }
func (q *qemu) handleGuestPanic() { func (q *qemu) handleGuestPanic() {
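The loopQMPEvent rewrite replaces a for/select with a single receive case by a plain range over the channel (gosimple S1000); range exits by itself when the channel is closed, which removes the manual open check, and the closed-channel log moves after the loop. A runnable sketch, with event names taken from the hunk above:

package main

import "fmt"

// consume drains events until the channel is closed; range makes the
// closed-channel exit implicit, so no explicit "open" flag is needed.
func consume(events chan string) {
	for e := range events {
		fmt.Println("got QMP event:", e)
		if e == "GUEST_PANICKED" {
			fmt.Println("would handle guest panic here")
		}
	}
	fmt.Println("QMP event channel closed")
}

func main() {
	ch := make(chan string, 2)
	ch <- "GUEST_PANICKED"
	ch <- "SHUTDOWN"
	close(ch)
	consume(ch)
}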
@ -1116,13 +1102,12 @@ func (q *qemu) canDumpGuestMemory(dumpSavePath string) error {
exceptMemorySize := guestMemorySizeInBytes * 2 exceptMemorySize := guestMemorySizeInBytes * 2
if availSpaceInBytes >= exceptMemorySize { if availSpaceInBytes >= exceptMemorySize {
return nil return nil
} else {
return fmt.Errorf("there are not enough free space to store memory dump file. Except %d bytes, but only %d bytes available", exceptMemorySize, availSpaceInBytes)
} }
return fmt.Errorf("there are not enough free space to store memory dump file. Except %d bytes, but only %d bytes available", exceptMemorySize, availSpaceInBytes)
} }
// dumpSandboxMetaInfo saves meta information for debugging purposes, including: // dumpSandboxMetaInfo saves meta information for debugging purposes, including:
// hypervisor verison, sandbox/container state, hypervisor config // hypervisor version, sandbox/container state, hypervisor config
func (q *qemu) dumpSandboxMetaInfo(dumpSavePath string) { func (q *qemu) dumpSandboxMetaInfo(dumpSavePath string) {
dumpStatePath := filepath.Join(dumpSavePath, "state") dumpStatePath := filepath.Join(dumpSavePath, "state")
@ -1377,19 +1362,18 @@ func (q *qemu) hotplugBlockDevice(ctx context.Context, drive *config.BlockDrive,
if op == addDevice { if op == addDevice {
return q.hotplugAddBlockDevice(ctx, drive, op, devID) return q.hotplugAddBlockDevice(ctx, drive, op, devID)
} else { }
if q.config.BlockDeviceDriver == config.VirtioBlock { if q.config.BlockDeviceDriver == config.VirtioBlock {
if err := q.arch.removeDeviceFromBridge(drive.ID); err != nil { if err := q.arch.removeDeviceFromBridge(drive.ID); err != nil {
return err
}
}
if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, devID); err != nil {
return err return err
} }
return q.qmpMonitorCh.qmp.ExecuteBlockdevDel(q.qmpMonitorCh.ctx, drive.ID)
} }
if err := q.qmpMonitorCh.qmp.ExecuteDeviceDel(q.qmpMonitorCh.ctx, devID); err != nil {
return err
}
return q.qmpMonitorCh.qmp.ExecuteBlockdevDel(q.qmpMonitorCh.ctx, drive.ID)
} }
func (q *qemu) hotplugVhostUserDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation) error { func (q *qemu) hotplugVhostUserDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op operation) error {
@ -1625,7 +1609,7 @@ func (q *qemu) hotplugDevice(ctx context.Context, devInfo interface{}, devType d
} }
func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) { func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, ctx := q.trace(ctx, "hotplugAddDevice") span, _ := q.trace(ctx, "hotplugAddDevice")
defer span.End() defer span.End()
data, err := q.hotplugDevice(ctx, devInfo, devType, addDevice) data, err := q.hotplugDevice(ctx, devInfo, devType, addDevice)
@ -1637,7 +1621,7 @@ func (q *qemu) hotplugAddDevice(ctx context.Context, devInfo interface{}, devTyp
} }
func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) { func (q *qemu) hotplugRemoveDevice(ctx context.Context, devInfo interface{}, devType deviceType) (interface{}, error) {
span, ctx := q.trace(ctx, "hotplugRemoveDevice") span, _ := q.trace(ctx, "hotplugRemoveDevice")
defer span.End() defer span.End()
data, err := q.hotplugDevice(ctx, devInfo, devType, removeDevice) data, err := q.hotplugDevice(ctx, devInfo, devType, removeDevice)
@ -1849,14 +1833,14 @@ func (q *qemu) hotplugAddMemory(memDev *memoryDevice) (int, error) {
} }
func (q *qemu) pauseSandbox(ctx context.Context) error { func (q *qemu) pauseSandbox(ctx context.Context) error {
span, ctx := q.trace(ctx, "pauseSandbox") span, _ := q.trace(ctx, "pauseSandbox")
defer span.End() defer span.End()
return q.togglePauseSandbox(ctx, true) return q.togglePauseSandbox(ctx, true)
} }
func (q *qemu) resumeSandbox(ctx context.Context) error { func (q *qemu) resumeSandbox(ctx context.Context) error {
span, ctx := q.trace(ctx, "resumeSandbox") span, _ := q.trace(ctx, "resumeSandbox")
defer span.End() defer span.End()
return q.togglePauseSandbox(ctx, false) return q.togglePauseSandbox(ctx, false)

View File

@ -445,7 +445,7 @@ func (s *Sandbox) getAndStoreGuestDetails(ctx context.Context) error {
// to physically create that sandbox i.e. starts a VM for that sandbox to eventually // to physically create that sandbox i.e. starts a VM for that sandbox to eventually
// be started. // be started.
func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) { func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (*Sandbox, error) {
span, ctx := trace(ctx, "createSandbox") span, _ := trace(ctx, "createSandbox")
defer span.End() defer span.End()
if err := createAssets(ctx, &sandboxConfig); err != nil { if err := createAssets(ctx, &sandboxConfig); err != nil {
@ -483,7 +483,7 @@ func createSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Fac
} }
func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (sb *Sandbox, retErr error) { func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factory) (sb *Sandbox, retErr error) {
span, ctx := trace(ctx, "newSandbox") span, _ := trace(ctx, "newSandbox")
defer span.End() defer span.End()
if !sandboxConfig.valid() { if !sandboxConfig.valid() {
@ -618,7 +618,7 @@ func (s *Sandbox) createCgroupManager() error {
// storeSandbox stores a sandbox config. // storeSandbox stores a sandbox config.
func (s *Sandbox) storeSandbox(ctx context.Context) error { func (s *Sandbox) storeSandbox(ctx context.Context) error {
span, ctx := s.trace(ctx, "storeSandbox") span, _ := s.trace(ctx, "storeSandbox")
defer span.End() defer span.End()
// flush data to storage // flush data to storage
@ -628,15 +628,6 @@ func (s *Sandbox) storeSandbox(ctx context.Context) error {
return nil return nil
} }
func rLockSandbox(sandboxID string) (func() error, error) {
store, err := persist.GetDriver()
if err != nil {
return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
}
return store.Lock(sandboxID, false)
}
func rwLockSandbox(sandboxID string) (func() error, error) { func rwLockSandbox(sandboxID string) (func() error, error) {
store, err := persist.GetDriver() store, err := persist.GetDriver()
if err != nil { if err != nil {
@ -761,7 +752,7 @@ func (s *Sandbox) createNetwork(ctx context.Context) error {
return nil return nil
} }
span, ctx := s.trace(ctx, "createNetwork") span, _ := s.trace(ctx, "createNetwork")
defer span.End() defer span.End()
s.networkNS = NetworkNamespace{ s.networkNS = NetworkNamespace{
@ -951,7 +942,7 @@ func (cw *consoleWatcher) start(s *Sandbox) (err error) {
scanner = bufio.NewScanner(cw.conn) scanner = bufio.NewScanner(cw.conn)
case consoleProtoPty: case consoleProtoPty:
// read-only // read-only
cw.ptyConsole, err = os.Open(cw.consoleURL) cw.ptyConsole, _ = os.Open(cw.consoleURL)
scanner = bufio.NewScanner(cw.ptyConsole) scanner = bufio.NewScanner(cw.ptyConsole)
default: default:
return fmt.Errorf("unknown console proto %s", cw.proto) return fmt.Errorf("unknown console proto %s", cw.proto)
@ -1003,7 +994,7 @@ func (cw *consoleWatcher) stop() {
// startVM starts the VM. // startVM starts the VM.
func (s *Sandbox) startVM(ctx context.Context) (err error) { func (s *Sandbox) startVM(ctx context.Context) (err error) {
span, ctx := s.trace(ctx, "startVM") span, _ := s.trace(ctx, "startVM")
defer span.End() defer span.End()
s.Logger().Info("Starting VM") s.Logger().Info("Starting VM")
@ -1084,7 +1075,7 @@ func (s *Sandbox) startVM(ctx context.Context) (err error) {
// stopVM: stop the sandbox's VM // stopVM: stop the sandbox's VM
func (s *Sandbox) stopVM(ctx context.Context) error { func (s *Sandbox) stopVM(ctx context.Context) error {
span, ctx := s.trace(ctx, "stopVM") span, _ := s.trace(ctx, "stopVM")
defer span.End() defer span.End()
s.Logger().Info("Stopping sandbox in the VM") s.Logger().Info("Stopping sandbox in the VM")
@ -1460,7 +1451,7 @@ func (s *Sandbox) ResumeContainer(ctx context.Context, containerID string) error
// createContainers registers all containers, creates the // createContainers registers all containers, creates the
// containers in the guest and starts one shim per container. // containers in the guest and starts one shim per container.
func (s *Sandbox) createContainers(ctx context.Context) error { func (s *Sandbox) createContainers(ctx context.Context) error {
span, ctx := s.trace(ctx, "createContainers") span, _ := s.trace(ctx, "createContainers")
defer span.End() defer span.End()
for _, contConfig := range s.config.Containers { for _, contConfig := range s.config.Containers {
@ -1532,7 +1523,7 @@ func (s *Sandbox) Start(ctx context.Context) error {
// will be destroyed. // will be destroyed.
// When force is true, ignore guest related stop failures. // When force is true, ignore guest related stop failures.
func (s *Sandbox) Stop(ctx context.Context, force bool) error { func (s *Sandbox) Stop(ctx context.Context, force bool) error {
span, ctx := s.trace(ctx, "Stop") span, _ := s.trace(ctx, "Stop")
defer span.End() defer span.End()
if s.state.State == types.StateStopped { if s.state.State == types.StateStopped {
@ -1643,7 +1634,7 @@ func (s *Sandbox) unsetSandboxBlockIndex(index int) error {
// HotplugAddDevice is used for add a device to sandbox // HotplugAddDevice is used for add a device to sandbox
// Sandbox implement DeviceReceiver interface from device/api/interface.go // Sandbox implement DeviceReceiver interface from device/api/interface.go
func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devType config.DeviceType) error { func (s *Sandbox) HotplugAddDevice(ctx context.Context, device api.Device, devType config.DeviceType) error {
span, ctx := s.trace(ctx, "HotplugAddDevice") span, _ := s.trace(ctx, "HotplugAddDevice")
defer span.End() defer span.End()
if s.config.SandboxCgroupOnly { if s.config.SandboxCgroupOnly {

View File

@ -62,7 +62,7 @@ var (
Help: "Open FDs for hypervisor.", Help: "Open FDs for hypervisor.",
}) })
agentRpcDurationsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ agentRPCDurationsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: namespaceKatashim, Namespace: namespaceKatashim,
Name: "agent_rpc_durations_histogram_milliseconds", Name: "agent_rpc_durations_histogram_milliseconds",
Help: "RPC latency distributions.", Help: "RPC latency distributions.",
@ -79,7 +79,7 @@ func RegisterMetrics() {
prometheus.MustRegister(hypervisorNetdev) prometheus.MustRegister(hypervisorNetdev)
prometheus.MustRegister(hypervisorIOStat) prometheus.MustRegister(hypervisorIOStat)
prometheus.MustRegister(hypervisorOpenFDs) prometheus.MustRegister(hypervisorOpenFDs)
prometheus.MustRegister(agentRpcDurationsHistogram) prometheus.MustRegister(agentRPCDurationsHistogram)
} }
// UpdateRuntimeMetrics update shim/hypervisor's metrics // UpdateRuntimeMetrics update shim/hypervisor's metrics