diff --git a/src/runtime/cli/kata-check.go b/src/runtime/cli/kata-check.go
index 6755f52cf..8b8897881 100644
--- a/src/runtime/cli/kata-check.go
+++ b/src/runtime/cli/kata-check.go
@@ -63,7 +63,6 @@ const (
     moduleParamDir        = "parameters"
     successMessageCapable = "System is capable of running " + project
     successMessageCreate  = "System can currently create " + project
-    successMessageVersion = "Version consistency of " + project + " is verified"
     failMessage           = "System is not capable of running " + project
 
     kernelPropertyCorrect = "Kernel property value correct"
@@ -389,7 +388,7 @@ EXAMPLES:
         span, _ := katautils.Trace(ctx, "kata-check")
         defer span.Finish()
 
-        if context.Bool("no-network-checks") == false && os.Getenv(noNetworkEnvVar) == "" {
+        if !context.Bool("no-network-checks") && os.Getenv(noNetworkEnvVar) == "" {
             cmd := RelCmdCheck
 
             if context.Bool("only-list-releases") {
diff --git a/src/runtime/cli/main.go b/src/runtime/cli/main.go
index 2b721eaa2..053207810 100644
--- a/src/runtime/cli/main.go
+++ b/src/runtime/cli/main.go
@@ -62,9 +62,6 @@ var originalLoggerLevel = logrus.WarnLevel
 
 var debug = false
 
-// if true, coredump when an internal error occurs or a fatal signal is received
-var crashOnError = false
-
 // concrete virtcontainer implementation
 var virtcontainersImpl = &vc.VCImpl{}
 
@@ -325,7 +322,6 @@ func beforeSubcommands(c *cli.Context) error {
     }
     if !subCmdIsCheckCmd {
         debug = runtimeConfig.Debug
-        crashOnError = runtimeConfig.Debug
 
         if traceRootSpan != "" {
             // Create the tracer.
diff --git a/src/runtime/cli/main_test.go b/src/runtime/cli/main_test.go
index cd51acda5..71ba75173 100644
--- a/src/runtime/cli/main_test.go
+++ b/src/runtime/cli/main_test.go
@@ -8,7 +8,6 @@ package main
 import (
     "bytes"
     "context"
-    "encoding/json"
     "errors"
     "flag"
     "fmt"
@@ -28,7 +27,6 @@ import (
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/compatoci"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/vcmock"
-    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
     specs "github.com/opencontainers/runtime-spec/specs-go"
     "github.com/stretchr/testify/assert"
     jaeger "github.com/uber/jaeger-client-go"
@@ -43,10 +41,8 @@ const (
     // small docker image used to create root filesystems from
     testDockerImage = "busybox"
 
-    testSandboxID   = "99999999-9999-9999-99999999999999999"
-    testContainerID = "1"
-    testBundle      = "bundle"
-    testConsole     = "/dev/pts/999"
+    testBundle  = "bundle"
+    testConsole = "/dev/pts/999"
 )
 
 var (
@@ -386,44 +382,6 @@ func makeOCIBundle(bundleDir string) error {
     return nil
 }
 
-func writeOCIConfigFile(spec specs.Spec, configPath string) error {
-    if configPath == "" {
-        return errors.New("BUG: need config file path")
-    }
-
-    bytes, err := json.MarshalIndent(spec, "", "\t")
-    if err != nil {
-        return err
-    }
-
-    return ioutil.WriteFile(configPath, bytes, testFileMode)
-}
-
-func newSingleContainerStatus(containerID string, containerState types.ContainerState, annotations map[string]string, spec *specs.Spec) vc.ContainerStatus {
-    return vc.ContainerStatus{
-        ID:          containerID,
-        State:       containerState,
-        Annotations: annotations,
-        Spec:        spec,
-    }
-}
-
-func execCLICommandFunc(assertHandler *assert.Assertions, cliCommand cli.Command, set *flag.FlagSet, expectedErr bool) {
-    ctx := createCLIContext(set)
-    ctx.App.Name = "foo"
-
-    fn, ok := cliCommand.Action.(func(context *cli.Context) error)
-    assertHandler.True(ok)
-
-    err := fn(ctx)
-
-    if expectedErr {
-        assertHandler.Error(err)
-    } else {
-        assertHandler.Nil(err)
-    }
-}
-
 func createCLIContextWithApp(flagSet *flag.FlagSet, app *cli.App) *cli.Context {
     ctx := cli.NewContext(app, flagSet, nil)
 
diff --git a/src/runtime/cli/utils.go b/src/runtime/cli/utils.go
index 4d982e8cb..be2b9e5d7 100644
--- a/src/runtime/cli/utils.go
+++ b/src/runtime/cli/utils.go
@@ -189,21 +189,3 @@ func constructVersionInfo(version string) VersionInfo {
     }
 
 }
-
-func versionEqual(a VersionInfo, b VersionInfo) bool {
-    av, err := semver.Make(a.Semver)
-    if err != nil {
-        return false
-    }
-
-    bv, err := semver.Make(b.Semver)
-    if err != nil {
-        return false
-    }
-
-    if av.Major == bv.Major && av.Minor == bv.Minor && av.Patch == bv.Patch {
-        return true
-    }
-
-    return false
-}
diff --git a/src/runtime/containerd-shim-v2/shim_management.go b/src/runtime/containerd-shim-v2/shim_management.go
index f971a2a60..24b773605 100644
--- a/src/runtime/containerd-shim-v2/shim_management.go
+++ b/src/runtime/containerd-shim-v2/shim_management.go
@@ -65,8 +65,7 @@ func (s *service) serveMetrics(w http.ResponseWriter, r *http.Request) {
     // encode the metrics
     encoder := expfmt.NewEncoder(w, expfmt.FmtText)
     for _, mf := range mfs {
-        if err := encoder.Encode(mf); err != nil {
-        }
+        encoder.Encode(mf)
     }
 
     // if using an old agent, only collect shim/sandbox metrics.
@@ -149,7 +148,7 @@ func (s *service) startManagementServer(ctx context.Context, ociSpec *specs.Spec
 
     shimMgtLog.Info("kata management inited")
 
-    // bind hanlder
+    // bind handler
     m := http.NewServeMux()
     m.Handle("/metrics", http.HandlerFunc(s.serveMetrics))
     m.Handle("/agent-url", http.HandlerFunc(s.agentURL))
diff --git a/src/runtime/containerd-shim-v2/shim_metrics.go b/src/runtime/containerd-shim-v2/shim_metrics.go
index 455af2a2b..0ff450685 100644
--- a/src/runtime/containerd-shim-v2/shim_metrics.go
+++ b/src/runtime/containerd-shim-v2/shim_metrics.go
@@ -176,7 +176,7 @@ func calcOverhead(initialSandboxStats, finishSandboxStats vc.SandboxStats, initi
     cpuUsageGuest := float64(guestFinalCPU-guestInitCPU) / deltaTime * 100
     cpuUsageHost := float64(hostFinalCPU-hostInitCPU) / deltaTime * 100
 
-    return float64(hostMemoryUsage - guestMemoryUsage), float64(cpuUsageHost - cpuUsageGuest)
+    return float64(hostMemoryUsage - guestMemoryUsage), cpuUsageHost - cpuUsageGuest
 }
 
 func (s *service) getPodOverhead() (float64, float64, error) {
diff --git a/src/runtime/containerd-shim-v2/shim_metrics_test.go b/src/runtime/containerd-shim-v2/shim_metrics_test.go
index 03b09ee99..fccb72cfe 100644
--- a/src/runtime/containerd-shim-v2/shim_metrics_test.go
+++ b/src/runtime/containerd-shim-v2/shim_metrics_test.go
@@ -96,7 +96,7 @@ func TestStatsSandbox(t *testing.T) {
     sandbox.StatsFunc = getSandboxCPUFunc(2000, 110000)
     sandbox.StatsContainerFunc = getStatsContainerCPUFunc(200, 400, 20000, 40000)
 
-    finishSandboxStats, finishContainersStats, err := s.statsSandbox()
+    finishSandboxStats, finishContainersStats, _ := s.statsSandbox()
 
     // calc overhead
     mem, cpu := calcOverhead(initialSandboxStats, finishSandboxStats, initialContainerStats, finishContainersStats, 1e9)
diff --git a/src/runtime/pkg/kata-monitor/metrics_test.go b/src/runtime/pkg/kata-monitor/metrics_test.go
index 8ea63b1d7..5263d2a93 100644
--- a/src/runtime/pkg/kata-monitor/metrics_test.go
+++ b/src/runtime/pkg/kata-monitor/metrics_test.go
@@ -107,14 +107,14 @@ func TestEncodeMetricFamily(t *testing.T) {
     scrapeCount.Inc()
     scrapeCount.Inc()
 
-    mfs, err := prometheus.DefaultGatherer.Gather()
+    mfs, _ := prometheus.DefaultGatherer.Gather()
 
     // create encoder
     buf := bytes.NewBufferString("")
     encoder := expfmt.NewEncoder(buf, expfmt.FmtText)
 
     // encode metrics to text format
-    err = encodeMetricFamily(mfs, encoder)
+    err := encodeMetricFamily(mfs, encoder)
     assert.Nil(err, "encodeMetricFamily should not return error")
 
     // here will be to many metrics,
diff --git a/src/runtime/pkg/kata-monitor/monitor.go b/src/runtime/pkg/kata-monitor/monitor.go
index 3254b5202..64266fdb7 100644
--- a/src/runtime/pkg/kata-monitor/monitor.go
+++ b/src/runtime/pkg/kata-monitor/monitor.go
@@ -38,7 +38,7 @@ type KataMonitor struct {
 // NewKataMonitor create and return a new KataMonitor instance
 func NewKataMonitor(containerdAddr, containerdConfigFile string) (*KataMonitor, error) {
     if containerdAddr == "" {
-        return nil, fmt.Errorf("Containerd serve address missing.")
+        return nil, fmt.Errorf("containerd serve address missing")
     }
 
     containerdConf := &srvconfig.Config{
@@ -82,7 +82,7 @@ func (km *KataMonitor) initSandboxCache() error {
 
 // GetAgentURL returns agent URL
 func (km *KataMonitor) GetAgentURL(w http.ResponseWriter, r *http.Request) {
-    sandboxID, err := getSandboxIdFromReq(r)
+    sandboxID, err := getSandboxIDFromReq(r)
     if err != nil {
         commonServeError(w, http.StatusBadRequest, err)
         return
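Note: the NewKataMonitor change above follows the usual Go convention that error strings start in lower case and carry no trailing punctuation, because callers normally wrap them. A minimal standalone sketch of that wrapping (illustrative only, not code from this repository):

    package main

    import "fmt"

    func newMonitor(addr string) error {
        if addr == "" {
            return fmt.Errorf("containerd serve address missing")
        }
        return nil
    }

    func main() {
        if err := newMonitor(""); err != nil {
            // Wrapped, the message reads as one sentence:
            // "init kata-monitor: containerd serve address missing"
            fmt.Println(fmt.Errorf("init kata-monitor: %w", err))
        }
    }
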
diff --git a/src/runtime/pkg/kata-monitor/pprof.go b/src/runtime/pkg/kata-monitor/pprof.go
index 9e54315a4..86f39c466 100644
--- a/src/runtime/pkg/kata-monitor/pprof.go
+++ b/src/runtime/pkg/kata-monitor/pprof.go
@@ -21,7 +21,7 @@ func serveError(w http.ResponseWriter, status int, txt string) {
 }
 
 func (km *KataMonitor) composeSocketAddress(r *http.Request) (string, error) {
-    sandbox, err := getSandboxIdFromReq(r)
+    sandbox, err := getSandboxIDFromReq(r)
     if err != nil {
         return "", err
     }
diff --git a/src/runtime/pkg/kata-monitor/sandbox_cache.go b/src/runtime/pkg/kata-monitor/sandbox_cache.go
index 8d3b57975..f749ef464 100644
--- a/src/runtime/pkg/kata-monitor/sandbox_cache.go
+++ b/src/runtime/pkg/kata-monitor/sandbox_cache.go
@@ -157,7 +157,7 @@ func (sc *sandboxCache) startEventsListener(addr string) error {
                 // if the container is a sandbox container,
                 // means the VM is started, and can start to collect metrics from the VM.
                 if isSandboxContainer(&c) {
-                    // we can simply put the contaienrid in sandboxes list if the conatiner is a sandbox container
+                    // we can simply put the contaienrid in sandboxes list if the container is a sandbox container
                     sc.putIfNotExists(cc.ID, e.Namespace)
                     monitorLog.WithField("container", cc.ID).Info("add sandbox to cache")
                 }
diff --git a/src/runtime/pkg/kata-monitor/shim_client.go b/src/runtime/pkg/kata-monitor/shim_client.go
index 0c1c1c81a..b50b8bd78 100644
--- a/src/runtime/pkg/kata-monitor/shim_client.go
+++ b/src/runtime/pkg/kata-monitor/shim_client.go
@@ -25,7 +25,7 @@ func commonServeError(w http.ResponseWriter, status int, err error) {
     }
 }
 
-func getSandboxIdFromReq(r *http.Request) (string, error) {
+func getSandboxIDFromReq(r *http.Request) (string, error) {
     sandbox := r.URL.Query().Get("sandbox")
     if sandbox != "" {
         return sandbox, nil
diff --git a/src/runtime/pkg/katautils/config-settings.go.in b/src/runtime/pkg/katautils/config-settings.go.in
index 2aaba8f88..32a22313f 100644
--- a/src/runtime/pkg/katautils/config-settings.go.in
+++ b/src/runtime/pkg/katautils/config-settings.go.in
@@ -17,7 +17,6 @@ var defaultInitrdPath = "/usr/share/kata-containers/kata-containers-initrd.img"
 var defaultFirmwarePath = ""
 var defaultMachineAccelerators = ""
 var defaultCPUFeatures = ""
-var defaultShimPath = "/usr/libexec/kata-containers/kata-shim"
 var systemdUnitName = "kata-containers.target"
 
 const defaultKernelParams = ""
diff --git a/src/runtime/pkg/katautils/config.go b/src/runtime/pkg/katautils/config.go
index b2df07b07..fa875a29b 100644
--- a/src/runtime/pkg/katautils/config.go
+++ b/src/runtime/pkg/katautils/config.go
@@ -71,12 +71,9 @@ type factory struct {
 
 type hypervisor struct {
     Path string `toml:"path"`
-    HypervisorPathList []string `toml:"valid_hypervisor_paths"`
     JailerPath string `toml:"jailer_path"`
-    JailerPathList []string `toml:"valid_jailer_paths"`
     Kernel string `toml:"kernel"`
     CtlPath string `toml:"ctlpath"`
-    CtlPathList []string `toml:"valid_ctlpaths"`
     Initrd string `toml:"initrd"`
     Image string `toml:"image"`
     Firmware string `toml:"firmware"`
@@ -88,16 +85,23 @@ type hypervisor struct {
     EntropySource string `toml:"entropy_source"`
     SharedFS string `toml:"shared_fs"`
     VirtioFSDaemon string `toml:"virtio_fs_daemon"`
-    VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"`
     VirtioFSCache string `toml:"virtio_fs_cache"`
-    VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"`
-    VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"`
-    BlockDeviceCacheSet bool `toml:"block_device_cache_set"`
-    BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
-    BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
-    EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
     VhostUserStorePath string `toml:"vhost_user_store_path"`
+    FileBackedMemRootDir string `toml:"file_mem_backend"`
+    GuestHookPath string `toml:"guest_hook_path"`
+    GuestMemoryDumpPath string `toml:"guest_memory_dump_path"`
+    HypervisorPathList []string `toml:"valid_hypervisor_paths"`
+    JailerPathList []string `toml:"valid_jailer_paths"`
+    CtlPathList []string `toml:"valid_ctlpaths"`
+    VirtioFSDaemonList []string `toml:"valid_virtio_fs_daemon_paths"`
+    VirtioFSExtraArgs []string `toml:"virtio_fs_extra_args"`
+    PFlashList []string `toml:"pflashes"`
     VhostUserStorePathList []string `toml:"valid_vhost_user_store_paths"`
+    FileBackedMemRootList []string `toml:"valid_file_mem_backends"`
+    EnableAnnotations []string `toml:"enable_annotations"`
+    RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"`
+    TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"`
+    VirtioFSCacheSize uint32 `toml:"virtio_fs_cache_size"`
     NumVCPUs int32 `toml:"default_vcpus"`
     DefaultMaxVCPUs uint32 `toml:"default_maxvcpus"`
     MemorySize uint32 `toml:"default_memory"`
@@ -106,14 +110,16 @@ type hypervisor struct {
     DefaultBridges uint32 `toml:"default_bridges"`
     Msize9p uint32 `toml:"msize_9p"`
     PCIeRootPort uint32 `toml:"pcie_root_port"`
+    BlockDeviceCacheSet bool `toml:"block_device_cache_set"`
+    BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
+    BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
+    EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
     DisableBlockDeviceUse bool `toml:"disable_block_device_use"`
     MemPrealloc bool `toml:"enable_mem_prealloc"`
     HugePages bool `toml:"enable_hugepages"`
     VirtioMem bool `toml:"enable_virtio_mem"`
     IOMMU bool `toml:"enable_iommu"`
     IOMMUPlatform bool `toml:"enable_iommu_platform"`
-    FileBackedMemRootDir string `toml:"file_mem_backend"`
-    FileBackedMemRootList []string `toml:"valid_file_mem_backends"`
     Swap bool `toml:"enable_swap"`
     Debug bool `toml:"enable_debug"`
     DisableNestingChecks bool `toml:"disable_nesting_checks"`
@@ -121,29 +127,30 @@ type hypervisor struct {
     DisableImageNvdimm bool `toml:"disable_image_nvdimm"`
     HotplugVFIOOnRootBus bool `toml:"hotplug_vfio_on_root_bus"`
     DisableVhostNet bool `toml:"disable_vhost_net"`
-    GuestHookPath string `toml:"guest_hook_path"`
-    RxRateLimiterMaxRate uint64 `toml:"rx_rate_limiter_max_rate"`
-    TxRateLimiterMaxRate uint64 `toml:"tx_rate_limiter_max_rate"`
-    EnableAnnotations []string `toml:"enable_annotations"`
+    GuestMemoryDumpPaging bool `toml:"guest_memory_dump_paging"`
 }
 
 type runtime struct {
+    InterNetworkModel string `toml:"internetworking_model"`
+    JaegerEndpoint string `toml:"jaeger_endpoint"`
+    JaegerUser string `toml:"jaeger_user"`
+    JaegerPassword string `toml:"jaeger_password"`
+    SandboxBindMounts []string `toml:"sandbox_bind_mounts"`
+    Experimental []string `toml:"experimental"`
     Debug bool `toml:"enable_debug"`
     Tracing bool `toml:"enable_tracing"`
     DisableNewNetNs bool `toml:"disable_new_netns"`
     DisableGuestSeccomp bool `toml:"disable_guest_seccomp"`
     SandboxCgroupOnly bool `toml:"sandbox_cgroup_only"`
-    Experimental []string `toml:"experimental"`
-    InterNetworkModel string `toml:"internetworking_model"`
     EnablePprof bool `toml:"enable_pprof"`
 }
 
 type agent struct {
-    Debug bool `toml:"enable_debug"`
-    Tracing bool `toml:"enable_tracing"`
     TraceMode string `toml:"trace_mode"`
     TraceType string `toml:"trace_type"`
     KernelModules []string `toml:"kernel_modules"`
+    Debug bool `toml:"enable_debug"`
+    Tracing bool `toml:"enable_tracing"`
     DebugConsoleEnabled bool `toml:"debug_console_enabled"`
 }
 
@@ -424,20 +431,12 @@ func (h hypervisor) getInitrdAndImage() (initrd string, image string, err error)
     return
 }
 
-func (h hypervisor) getRxRateLimiterCfg() (uint64, error) {
-    if h.RxRateLimiterMaxRate < 0 {
-        return 0, fmt.Errorf("rx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.RxRateLimiterMaxRate)
-    }
-
-    return h.RxRateLimiterMaxRate, nil
+func (h hypervisor) getRxRateLimiterCfg() uint64 {
+    return h.RxRateLimiterMaxRate
 }
 
-func (h hypervisor) getTxRateLimiterCfg() (uint64, error) {
-    if h.TxRateLimiterMaxRate < 0 {
-        return 0, fmt.Errorf("tx Rate Limiter configuration must be greater than or equal to 0, max_rate %v", h.TxRateLimiterMaxRate)
-    }
-
-    return h.TxRateLimiterMaxRate, nil
+func (h hypervisor) getTxRateLimiterCfg() uint64 {
+    return h.TxRateLimiterMaxRate
 }
 
 func (h hypervisor) getIOMMUPlatform() bool {
@@ -522,15 +521,8 @@ func newFirecrackerHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
         return vc.HypervisorConfig{}, err
     }
 
-    rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg()
-    if err != nil {
-        return vc.HypervisorConfig{}, err
-    }
-
-    txRateLimiterMaxRate, err := h.getTxRateLimiterCfg()
-    if err != nil {
-        return vc.HypervisorConfig{}, err
-    }
+    rxRateLimiterMaxRate := h.getRxRateLimiterCfg()
+    txRateLimiterMaxRate := h.getTxRateLimiterCfg()
 
     return vc.HypervisorConfig{
         HypervisorPath: hypervisor,
@@ -626,15 +618,8 @@ func newQemuHypervisorConfig(h hypervisor) (vc.HypervisorConfig, error) {
         return vc.HypervisorConfig{}, err
     }
 
-    rxRateLimiterMaxRate, err := h.getRxRateLimiterCfg()
-    if err != nil {
-        return vc.HypervisorConfig{}, err
-    }
-
-    txRateLimiterMaxRate, err := h.getTxRateLimiterCfg()
-    if err != nil {
-        return vc.HypervisorConfig{}, err
-    }
+    rxRateLimiterMaxRate := h.getRxRateLimiterCfg()
+    txRateLimiterMaxRate := h.getTxRateLimiterCfg()
 
     return vc.HypervisorConfig{
         HypervisorPath: hypervisor,
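Note: getRxRateLimiterCfg and getTxRateLimiterCfg can drop their error return because RxRateLimiterMaxRate and TxRateLimiterMaxRate are uint64 fields, so the old "< 0" guard could never be true; the ParseUint-based annotation checks further down in pkg/oci/utils.go are simplified for the same reason. A short standalone sketch of the idea (illustrative only, the type and names here are local to the example):

    package main

    import (
        "fmt"
        "strconv"
    )

    // Only the field involved is mirrored here; everything else is omitted.
    type cfg struct {
        RxRateLimiterMaxRate uint64
    }

    // An unsigned field can never hold a negative value, so the getter needs
    // no error path.
    func (c cfg) rxRate() uint64 { return c.RxRateLimiterMaxRate }

    func main() {
        // strconv.ParseUint already rejects negative input, so checking its
        // result against "< 0" afterwards is unreachable as well.
        _, err := strconv.ParseUint("-1", 10, 64)
        fmt.Println(err) // strconv.ParseUint: parsing "-1": invalid syntax

        c := cfg{RxRateLimiterMaxRate: 10000000}
        fmt.Println(c.rxRate()) // 10000000
    }
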
diff --git a/src/runtime/virtcontainers/api_test.go b/src/runtime/virtcontainers/api_test.go
index 7c7fadae5..5ad97ea0a 100644
--- a/src/runtime/virtcontainers/api_test.go
+++ b/src/runtime/virtcontainers/api_test.go
@@ -15,7 +15,6 @@ import (
     "testing"
 
     ktu "github.com/kata-containers/kata-containers/src/runtime/pkg/katatestutils"
-    "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/persist"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
     vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/mock"
@@ -74,16 +73,6 @@ func newBasicTestCmd() types.Cmd {
     return cmd
 }
 
-func rmSandboxDir(sid string) error {
-    store, err := persist.GetDriver()
-    if err != nil {
-        return fmt.Errorf("failed to get fs persist driver: %v", err)
-    }
-
-    store.Destroy(sid)
-    return nil
-}
-
 func newTestSandboxConfigNoop() SandboxConfig {
     bundlePath := filepath.Join(testDir, testBundle)
     containerAnnotations[annotations.BundlePathKey] = bundlePath
@@ -203,26 +192,6 @@ func TestCreateSandboxFailing(t *testing.T) {
  * Benchmarks
  */
 
-func createNewSandboxConfig(hType HypervisorType) SandboxConfig {
-    hypervisorConfig := HypervisorConfig{
-        KernelPath: "/usr/share/kata-containers/vmlinux.container",
-        ImagePath: "/usr/share/kata-containers/kata-containers.img",
-        HypervisorPath: "/usr/bin/qemu-system-x86_64",
-    }
-
-    netConfig := NetworkConfig{}
-
-    return SandboxConfig{
-        ID: testSandboxID,
-        HypervisorType: hType,
-        HypervisorConfig: hypervisorConfig,
-
-        AgentConfig: KataAgentConfig{},
-
-        NetworkConfig: netConfig,
-    }
-}
-
 func newTestContainerConfigNoop(contID string) ContainerConfig {
     // Define the container command and bundle.
     container := ContainerConfig{
diff --git a/src/runtime/virtcontainers/fc.go b/src/runtime/virtcontainers/fc.go
index 37fdbdc10..79685a768 100644
--- a/src/runtime/virtcontainers/fc.go
+++ b/src/runtime/virtcontainers/fc.go
@@ -1243,7 +1243,6 @@ func revertBytes(num uint64) uint64 {
     b := num % 1000
     if a == 0 {
         return num
-    } else {
-        return 1024*revertBytes(a) + b
     }
+    return 1024*revertBytes(a) + b
 }
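Note: the fc.go hunk only flattens an if/else into an early return, the style Go lint tools suggest; the recursion is unchanged. For illustration, a standalone copy of revertBytes with a couple of sample values (the first line, a := num / 1000, is assumed here, since the hunk shows only the remainder calculation and the recursive call):

    package main

    import "fmt"

    // revertBytes re-reads a decimal-scaled size (k = 1000) as a binary-scaled
    // one (Ki = 1024), one three-digit group at a time.
    func revertBytes(num uint64) uint64 {
        a := num / 1000 // assumed; not shown in the hunk
        b := num % 1000
        if a == 0 {
            return num
        }
        return 1024*revertBytes(a) + b
    }

    func main() {
        fmt.Println(revertBytes(2000))    // 2048    (2k -> 2Ki)
        fmt.Println(revertBytes(2000000)) // 2097152 (2M -> 2Mi)
    }
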
diff --git a/src/runtime/virtcontainers/fc_metrics.go b/src/runtime/virtcontainers/fc_metrics.go
index 2a608508b..bb0d423e5 100644
--- a/src/runtime/virtcontainers/fc_metrics.go
+++ b/src/runtime/virtcontainers/fc_metrics.go
@@ -174,11 +174,11 @@ func registerFirecrackerMetrics() {
 
 // updateFirecrackerMetrics update all metrics to the latest values.
 func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
-    // set metrics for ApiServerMetrics
-    apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.ApiServer.ProcessStartupTimeUs))
-    apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.ApiServer.ProcessStartupTimeCpuUs))
-    apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.ApiServer.SyncResponseFails))
-    apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.ApiServer.SyncVmmSendTimeoutCount))
+    // set metrics for APIServerMetrics
+    apiServerMetrics.WithLabelValues("process_startup_time_us").Set(float64(fm.APIServer.ProcessStartupTimeUs))
+    apiServerMetrics.WithLabelValues("process_startup_time_cpu_us").Set(float64(fm.APIServer.ProcessStartupTimeCPUUs))
+    apiServerMetrics.WithLabelValues("sync_response_fails").Set(float64(fm.APIServer.SyncResponseFails))
+    apiServerMetrics.WithLabelValues("sync_vmm_send_timeout_count").Set(float64(fm.APIServer.SyncVmmSendTimeoutCount))
 
     // set metrics for BlockDeviceMetrics
     blockDeviceMetrics.WithLabelValues("activate_fails").Set(float64(fm.Block.ActivateFails))
@@ -199,10 +199,10 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
     blockDeviceMetrics.WithLabelValues("rate_limiter_throttled_events").Set(float64(fm.Block.RateLimiterThrottledEvents))
 
     // set metrics for GetRequestsMetrics
-    getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetApiRequests.InstanceInfoCount))
-    getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetApiRequests.InstanceInfoFails))
-    getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetApiRequests.MachineCfgCount))
-    getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetApiRequests.MachineCfgFails))
+    getRequestsMetrics.WithLabelValues("instance_info_count").Set(float64(fm.GetAPIRequests.InstanceInfoCount))
+    getRequestsMetrics.WithLabelValues("instance_info_fails").Set(float64(fm.GetAPIRequests.InstanceInfoFails))
+    getRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.GetAPIRequests.MachineCfgCount))
+    getRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.GetAPIRequests.MachineCfgFails))
 
     // set metrics for I8042DeviceMetrics
     i8042DeviceMetrics.WithLabelValues("error_count").Set(float64(fm.I8042.ErrorCount))
@@ -216,13 +216,13 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
     performanceMetrics.WithLabelValues("full_create_snapshot").Set(float64(fm.LatenciesUs.FullCreateSnapshot))
     performanceMetrics.WithLabelValues("diff_create_snapshot").Set(float64(fm.LatenciesUs.DiffCreateSnapshot))
     performanceMetrics.WithLabelValues("load_snapshot").Set(float64(fm.LatenciesUs.LoadSnapshot))
-    performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVm))
-    performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVm))
+    performanceMetrics.WithLabelValues("pause_vm").Set(float64(fm.LatenciesUs.PauseVM))
+    performanceMetrics.WithLabelValues("resume_vm").Set(float64(fm.LatenciesUs.ResumeVM))
     performanceMetrics.WithLabelValues("vmm_full_create_snapshot").Set(float64(fm.LatenciesUs.VmmFullCreateSnapshot))
     performanceMetrics.WithLabelValues("vmm_diff_create_snapshot").Set(float64(fm.LatenciesUs.VmmDiffCreateSnapshot))
     performanceMetrics.WithLabelValues("vmm_load_snapshot").Set(float64(fm.LatenciesUs.VmmLoadSnapshot))
-    performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVm))
-    performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVm))
+    performanceMetrics.WithLabelValues("vmm_pause_vm").Set(float64(fm.LatenciesUs.VmmPauseVM))
+    performanceMetrics.WithLabelValues("vmm_resume_vm").Set(float64(fm.LatenciesUs.VmmResumeVM))
 
     // set metrics for LoggerSystemMetrics
     loggerSystemMetrics.WithLabelValues("missed_metrics_count").Set(float64(fm.Logger.MissedMetricsCount))
@@ -273,28 +273,28 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
     netDeviceMetrics.WithLabelValues("tx_spoofed_mac_count").Set(float64(fm.Net.TxSpoofedMacCount))
 
     // set metrics for PatchRequestsMetrics
-    patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchApiRequests.DriveCount))
-    patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchApiRequests.DriveFails))
-    patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchApiRequests.NetworkCount))
-    patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchApiRequests.NetworkFails))
-    patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchApiRequests.MachineCfgCount))
-    patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchApiRequests.MachineCfgFails))
+    patchRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PatchAPIRequests.DriveCount))
+    patchRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PatchAPIRequests.DriveFails))
+    patchRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PatchAPIRequests.NetworkCount))
+    patchRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PatchAPIRequests.NetworkFails))
+    patchRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PatchAPIRequests.MachineCfgCount))
+    patchRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PatchAPIRequests.MachineCfgFails))
 
     // set metrics for PutRequestsMetrics
-    putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutApiRequests.ActionsCount))
-    putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutApiRequests.ActionsFails))
-    putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutApiRequests.BootSourceCount))
-    putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutApiRequests.BootSourceFails))
-    putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutApiRequests.DriveCount))
-    putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutApiRequests.DriveFails))
-    putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutApiRequests.LoggerCount))
-    putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutApiRequests.LoggerFails))
-    putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutApiRequests.MachineCfgCount))
-    putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutApiRequests.MachineCfgFails))
-    putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutApiRequests.MetricsCount))
-    putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutApiRequests.MetricsFails))
-    putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutApiRequests.NetworkCount))
-    putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutApiRequests.NetworkFails))
+    putRequestsMetrics.WithLabelValues("actions_count").Set(float64(fm.PutAPIRequests.ActionsCount))
+    putRequestsMetrics.WithLabelValues("actions_fails").Set(float64(fm.PutAPIRequests.ActionsFails))
+    putRequestsMetrics.WithLabelValues("boot_source_count").Set(float64(fm.PutAPIRequests.BootSourceCount))
+    putRequestsMetrics.WithLabelValues("boot_source_fails").Set(float64(fm.PutAPIRequests.BootSourceFails))
+    putRequestsMetrics.WithLabelValues("drive_count").Set(float64(fm.PutAPIRequests.DriveCount))
+    putRequestsMetrics.WithLabelValues("drive_fails").Set(float64(fm.PutAPIRequests.DriveFails))
+    putRequestsMetrics.WithLabelValues("logger_count").Set(float64(fm.PutAPIRequests.LoggerCount))
+    putRequestsMetrics.WithLabelValues("logger_fails").Set(float64(fm.PutAPIRequests.LoggerFails))
+    putRequestsMetrics.WithLabelValues("machine_cfg_count").Set(float64(fm.PutAPIRequests.MachineCfgCount))
+    putRequestsMetrics.WithLabelValues("machine_cfg_fails").Set(float64(fm.PutAPIRequests.MachineCfgFails))
+    putRequestsMetrics.WithLabelValues("metrics_count").Set(float64(fm.PutAPIRequests.MetricsCount))
+    putRequestsMetrics.WithLabelValues("metrics_fails").Set(float64(fm.PutAPIRequests.MetricsFails))
+    putRequestsMetrics.WithLabelValues("network_count").Set(float64(fm.PutAPIRequests.NetworkCount))
+    putRequestsMetrics.WithLabelValues("network_fails").Set(float64(fm.PutAPIRequests.NetworkFails))
 
     // set metrics for RTCDeviceMetrics
     rTCDeviceMetrics.WithLabelValues("error_count").Set(float64(fm.Rtc.ErrorCount))
@@ -310,7 +310,7 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
     vcpuMetrics.WithLabelValues("exit_mmio_read").Set(float64(fm.Vcpu.ExitMmioRead))
     vcpuMetrics.WithLabelValues("exit_mmio_write").Set(float64(fm.Vcpu.ExitMmioWrite))
     vcpuMetrics.WithLabelValues("failures").Set(float64(fm.Vcpu.Failures))
-    vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCpuid))
+    vcpuMetrics.WithLabelValues("filter_cpuid").Set(float64(fm.Vcpu.FilterCPUid))
 
     // set metrics for VmmMetrics
     vmmMetrics.WithLabelValues("device_events").Set(float64(fm.Vmm.DeviceEvents))
@@ -355,11 +355,11 @@ func updateFirecrackerMetrics(fm *FirecrackerMetrics) {
 // Structure storing all metrics while enforcing serialization support on them.
 type FirecrackerMetrics struct {
     // API Server related metrics.
-    ApiServer ApiServerMetrics `json:"api_server"`
+    APIServer APIServerMetrics `json:"api_server"`
     // A block device's related metrics.
     Block BlockDeviceMetrics `json:"block"`
     // Metrics related to API GET requests.
-    GetApiRequests GetRequestsMetrics `json:"get_api_requests"`
+    GetAPIRequests GetRequestsMetrics `json:"get_api_requests"`
     // Metrics related to the i8042 device.
     I8042 I8042DeviceMetrics `json:"i8042"`
     // Metrics related to performance measurements.
@@ -371,9 +371,9 @@ type FirecrackerMetrics struct {
     // A network device's related metrics.
     Net NetDeviceMetrics `json:"net"`
     // Metrics related to API PATCH requests.
-    PatchApiRequests PatchRequestsMetrics `json:"patch_api_requests"`
+    PatchAPIRequests PatchRequestsMetrics `json:"patch_api_requests"`
     // Metrics related to API PUT requests.
-    PutApiRequests PutRequestsMetrics `json:"put_api_requests"`
+    PutAPIRequests PutRequestsMetrics `json:"put_api_requests"`
     // Metrics related to the RTC device.
     Rtc RTCDeviceMetrics `json:"rtc"`
     // Metrics related to seccomp filtering.
@@ -391,11 +391,11 @@ type FirecrackerMetrics struct {
 }
 
 // API Server related metrics.
-type ApiServerMetrics struct {
+type APIServerMetrics struct {
     // Measures the process's startup time in microseconds.
     ProcessStartupTimeUs uint64 `json:"process_startup_time_us"`
     // Measures the cpu's startup time in microseconds.
-    ProcessStartupTimeCpuUs uint64 `json:"process_startup_time_cpu_us"`
+    ProcessStartupTimeCPUUs uint64 `json:"process_startup_time_cpu_us"`
     // Number of failures on API requests triggered by internal errors.
     SyncResponseFails uint64 `json:"sync_response_fails"`
     // Number of timeouts during communication with the VMM.
@@ -475,9 +475,9 @@ type PerformanceMetrics struct {
     // Measures the snapshot load time, at the API (user) level, in microseconds.
     LoadSnapshot uint64 `json:"load_snapshot"`
     // Measures the microVM pausing duration, at the API (user) level, in microseconds.
-    PauseVm uint64 `json:"pause_vm"`
+    PauseVM uint64 `json:"pause_vm"`
     // Measures the microVM resuming duration, at the API (user) level, in microseconds.
-    ResumeVm uint64 `json:"resume_vm"`
+    ResumeVM uint64 `json:"resume_vm"`
     // Measures the snapshot full create time, at the VMM level, in microseconds.
     VmmFullCreateSnapshot uint64 `json:"vmm_full_create_snapshot"`
     // Measures the snapshot diff create time, at the VMM level, in microseconds.
@@ -485,9 +485,9 @@ type PerformanceMetrics struct {
     // Measures the snapshot load time, at the VMM level, in microseconds.
     VmmLoadSnapshot uint64 `json:"vmm_load_snapshot"`
     // Measures the microVM pausing duration, at the VMM level, in microseconds.
-    VmmPauseVm uint64 `json:"vmm_pause_vm"`
+    VmmPauseVM uint64 `json:"vmm_pause_vm"`
     // Measures the microVM resuming duration, at the VMM level, in microseconds.
-    VmmResumeVm uint64 `json:"vmm_resume_vm"`
+    VmmResumeVM uint64 `json:"vmm_resume_vm"`
 }
 
 // Logging related metrics.
@@ -662,7 +662,7 @@ type VcpuMetrics struct {
     // Number of errors during this VCPU's run.
     Failures uint64 `json:"failures"`
     // Failures in configuring the CPUID.
-    FilterCpuid uint64 `json:"filter_cpuid"`
+    FilterCPUid uint64 `json:"filter_cpuid"`
 }
 
 // Metrics related to the virtual machine manager.
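Note: the Api/Cpu/Vm renames in fc_metrics.go follow Go's initialism style (API, CPU, VM) and are internal only; the json struct tags are unchanged, so the metrics emitted by Firecracker still decode the same way. A standalone sketch (illustrative only) showing that decoding is driven by the tag, not the Go field name:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Renaming the Go field from ApiServer to APIServer does not change how
    // the JSON is decoded, because the mapping comes from the `json:"api_server"`
    // tag rather than the field name.
    type metrics struct {
        APIServer struct {
            ProcessStartupTimeUs uint64 `json:"process_startup_time_us"`
        } `json:"api_server"`
    }

    func main() {
        var m metrics
        data := []byte(`{"api_server":{"process_startup_time_us":1234}}`)
        if err := json.Unmarshal(data, &m); err != nil {
            panic(err)
        }
        fmt.Println(m.APIServer.ProcessStartupTimeUs) // 1234
    }
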
diff --git a/src/runtime/virtcontainers/kata_agent.go b/src/runtime/virtcontainers/kata_agent.go
index ac0cb7891..3579b1f59 100644
--- a/src/runtime/virtcontainers/kata_agent.go
+++ b/src/runtime/virtcontainers/kata_agent.go
@@ -26,7 +26,6 @@ import (
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
     vcAnnotations "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
     vccgroups "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/cgroups"
-    ns "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/nsenter"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/rootless"
     vcTypes "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/types"
     "github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/uuid"
@@ -1337,14 +1336,6 @@ func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process,
         return nil, err
     }
 
-    enterNSList := []ns.Namespace{}
-    if sandbox.networkNS.NetNsPath != "" {
-        enterNSList = append(enterNSList, ns.Namespace{
-            Path: sandbox.networkNS.NetNsPath,
-            Type: ns.NSTypeNet,
-        })
-    }
-
     return buildProcessFromExecID(req.ExecId)
 }
 
@@ -1975,7 +1966,7 @@ func (k *kataAgent) sendReq(request interface{}) (interface{}, error) {
     k.Logger().WithField("name", msgName).WithField("req", message.String()).Debug("sending request")
 
     defer func() {
-        agentRpcDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
+        agentRPCDurationsHistogram.WithLabelValues(msgName).Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
     }()
     return handler(ctx, request)
 }
diff --git a/src/runtime/virtcontainers/mock_agent.go b/src/runtime/virtcontainers/mock_agent.go
index 1db7bdb35..107814e00 100644
--- a/src/runtime/virtcontainers/mock_agent.go
+++ b/src/runtime/virtcontainers/mock_agent.go
@@ -22,6 +22,7 @@ import (
 type mockAgent struct {
 }
 
+// nolint:golint
 func NewMockAgent() agent {
     return &mockAgent{}
 }
@@ -237,6 +238,6 @@ func (n *mockAgent) getOOMEvent() (string, error) {
     return "", nil
 }
 
-func (k *mockAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
+func (n *mockAgent) getAgentMetrics(req *grpc.GetMetricsRequest) (*grpc.Metrics, error) {
     return nil, nil
 }
diff --git a/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go b/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go
index a2a1a8659..367527af9 100644
--- a/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go
+++ b/src/runtime/virtcontainers/pkg/agent/protocols/client/client.go
@@ -36,7 +36,6 @@ const (
 )
 
 var defaultDialTimeout = 30 * time.Second
-var defaultCloseTimeout = 5 * time.Second
 
 var hybridVSockPort uint32
 
@@ -72,8 +71,7 @@ func NewAgentClient(ctx context.Context, sock string) (*AgentClient, error) {
     }
 
     var conn net.Conn
-    var d dialer
-    d = agentDialer(parsedAddr)
+    var d = agentDialer(parsedAddr)
     conn, err = d(grpcAddr, defaultDialTimeout)
     if err != nil {
         return nil, err
diff --git a/src/runtime/virtcontainers/pkg/oci/utils.go b/src/runtime/virtcontainers/pkg/oci/utils.go
index 9776aea17..3427344d7 100644
--- a/src/runtime/virtcontainers/pkg/oci/utils.go
+++ b/src/runtime/virtcontainers/pkg/oci/utils.go
@@ -100,14 +100,21 @@ type RuntimeConfig struct {
 
     AgentConfig vc.KataAgentConfig
 
-    Console string
-
     //Determines how the VM should be connected to the
     //the container network interface
     InterNetworkModel vc.NetInterworkingModel
 
     FactoryConfig FactoryConfig
-    Debug bool
-    Trace bool
+
+    Console string
+
+    //Paths to be bindmounted RO into the guest.
+    SandboxBindMounts []string
+
+    //Experimental features enabled
+    Experimental []exp.Feature
+
+    Debug bool
+    Trace bool
 
     //Determines if seccomp should be applied inside guest
     DisableGuestSeccomp bool
@@ -118,9 +125,6 @@ type RuntimeConfig struct {
     //Determines kata processes are managed only in sandbox cgroup
     SandboxCgroupOnly bool
 
-    //Experimental features enabled
-    Experimental []exp.Feature
-
     // Determines if enable pprof
     EnablePprof bool
 }
@@ -805,7 +809,7 @@ func addHypervisporNetworkOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConf
 
     if value, ok := ocispec.Annotations[vcAnnotations.RxRateLimiterMaxRate]; ok {
         rxRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64)
-        if err != nil || rxRateLimiterMaxRate < 0 {
+        if err != nil {
             return fmt.Errorf("Error parsing annotation for rx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err)
         }
         sbConfig.HypervisorConfig.RxRateLimiterMaxRate = rxRateLimiterMaxRate
@@ -813,7 +817,7 @@ func addHypervisporNetworkOverrides(ocispec specs.Spec, sbConfig *vc.SandboxConf
 
     if value, ok := ocispec.Annotations[vcAnnotations.TxRateLimiterMaxRate]; ok {
         txRateLimiterMaxRate, err := strconv.ParseUint(value, 10, 64)
-        if err != nil || txRateLimiterMaxRate < 0 {
+        if err != nil {
             return fmt.Errorf("Error parsing annotation for tx_rate_limiter_max_rate: %v, Please specify an integer greater than or equal to 0", err)
         }
         sbConfig.HypervisorConfig.TxRateLimiterMaxRate = txRateLimiterMaxRate
diff --git a/src/runtime/virtcontainers/pkg/vcmock/types.go b/src/runtime/virtcontainers/pkg/vcmock/types.go
index 9caa53232..8c6cd4f51 100644
--- a/src/runtime/virtcontainers/pkg/vcmock/types.go
+++ b/src/runtime/virtcontainers/pkg/vcmock/types.go
@@ -87,6 +87,6 @@ type VCMock struct {
     SetLoggerFunc  func(ctx context.Context, logger *logrus.Entry)
     SetFactoryFunc func(ctx context.Context, factory vc.Factory)
 
-    CreateSandboxFunc  func(ctx context.Context, sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error)
+    CreateSandboxFunc    func(ctx context.Context, sandboxConfig vc.SandboxConfig) (vc.VCSandbox, error)
     CleanupContainerFunc func(ctx context.Context, sandboxID, containerID string, force bool) error
 }
diff --git a/src/runtime/virtcontainers/qemu.go b/src/runtime/virtcontainers/qemu.go
index 77d97d991..c52f5fcb9 100644
--- a/src/runtime/virtcontainers/qemu.go
+++ b/src/runtime/virtcontainers/qemu.go
@@ -111,13 +111,9 @@ const (
     scsiControllerID = "scsi0"
     rngID            = "rng0"
-    vsockKernelOption = "agent.use_vsock"
 
     fallbackFileBackedMemDir = "/dev/shm"
 )
 
-var qemuMajorVersion int
-var qemuMinorVersion int
-
 // agnostic list of kernel parameters
 var defaultKernelParameters = []Param{
     {"panic", "1"},
@@ -932,9 +928,6 @@ func (q *qemu) waitSandbox(timeout int) error {
     q.qmpMonitorCh.disconn = disconnectCh
     defer q.qmpShutdown()
 
-    qemuMajorVersion = ver.Major
-    qemuMinorVersion = ver.Minor
-
     q.Logger().WithFields(logrus.Fields{
         "qmp-major-version": ver.Major,
         "qmp-minor-version": ver.Minor,
diff --git a/src/runtime/virtcontainers/sandbox.go b/src/runtime/virtcontainers/sandbox.go
index 2fb2e6af3..62d8ab19e 100644
--- a/src/runtime/virtcontainers/sandbox.go
+++ b/src/runtime/virtcontainers/sandbox.go
@@ -635,15 +635,6 @@ func (s *Sandbox) storeSandbox() error {
     return nil
 }
 
-func rLockSandbox(sandboxID string) (func() error, error) {
-    store, err := persist.GetDriver()
-    if err != nil {
-        return nil, fmt.Errorf("failed to get fs persist driver: %v", err)
-    }
-
-    return store.Lock(sandboxID, false)
-}
-
 func rwLockSandbox(sandboxID string) (func() error, error) {
     store, err := persist.GetDriver()
     if err != nil {
@@ -1002,7 +993,7 @@ func (cw *consoleWatcher) start(s *Sandbox) (err error) {
         scanner = bufio.NewScanner(cw.conn)
     case consoleProtoPty:
         // read-only
-        cw.ptyConsole, err = os.Open(cw.consoleURL)
+        cw.ptyConsole, _ = os.Open(cw.consoleURL)
         scanner = bufio.NewScanner(cw.ptyConsole)
     default:
         return fmt.Errorf("unknown console proto %s", cw.proto)
diff --git a/src/runtime/virtcontainers/sandbox_metrics.go b/src/runtime/virtcontainers/sandbox_metrics.go
index ce44d7441..083c7db23 100644
--- a/src/runtime/virtcontainers/sandbox_metrics.go
+++ b/src/runtime/virtcontainers/sandbox_metrics.go
@@ -60,7 +60,7 @@ var (
         Help: "Open FDs for hypervisor.",
     })
 
-    agentRpcDurationsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+    agentRPCDurationsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
         Namespace: namespaceKatashim,
         Name:      "agent_rpc_durations_histogram_milliseconds",
         Help:      "RPC latency distributions.",
@@ -77,7 +77,7 @@ func RegisterMetrics() {
     prometheus.MustRegister(hypervisorNetdev)
     prometheus.MustRegister(hypervisorIOStat)
     prometheus.MustRegister(hypervisorOpenFDs)
-    prometheus.MustRegister(agentRpcDurationsHistogram)
+    prometheus.MustRegister(agentRPCDurationsHistogram)
 }
 
 // UpdateRuntimeMetrics update shim/hypervisor's metrics