Bump cAdvisor to v0.43.0

Bumping cAdvisor from v0.39.2 -> v0.43.0

* Also pin transitive dependencies
  * containerd v1.4.9 -> v1.4.11
  * docker v20.10.2+incompatible -> v20.10.7+incompatible

Signed-off-by: David Porter <david@porter.me>
David Porter 2021-11-09 14:23:06 -08:00
parent e4adf7f31c
commit c6452be958
80 changed files with 1637 additions and 465 deletions
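
These pins land as `replace` directives in go.mod, which override whatever versions the module graph would otherwise select, so the transitive dependencies cannot drift on their own. Excerpted from the diff below, the relevant entries after this change are:

replace (
	github.com/containerd/containerd => github.com/containerd/containerd v1.4.11
	github.com/docker/docker => github.com/docker/docker v20.10.7+incompatible
	github.com/google/cadvisor => github.com/google/cadvisor v0.43.0
)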

go.mod

@@ -32,7 +32,7 @@ require (
 	github.com/cpuguy83/go-md2man/v2 v2.0.0
 	github.com/davecgh/go-spew v1.1.1
 	github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker v20.10.2+incompatible
+	github.com/docker/docker v20.10.7+incompatible
 	github.com/docker/go-connections v0.4.0
 	github.com/docker/go-units v0.4.0
 	github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153
@@ -45,7 +45,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
 	github.com/golang/mock v1.5.0
 	github.com/golang/protobuf v1.5.2
-	github.com/google/cadvisor v0.39.2
+	github.com/google/cadvisor v0.43.0
 	github.com/google/go-cmp v0.5.5
 	github.com/google/gofuzz v1.1.0
 	github.com/google/uuid v1.1.2
@@ -78,6 +78,7 @@ require (
 	github.com/storageos/go-api v2.2.0+incompatible
 	github.com/stretchr/testify v1.7.0
 	github.com/vishvananda/netlink v1.1.0
+	github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
 	github.com/vmware/govmomi v0.20.3
 	go.etcd.io/etcd/api/v3 v3.5.0
 	go.etcd.io/etcd/client/pkg/v3 v3.5.0
@@ -198,7 +199,7 @@ replace (
 	github.com/container-storage-interface/spec => github.com/container-storage-interface/spec v1.5.0
 	github.com/containerd/cgroups => github.com/containerd/cgroups v1.0.1
 	github.com/containerd/console => github.com/containerd/console v1.0.2
-	github.com/containerd/containerd => github.com/containerd/containerd v1.4.9
+	github.com/containerd/containerd => github.com/containerd/containerd v1.4.11
 	github.com/containerd/continuity => github.com/containerd/continuity v0.1.0
 	github.com/containerd/fifo => github.com/containerd/fifo v1.0.0
 	github.com/containerd/go-runc => github.com/containerd/go-runc v1.0.0
@@ -217,7 +218,7 @@ replace (
 	github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd
 	github.com/dnaeon/go-vcr => github.com/dnaeon/go-vcr v1.0.1
 	github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker => github.com/docker/docker v20.10.2+incompatible
+	github.com/docker/docker => github.com/docker/docker v20.10.7+incompatible
 	github.com/docker/go-connections => github.com/docker/go-connections v0.4.0
 	github.com/docker/go-units => github.com/docker/go-units v0.4.0
 	github.com/docopt/docopt-go => github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
@@ -262,7 +263,7 @@ replace (
 	github.com/golang/protobuf => github.com/golang/protobuf v1.5.2
 	github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e
 	github.com/google/btree => github.com/google/btree v1.0.1
-	github.com/google/cadvisor => github.com/google/cadvisor v0.39.2
+	github.com/google/cadvisor => github.com/google/cadvisor v0.43.0
 	github.com/google/go-cmp => github.com/google/go-cmp v0.5.5
 	github.com/google/gofuzz => github.com/google/gofuzz v1.1.0
 	github.com/google/martian/v3 => github.com/google/martian/v3 v3.1.0

go.sum

@@ -101,8 +101,8 @@ github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
 github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.4.9 h1:JIw9mjVw4LsGmnA/Bqg9j9e+XB7soOJufrKUpA6n2Ns=
-github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.11 h1:QCGOUN+i70jEEL/A6JVIbhy4f4fanzAzSR4kNG7SlcE=
+github.com/containerd/containerd v1.4.11/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
 github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
 github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
@@ -136,8 +136,8 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.2+incompatible h1:vFgEHPqWBTp4pTjdLwjAA4bSo3gvIGOYwuJTlEjVBCw=
-github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
@@ -212,8 +212,8 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e h1:KhcknUwkWHKZ
 github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cadvisor v0.39.2 h1:SzgL5IYoMZEFVA9usi0xCy8SXSVXKQ6aL/rYs/kQjXE=
-github.com/google/cadvisor v0.39.2/go.mod h1:kN93gpdevu+bpS227TyHVZyCU5bbqCzTj5T9drl34MI=
+github.com/google/cadvisor v0.43.0 h1:z0ULgYPKZ7L/c7Zjq+ZD6ltklWwYdCSvBMgSjNC/hGo=
+github.com/google/cadvisor v0.43.0/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ=
 github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=


@@ -560,7 +560,7 @@ definitions:
 format: "int64"
 minimum: 0
 maximum: 100
-NanoCPUs:
+NanoCpus:
 description: "CPU quota in units of 10<sup>-9</sup> CPUs."
 type: "integer"
 format: "int64"
@@ -5466,7 +5466,7 @@ paths:
 MemorySwap: 0
 MemoryReservation: 0
 KernelMemory: 0
-NanoCPUs: 500000
+NanoCpus: 500000
 CpuPercent: 80
 CpuShares: 512
 CpuPeriod: 100000
@@ -7310,7 +7310,7 @@ paths:
 For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
-the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
+query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
 [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)


@@ -2,7 +2,7 @@
 Package client is a Go client for the Docker Engine API.
 For more information about the Engine API, see the documentation:
-https://docs.docker.com/engine/reference/api/
+https://docs.docker.com/engine/api/
 Usage


@@ -27,7 +27,7 @@ import (
 	"strings"
 	v1 "github.com/google/cadvisor/info/v1"
-	"github.com/google/cadvisor/info/v2"
+	v2 "github.com/google/cadvisor/info/v2"
 )
 // Client represents the base URL for a cAdvisor client.


@@ -19,7 +19,7 @@ import (
 	"strings"
 	"time"
-	"github.com/google/cadvisor/info/v1"
+	v1 "github.com/google/cadvisor/info/v1"
 )
 const metricLabelPrefix = "io.cadvisor.metric."


@@ -18,7 +18,8 @@ import (
 	"time"
 	"encoding/json"
-	"github.com/google/cadvisor/info/v1"
+	v1 "github.com/google/cadvisor/info/v1"
 )
 type Config struct {


@@ -17,7 +17,7 @@ package collector
 import (
 	"time"
-	"github.com/google/cadvisor/info/v1"
+	v1 "github.com/google/cadvisor/info/v1"
 )
 type FakeCollectorManager struct {


@@ -25,7 +25,7 @@ import (
 	"time"
 	"github.com/google/cadvisor/container"
-	"github.com/google/cadvisor/info/v1"
+	v1 "github.com/google/cadvisor/info/v1"
 )
 type GenericCollector struct {


@@ -28,7 +28,7 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/google/cadvisor/container"
-	"github.com/google/cadvisor/info/v1"
+	v1 "github.com/google/cadvisor/info/v1"
 )
 type PrometheusCollector struct {


@@ -17,7 +17,7 @@ package collector
 import (
 	"time"
-	"github.com/google/cadvisor/info/v1"
+	v1 "github.com/google/cadvisor/info/v1"
 )
 // TODO(vmarmol): Export to a custom metrics type when that is available.


@@ -96,7 +96,12 @@ func (fh *realFsHandler) update() error {
 		fh.usage.TotalUsageBytes = rootUsage.Bytes
 	}
 	if fh.extraDir != "" && extraErr == nil {
-		fh.usage.TotalUsageBytes += extraUsage.Bytes
+		if fh.rootfs != "" {
+			fh.usage.TotalUsageBytes += extraUsage.Bytes
+		} else {
+			// rootfs is empty, totalUsageBytes use extra usage bytes
+			fh.usage.TotalUsageBytes = extraUsage.Bytes
+		}
 	}
 	// Combine errors into a single error to return
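
This branch matters because update() runs on a timer and fh.usage persists between runs: with an empty rootfs nothing resets the total, so the old `+= extraUsage.Bytes` inflated the figure on every cycle, while the new assignment makes each cycle idempotent. A toy sketch of the difference (names are illustrative, not the real handler):

package main

import "fmt"

// oldUpdate mimics the pre-fix path: with no rootfs the total is never
// reset, so the extra-dir bytes accumulate across cycles.
func oldUpdate(total *uint64, rootfs string, rootBytes, extraBytes uint64) {
	if rootfs != "" {
		*total = rootBytes
	}
	*total += extraBytes
}

// newUpdate mimics the fix: without a rootfs, the extra-dir bytes ARE the total.
func newUpdate(total *uint64, rootfs string, rootBytes, extraBytes uint64) {
	if rootfs != "" {
		*total = rootBytes
		*total += extraBytes
	} else {
		*total = extraBytes
	}
}

func main() {
	var before, after uint64
	for i := 0; i < 3; i++ { // three update cycles
		oldUpdate(&before, "", 0, 42)
		newUpdate(&after, "", 0, 42)
	}
	fmt.Println(before, after) // 126 42
}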


@@ -24,14 +24,15 @@ import (
 	"strings"
 	"time"
-	"github.com/google/cadvisor/container"
-	info "github.com/google/cadvisor/info/v1"
-	"github.com/google/cadvisor/utils"
 	"github.com/karrick/godirwalk"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
+	"github.com/google/cadvisor/container"
+	info "github.com/google/cadvisor/info/v1"
+	"github.com/google/cadvisor/utils"
 	"k8s.io/klog/v2"
 )
@@ -104,7 +105,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
 	}
 	// CPU.
-	cpuRoot, ok := cgroupPaths["cpu"]
+	cpuRoot, ok := getControllerPath(cgroupPaths, "cpu", cgroup2UnifiedMode)
 	if ok {
 		if utils.FileExists(cpuRoot) {
 			if cgroup2UnifiedMode {
@@ -151,7 +152,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
 	// Cpu Mask.
 	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
-	cpusetRoot, ok := cgroupPaths["cpuset"]
+	cpusetRoot, ok := getControllerPath(cgroupPaths, "cpuset", cgroup2UnifiedMode)
 	if ok {
 		if utils.FileExists(cpusetRoot) {
 			spec.HasCpu = true
@@ -166,7 +167,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
 	}
 	// Memory
-	memoryRoot, ok := cgroupPaths["memory"]
+	memoryRoot, ok := getControllerPath(cgroupPaths, "memory", cgroup2UnifiedMode)
 	if ok {
 		if cgroup2UnifiedMode {
 			if utils.FileExists(path.Join(memoryRoot, "memory.max")) {
@@ -194,7 +195,7 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
 	}
 	// Processes, read it's value from pids path directly
-	pidsRoot, ok := cgroupPaths["pids"]
+	pidsRoot, ok := getControllerPath(cgroupPaths, "pids", cgroup2UnifiedMode)
 	if ok {
 		if utils.FileExists(pidsRoot) {
 			spec.HasProcesses = true
@@ -216,6 +217,19 @@ func getSpecInternal(cgroupPaths map[string]string, machineInfoFactory info.Mach
 	return spec, nil
 }
+func getControllerPath(cgroupPaths map[string]string, controllerName string, cgroup2UnifiedMode bool) (string, bool) {
+	ok := false
+	path := ""
+	if cgroup2UnifiedMode {
+		path, ok = cgroupPaths[""]
+	} else {
+		path, ok = cgroupPaths[controllerName]
+	}
+	return path, ok
+}
 func readString(dirpath string, file string) string {
 	cgroupFile := path.Join(dirpath, file)
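
The new helper papers over how libcontainer reports cgroup mount points: on cgroup v1 the map carries one path per controller ("cpu", "memory", "pids", ...), while on the unified v2 hierarchy there is a single path stored under the empty-string key. A standalone sketch of the lookup (paths are illustrative):

package main

import "fmt"

func getControllerPath(cgroupPaths map[string]string, controllerName string, cgroup2UnifiedMode bool) (string, bool) {
	if cgroup2UnifiedMode {
		p, ok := cgroupPaths[""] // v2: one unified hierarchy for every controller
		return p, ok
	}
	p, ok := cgroupPaths[controllerName] // v1: one hierarchy per controller
	return p, ok
}

func main() {
	v1Paths := map[string]string{"cpu": "/sys/fs/cgroup/cpu/mypod", "memory": "/sys/fs/cgroup/memory/mypod"}
	v2Paths := map[string]string{"": "/sys/fs/cgroup/mypod"}

	fmt.Println(getControllerPath(v1Paths, "cpu", false)) // /sys/fs/cgroup/cpu/mypod true
	fmt.Println(getControllerPath(v2Paths, "cpu", true))  // /sys/fs/cgroup/mypod true
}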


@@ -24,10 +24,10 @@ import (
 	containersapi "github.com/containerd/containerd/api/services/containers/v1"
 	tasksapi "github.com/containerd/containerd/api/services/tasks/v1"
 	versionapi "github.com/containerd/containerd/api/services/version/v1"
-	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/pkg/dialer"
 	ptypes "github.com/gogo/protobuf/types"
+	"github.com/google/cadvisor/container/containerd/containers"
+	"github.com/google/cadvisor/container/containerd/errdefs"
+	"github.com/google/cadvisor/container/containerd/pkg/dialer"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
 )


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.


@@ -0,0 +1,106 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+/*
+Copyright The containerd Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with errors.Wrap and error.Wrapf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+//
+// The functions ToGRPC and FromGRPC can be used to map server-side and
+// client-side errors to the correct types.
+package errdefs
+import (
+	"context"
+	"github.com/pkg/errors"
+)
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these errors classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// For the most part, we just try to provide local grpc errors. Most conditions
+// map very well to those defined by grpc.
+var (
+	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
+	ErrInvalidArgument    = errors.New("invalid argument")
+	ErrNotFound           = errors.New("not found")
+	ErrAlreadyExists      = errors.New("already exists")
+	ErrFailedPrecondition = errors.New("failed precondition")
+	ErrUnavailable        = errors.New("unavailable")
+	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
+)
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+	return errors.Is(err, ErrInvalidArgument)
+}
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+	return errors.Is(err, ErrAlreadyExists)
+}
+// IsFailedPrecondition returns true if an operation could not proceed to the
+// lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+	return errors.Is(err, ErrFailedPrecondition)
+}
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Is(err, ErrUnavailable)
+}
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+	return errors.Is(err, ErrNotImplemented)
+}
+// IsCanceled returns true if the error is due to `context.Canceled`.
+func IsCanceled(err error) bool {
+	return errors.Is(err, context.Canceled)
+}
+// IsDeadlineExceeded returns true if the error is due to
+// `context.DeadlineExceeded`.
+func IsDeadlineExceeded(err error) bool {
+	return errors.Is(err, context.DeadlineExceeded)
+}


@@ -0,0 +1,160 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+/*
+Copyright The containerd Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package errdefs
+import (
+	"context"
+	"strings"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+// ToGRPC will attempt to map the backend containerd error into a grpc error,
+// using the original error message as a description.
+//
+// Further information may be extracted from certain errors depending on their
+// type.
+//
+// If the error is unmapped, the original error will be returned to be handled
+// by the regular grpc error handling stack.
+func ToGRPC(err error) error {
+	if err == nil {
+		return nil
+	}
+	if isGRPCError(err) {
+		// error has already been mapped to grpc
+		return err
+	}
+	switch {
+	case IsInvalidArgument(err):
+		return status.Errorf(codes.InvalidArgument, err.Error())
+	case IsNotFound(err):
+		return status.Errorf(codes.NotFound, err.Error())
+	case IsAlreadyExists(err):
+		return status.Errorf(codes.AlreadyExists, err.Error())
+	case IsFailedPrecondition(err):
+		return status.Errorf(codes.FailedPrecondition, err.Error())
+	case IsUnavailable(err):
+		return status.Errorf(codes.Unavailable, err.Error())
+	case IsNotImplemented(err):
+		return status.Errorf(codes.Unimplemented, err.Error())
+	case IsCanceled(err):
+		return status.Errorf(codes.Canceled, err.Error())
+	case IsDeadlineExceeded(err):
+		return status.Errorf(codes.DeadlineExceeded, err.Error())
+	}
+	return err
+}
+// ToGRPCf maps the error to grpc error codes, assembling the formatting string
+// and combining it with the target error string.
+//
+// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
+func ToGRPCf(err error, format string, args ...interface{}) error {
+	return ToGRPC(errors.Wrapf(err, format, args...))
+}
+// FromGRPC returns the underlying error from a grpc service based on the grpc error code
+func FromGRPC(err error) error {
+	if err == nil {
+		return nil
+	}
+	var cls error // divide these into error classes, becomes the cause
+	switch code(err) {
+	case codes.InvalidArgument:
+		cls = ErrInvalidArgument
+	case codes.AlreadyExists:
+		cls = ErrAlreadyExists
+	case codes.NotFound:
+		cls = ErrNotFound
+	case codes.Unavailable:
+		cls = ErrUnavailable
+	case codes.FailedPrecondition:
+		cls = ErrFailedPrecondition
+	case codes.Unimplemented:
+		cls = ErrNotImplemented
+	case codes.Canceled:
+		cls = context.Canceled
+	case codes.DeadlineExceeded:
+		cls = context.DeadlineExceeded
+	default:
+		cls = ErrUnknown
+	}
+	msg := rebaseMessage(cls, err)
+	if msg != "" {
+		err = errors.Wrap(cls, msg)
+	} else {
+		err = errors.WithStack(cls)
+	}
+	return err
+}
+// rebaseMessage removes the repeats for an error at the end of an error
+// string. This will happen when taking an error over grpc then remapping it.
+//
+// Effectively, we just remove the string of cls from the end of err if it
+// appears there.
+func rebaseMessage(cls error, err error) string {
+	desc := errDesc(err)
+	clss := cls.Error()
+	if desc == clss {
+		return ""
+	}
+	return strings.TrimSuffix(desc, ": "+clss)
+}
+func isGRPCError(err error) bool {
+	_, ok := status.FromError(err)
+	return ok
+}
+func code(err error) codes.Code {
+	if s, ok := status.FromError(err); ok {
+		return s.Code()
+	}
+	return codes.Unknown
+}
+func errDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}


@@ -34,6 +34,8 @@ import (
 var ArgContainerdEndpoint = flag.String("containerd", "/run/containerd/containerd.sock", "containerd endpoint")
 var ArgContainerdNamespace = flag.String("containerd-namespace", "k8s.io", "containerd namespace")
+
+var containerdEnvMetadataWhiteList = flag.String("containerd_env_metadata_whitelist", "", "DEPRECATED: this flag will be removed, please use `env_metadata_whitelist`. A comma-separated list of environment variable keys matched with specified prefix that needs to be collected for containerd containers")
 // The namespace under which containerd aliases are unique.
 const k8sContainerdNamespace = "containerd"
@@ -46,7 +48,7 @@ type containerdFactory struct {
 	client ContainerdClient
 	version string
 	// Information about the mounted cgroup subsystems.
-	cgroupSubsystems libcontainer.CgroupSubsystems
+	cgroupSubsystems map[string]string
 	// Information about mounted filesystems.
 	fsInfo fs.FsInfo
 	includedMetrics container.MetricSet
@@ -56,21 +58,27 @@ func (f *containerdFactory) String() string {
 	return k8sContainerdNamespace
 }
-func (f *containerdFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
+func (f *containerdFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
 	client, err := Client(*ArgContainerdEndpoint, *ArgContainerdNamespace)
 	if err != nil {
 		return
 	}
-	metadataEnvs := []string{}
+	containerdMetadataEnvAllowList := strings.Split(*containerdEnvMetadataWhiteList, ",")
+	// prefer using the unified metadataEnvAllowList
+	if len(metadataEnvAllowList) != 0 {
+		containerdMetadataEnvAllowList = metadataEnvAllowList
+	}
 	return newContainerdContainerHandler(
 		client,
 		name,
 		f.machineInfoFactory,
 		f.fsInfo,
-		&f.cgroupSubsystems,
+		f.cgroupSubsystems,
 		inHostNamespace,
-		metadataEnvs,
+		containerdMetadataEnvAllowList,
 		f.includedMetrics,
 	)
 }


@@ -16,7 +16,7 @@
 package containerd
 import (
-	"github.com/containerd/containerd/namespaces"
+	"github.com/google/cadvisor/container/containerd/namespaces"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 )


@@ -21,15 +21,17 @@ import (
 	"strings"
 	"time"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/google/cadvisor/container/containerd/errdefs"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"golang.org/x/net/context"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/common"
 	containerlibcontainer "github.com/google/cadvisor/container/libcontainer"
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 type containerdContainerHandler struct {
@@ -58,13 +60,13 @@ func newContainerdContainerHandler(
 	name string,
 	machineInfoFactory info.MachineInfoFactory,
 	fsInfo fs.FsInfo,
-	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
+	cgroupSubsystems map[string]string,
 	inHostNamespace bool,
-	metadataEnvs []string,
+	metadataEnvAllowList []string,
 	includedMetrics container.MetricSet,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
-	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)
+	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems, name)
 	// Generate the equivalent cgroup manager for this container.
 	cgroupManager, err := containerlibcontainer.NewCgroupManager(name, cgroupPaths)
@@ -133,11 +135,19 @@ func newContainerdContainerHandler(
 	}
 	// Add the name and bare ID as aliases of the container.
 	handler.image = cntr.Image
-	for _, envVar := range spec.Process.Env {
-		if envVar != "" {
-			splits := strings.SplitN(envVar, "=", 2)
-			if len(splits) == 2 {
-				handler.envs[splits[0]] = splits[1]
+	for _, exposedEnv := range metadataEnvAllowList {
+		if exposedEnv == "" {
+			// if no containerdEnvWhitelist provided, len(metadataEnvAllowList) == 1, metadataEnvAllowList[0] == ""
+			continue
+		}
+		for _, envVar := range spec.Process.Env {
+			if envVar != "" {
+				splits := strings.SplitN(envVar, "=", 2)
+				if len(splits) == 2 && strings.HasPrefix(splits[0], exposedEnv) {
+					handler.envs[splits[0]] = splits[1]
+				}
 			}
 		}
 	}
@@ -207,7 +217,11 @@ func (h *containerdContainerHandler) ListContainers(listType container.ListType)
 }
 func (h *containerdContainerHandler) GetCgroupPath(resource string) (string, error) {
-	path, ok := h.cgroupPaths[resource]
+	var res string
+	if !cgroups.IsCgroup2UnifiedMode() {
+		res = resource
+	}
+	path, ok := h.cgroupPaths[res]
 	if !ok {
 		return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
 	}
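
Note that the allowlist entries are matched as name prefixes (the strings.HasPrefix call above), so a single entry can expose a whole family of variables. A minimal sketch of that filtering, with a hypothetical helper name and example values:

package main

import (
	"fmt"
	"strings"
)

// filterEnvs keeps NAME=value pairs whose NAME starts with an allowed prefix,
// mirroring the loop in the handler above.
func filterEnvs(envs, allowList []string) map[string]string {
	out := map[string]string{}
	for _, prefix := range allowList {
		if prefix == "" {
			continue // an empty flag value splits into one empty entry
		}
		for _, env := range envs {
			if kv := strings.SplitN(env, "=", 2); len(kv) == 2 && strings.HasPrefix(kv[0], prefix) {
				out[kv[0]] = kv[1]
			}
		}
	}
	return out
}

func main() {
	envs := []string{"KUBERNETES_SERVICE_HOST=10.0.0.1", "KUBERNETES_PORT=tcp://10.0.0.1:443", "PATH=/usr/bin"}
	fmt.Println(filterEnvs(envs, []string{"KUBERNETES_"}))
	// map[KUBERNETES_PORT:tcp://10.0.0.1:443 KUBERNETES_SERVICE_HOST:10.0.0.1]
}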


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.
@@ -27,7 +40,7 @@ package identifiers
 import (
 	"regexp"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/google/cadvisor/container/containerd/errdefs"
 	"github.com/pkg/errors"
 )


@@ -16,9 +16,10 @@
 package install
 import (
+	"k8s.io/klog/v2"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/containerd"
-	"k8s.io/klog/v2"
 )
 func init() {


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.
@@ -20,8 +33,8 @@ import (
 	"context"
 	"os"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/identifiers"
+	"github.com/google/cadvisor/container/containerd/errdefs"
+	"github.com/google/cadvisor/container/containerd/identifiers"
 	"github.com/pkg/errors"
 )


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.


@@ -1,3 +1,17 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//go:build !windows
 // +build !windows
 /*


@@ -1,3 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 /*
 Copyright The containerd Authors.


@@ -32,6 +32,9 @@ import (
 // The namespace under which crio aliases are unique.
 const CrioNamespace = "crio"
+
+// The namespace systemd runs components under.
+const SystemdNamespace = "system-systemd"
 // Regexp that identifies CRI-O cgroups
 var crioCgroupRegexp = regexp.MustCompile(`([a-z0-9]{64})`)
@@ -50,7 +53,7 @@ type crioFactory struct {
 	storageDir string
 	// Information about the mounted cgroup subsystems.
-	cgroupSubsystems libcontainer.CgroupSubsystems
+	cgroupSubsystems map[string]string
 	// Information about mounted filesystems.
 	fsInfo fs.FsInfo
@@ -64,13 +67,11 @@ func (f *crioFactory) String() string {
 	return CrioNamespace
 }
-func (f *crioFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
+func (f *crioFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
 	client, err := Client()
 	if err != nil {
 		return
 	}
-	// TODO are there any env vars we need to white list, if so, do it here...
-	metadataEnvs := []string{}
 	handler, err = newCrioContainerHandler(
 		client,
 		name,
@@ -78,9 +79,9 @@ func (f *crioFactory) NewContainerHandler(name string, inHostNamespace bool) (ha
 		f.fsInfo,
 		f.storageDriver,
 		f.storageDir,
-		&f.cgroupSubsystems,
+		f.cgroupSubsystems,
 		inHostNamespace,
-		metadataEnvs,
+		metadataEnvAllowList,
 		f.includedMetrics,
 	)
 	return
@@ -116,6 +117,9 @@ func (f *crioFactory) CanHandleAndAccept(name string) (bool, bool, error) {
 	if !strings.HasPrefix(path.Base(name), CrioNamespace) {
 		return false, false, nil
 	}
+	if strings.HasPrefix(path.Base(name), SystemdNamespace) {
+		return true, false, nil
+	}
 	// if the container is not associated with CRI-O, we can't handle it or accept it.
 	if !isContainerName(name) {
 		return false, false, nil


@@ -21,12 +21,13 @@ import (
 	"strconv"
 	"strings"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/common"
 	containerlibcontainer "github.com/google/cadvisor/container/libcontainer"
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
-	"github.com/opencontainers/runc/libcontainer/cgroups"
 )
 type crioContainerHandler struct {
@@ -83,13 +84,13 @@ func newCrioContainerHandler(
 	fsInfo fs.FsInfo,
 	storageDriver storageDriver,
 	storageDir string,
-	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
+	cgroupSubsystems map[string]string,
 	inHostNamespace bool,
-	metadataEnvs []string,
+	metadataEnvAllowList []string,
 	includedMetrics container.MetricSet,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
-	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)
+	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems, name)
 	// Generate the equivalent cgroup manager for this container.
 	cgroupManager, err := containerlibcontainer.NewCgroupManager(name, cgroupPaths)
@@ -186,7 +187,7 @@ func newCrioContainerHandler(
 		handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, rootfsStorageDir, storageLogDir, fsInfo)
 	}
 	// TODO for env vars we wanted to show from container.Config.Env from whitelist
-	//for _, exposedEnv := range metadataEnvs {
+	//for _, exposedEnv := range metadataEnvAllowList {
 	//klog.V(4).Infof("TODO env whitelist: %v", exposedEnv)
 	//}
@@ -327,7 +328,7 @@ func (h *crioContainerHandler) ListContainers(listType container.ListType) ([]in
 	}
 func (h *crioContainerHandler) GetCgroupPath(resource string) (string, error) {
-	path, ok := h.cgroupPaths[resource]
+	var res string
+	if !cgroups.IsCgroup2UnifiedMode() {
+		res = resource
+	}
+	path, ok := h.cgroupPaths[res]
 	if !ok {
 		return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
 	}


@@ -16,9 +16,10 @@
 package install
 import (
+	"k8s.io/klog/v2"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/crio"
-	"k8s.io/klog/v2"
 )
 func init() {


@@ -15,11 +15,12 @@
 package crio
 import (
+	"k8s.io/klog/v2"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/watcher"
-	"k8s.io/klog/v2"
 )
 // NewPlugin returns an implementation of container.Plugin suitable for passing to container.RegisterPlugin()


@@ -25,7 +25,7 @@ import (
 	"time"
-	"github.com/google/cadvisor/info/v1"
+	v1 "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/machine"
 )


@@ -26,6 +26,7 @@ import (
 	"github.com/blang/semver"
 	dockertypes "github.com/docker/docker/api/types"
+
 	"github.com/google/cadvisor/container"
 	dockerutil "github.com/google/cadvisor/container/docker/utils"
 	"github.com/google/cadvisor/container/libcontainer"
@@ -47,21 +48,21 @@ var ArgDockerCert = flag.String("docker-tls-cert", "cert.pem", "path to client c
 var ArgDockerKey = flag.String("docker-tls-key", "key.pem", "path to private key")
 var ArgDockerCA = flag.String("docker-tls-ca", "ca.pem", "path to trusted CA")
+
+var dockerEnvMetadataWhiteList = flag.String("docker_env_metadata_whitelist", "", "DEPRECATED: this flag will be removed, please use `env_metadata_whitelist`. A comma-separated list of environment variable keys matched with specified prefix that needs to be collected for docker containers")
 // The namespace under which Docker aliases are unique.
 const DockerNamespace = "docker"
 // The retry times for getting docker root dir
 const rootDirRetries = 5
-//The retry period for getting docker root dir, Millisecond
+// The retry period for getting docker root dir, Millisecond
 const rootDirRetryPeriod time.Duration = 1000 * time.Millisecond
 // Regexp that identifies docker cgroups, containers started with
 // --cgroup-parent have another prefix than 'docker'
 var dockerCgroupRegexp = regexp.MustCompile(`([a-z0-9]{64})`)
-var dockerEnvWhitelist = flag.String("docker_env_metadata_whitelist", "", "a comma-separated list of environment variable keys matched with specified prefix that needs to be collected for docker containers")
 var (
 	// Basepath to all container specific information that libcontainer stores.
 	dockerRootDir string
@@ -115,7 +116,7 @@ type dockerFactory struct {
 	client *docker.Client
 	// Information about the mounted cgroup subsystems.
-	cgroupSubsystems libcontainer.CgroupSubsystems
+	cgroupSubsystems map[string]string
 	// Information about mounted filesystems.
 	fsInfo fs.FsInfo
@@ -136,13 +137,18 @@ func (f *dockerFactory) String() string {
 	return DockerNamespace
 }
-func (f *dockerFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
+func (f *dockerFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
 	client, err := Client()
 	if err != nil {
 		return
 	}
-	metadataEnvs := strings.Split(*dockerEnvWhitelist, ",")
+	dockerMetadataEnvAllowList := strings.Split(*dockerEnvMetadataWhiteList, ",")
+	// prefer using the unified metadataEnvAllowList
+	if len(metadataEnvAllowList) != 0 {
+		dockerMetadataEnvAllowList = metadataEnvAllowList
+	}
 	handler, err = newDockerContainerHandler(
 		client,
@@ -151,9 +157,9 @@ func (f *dockerFactory) NewContainerHandler(name string, inHostNamespace bool) (
 		f.fsInfo,
 		f.storageDriver,
 		f.storageDir,
-		&f.cgroupSubsystems,
+		f.cgroupSubsystems,
 		inHostNamespace,
-		metadataEnvs,
+		dockerMetadataEnvAllowList,
 		f.dockerVersion,
 		f.includedMetrics,
 		f.thinPoolName,


@@ -31,6 +31,7 @@ import (
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/zfs"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
 	dockercontainer "github.com/docker/docker/api/types/container"
 	docker "github.com/docker/docker/client"
@@ -121,9 +122,9 @@ func newDockerContainerHandler(
 	fsInfo fs.FsInfo,
 	storageDriver storageDriver,
 	storageDir string,
-	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
+	cgroupSubsystems map[string]string,
 	inHostNamespace bool,
-	metadataEnvs []string,
+	metadataEnvAllowList []string,
 	dockerVersion []int,
 	includedMetrics container.MetricSet,
 	thinPoolName string,
@@ -131,7 +132,7 @@ func newDockerContainerHandler(
 	zfsWatcher *zfs.ZfsWatcher,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
-	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)
+	cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems, name)
 	// Generate the equivalent cgroup manager for this container.
 	cgroupManager, err := containerlibcontainer.NewCgroupManager(name, cgroupPaths)
@@ -249,9 +250,9 @@ func newDockerContainerHandler(
 	}
 	// split env vars to get metadata map.
-	for _, exposedEnv := range metadataEnvs {
+	for _, exposedEnv := range metadataEnvAllowList {
 		if exposedEnv == "" {
-			// if no dockerEnvWhitelist provided, len(metadataEnvs) == 1, metadataEnvs[0] == ""
+			// if no dockerEnvWhitelist provided, len(metadataEnvAllowList) == 1, metadataEnvAllowList[0] == ""
 			continue
 		}
@@ -484,7 +485,7 @@ func (h *dockerContainerHandler) ListContainers(listType container.ListType) ([]
 	}
 func (h *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {
-	path, ok := h.cgroupPaths[resource]
+	var res string
+	if !cgroups.IsCgroup2UnifiedMode() {
+		res = resource
+	}
+	path, ok := h.cgroupPaths[res]
 	if !ok {
 		return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.reference.Name)
 	}


@@ -16,9 +16,10 @@
 package install
 import (
+	"k8s.io/klog/v2"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/container/docker"
-	"k8s.io/klog/v2"
 )
 func init() {


@@ -17,12 +17,13 @@ package docker
 import (
 	"time"
+	"golang.org/x/net/context"
+	"k8s.io/klog/v2"
 	"github.com/google/cadvisor/container"
 	"github.com/google/cadvisor/fs"
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/watcher"
-	"golang.org/x/net/context"
-	"k8s.io/klog/v2"
 )
 const dockerClientTimeout = 10 * time.Second


@@ -16,6 +16,8 @@ package container
 import (
 	"fmt"
+	"sort"
+	"strings"
 	"sync"
 	"github.com/google/cadvisor/fs"
@@ -27,7 +29,7 @@ import (
 type ContainerHandlerFactory interface {
 	// Create a new ContainerHandler using this factory. CanHandleAndAccept() must have returned true.
-	NewContainerHandler(name string, inHostNamespace bool) (c ContainerHandler, err error)
+	NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (c ContainerHandler, err error)
 	// Returns whether this factory can handle and accept the specified container.
 	CanHandleAndAccept(name string) (handle bool, accept bool, err error)
@@ -64,6 +66,7 @@ const (
 	CPUTopologyMetrics MetricKind = "cpu_topology"
 	ResctrlMetrics     MetricKind = "resctrl"
 	CPUSetMetrics      MetricKind = "cpuset"
+	OOMMetrics         MetricKind = "oom_event"
 )
 // AllMetrics represents all kinds of metrics that cAdvisor supported.
@@ -89,6 +92,7 @@ var AllMetrics = MetricSet{
 	CPUTopologyMetrics: struct{}{},
 	ResctrlMetrics:     struct{}{},
 	CPUSetMetrics:      struct{}{},
+	OOMMetrics:         struct{}{},
 }
 func (mk MetricKind) String() string {
@@ -102,15 +106,50 @@ func (ms MetricSet) Has(mk MetricKind) bool {
 	return exists
 }
-func (ms MetricSet) Add(mk MetricKind) {
+func (ms MetricSet) add(mk MetricKind) {
 	ms[mk] = struct{}{}
 }
+func (ms MetricSet) String() string {
+	values := make([]string, 0, len(ms))
+	for metric := range ms {
+		values = append(values, string(metric))
+	}
+	sort.Strings(values)
+	return strings.Join(values, ",")
+}
+// Not thread-safe, exported only for https://pkg.go.dev/flag#Value
+func (ms *MetricSet) Set(value string) error {
+	*ms = MetricSet{}
+	if value == "" {
+		return nil
+	}
+	for _, metric := range strings.Split(value, ",") {
+		if AllMetrics.Has(MetricKind(metric)) {
+			(*ms).add(MetricKind(metric))
+		} else {
+			return fmt.Errorf("unsupported metric %q specified", metric)
+		}
+	}
+	return nil
+}
 func (ms MetricSet) Difference(ms1 MetricSet) MetricSet {
 	result := MetricSet{}
 	for kind := range ms {
 		if !ms1.Has(kind) {
-			result.Add(kind)
+			result.add(kind)
 		}
 	}
 	return result
 }
+func (ms MetricSet) Append(ms1 MetricSet) MetricSet {
+	result := ms
+	for kind := range ms1 {
+		if !ms.Has(kind) {
+			result.add(kind)
+		}
+	}
+	return result
+}
@ -198,12 +237,15 @@ func HasFactories() bool {
} }
// Create a new ContainerHandler for the specified container. // Create a new ContainerHandler for the specified container.
func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, inHostNamespace bool) (ContainerHandler, bool, error) { func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, metadataEnvAllowList []string, inHostNamespace bool) (ContainerHandler, bool, error) {
factoriesLock.RLock() factoriesLock.RLock()
defer factoriesLock.RUnlock() defer factoriesLock.RUnlock()
// Create the ContainerHandler with the first factory that supports it. // Create the ContainerHandler with the first factory that supports it.
for _, factory := range factories[watchType] { // Note that since RawContainerHandler can support a wide range of paths,
// it's evaluated last just to make sure if any other ContainerHandler
// can support it.
for _, factory := range GetReorderedFactoryList(watchType) {
canHandle, canAccept, err := factory.CanHandleAndAccept(name) canHandle, canAccept, err := factory.CanHandleAndAccept(name)
if err != nil { if err != nil {
klog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err) klog.V(4).Infof("Error trying to work out if we can handle %s: %v", name, err)
@ -214,7 +256,7 @@ func NewContainerHandler(name string, watchType watcher.ContainerWatchSource, in
return nil, false, nil return nil, false, nil
} }
klog.V(3).Infof("Using factory %q for container %q", factory, name) klog.V(3).Infof("Using factory %q for container %q", factory, name)
handle, err := factory.NewContainerHandler(name, inHostNamespace) handle, err := factory.NewContainerHandler(name, metadataEnvAllowList, inHostNamespace)
return handle, canAccept, err return handle, canAccept, err
} }
klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name) klog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
@ -246,3 +288,26 @@ func DebugInfo() map[string][]string {
} }
return out return out
} }
// GetReorderedFactoryList returns the list of ContainerHandlerFactory where the
// RawContainerHandler is always the last element.
func GetReorderedFactoryList(watchType watcher.ContainerWatchSource) []ContainerHandlerFactory {
ContainerHandlerFactoryList := make([]ContainerHandlerFactory, 0, len(factories))
var rawFactory ContainerHandlerFactory
for _, v := range factories[watchType] {
if v != nil {
if v.String() == "raw" {
rawFactory = v
continue
}
ContainerHandlerFactoryList = append(ContainerHandlerFactoryList, v)
}
}
if rawFactory != nil {
ContainerHandlerFactoryList = append(ContainerHandlerFactoryList, rawFactory)
}
return ContainerHandlerFactoryList
}
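Note: the new `String`/`Set` pair makes `MetricSet` satisfy Go's standard `flag.Value` interface, so a comma-separated metric list can be bound directly to a command-line flag. A minimal standalone sketch of the same pattern (the `metricSet` type and flag name below are illustrative, not the vendored code):

    package main

    import (
        "flag"
        "fmt"
        "sort"
        "strings"
    )

    // metricSet mirrors the pattern above: a set keyed by metric name
    // that implements flag.Value via String and Set.
    type metricSet map[string]struct{}

    func (ms metricSet) String() string {
        values := make([]string, 0, len(ms))
        for m := range ms {
            values = append(values, m)
        }
        sort.Strings(values) // deterministic output, as in the vendored Set/String
        return strings.Join(values, ",")
    }

    func (ms *metricSet) Set(value string) error {
        *ms = metricSet{}
        if value == "" {
            return nil
        }
        for _, m := range strings.Split(value, ",") {
            (*ms)[m] = struct{}{}
        }
        return nil
    }

    func main() {
        disabled := metricSet{}
        flag.Var(&disabled, "disable_metrics", "comma-separated list of metrics to disable")
        flag.Parse()
        fmt.Println("disabled metrics:", disabled.String())
    }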


@@ -54,7 +54,10 @@ type Handler struct {
     rootFs string
     pid int
     includedMetrics container.MetricSet
+    // pidMetricsCache holds CPU scheduler stats for existing processes (map key is PID) between calls to schedulerStatsFromProcs.
     pidMetricsCache map[int]*info.CpuSchedstat
+    // pidMetricsSaved holds accumulated CPU scheduler stats for processes that no longer exist.
+    pidMetricsSaved info.CpuSchedstat
     cycles uint64
 }
@@ -93,14 +96,9 @@ func (h *Handler) GetStats() (*info.ContainerStats, error) {
     stats := newContainerStats(libcontainerStats, h.includedMetrics)
     if h.includedMetrics.Has(container.ProcessSchedulerMetrics) {
-        pids, err := h.cgroupManager.GetAllPids()
+        stats.Cpu.Schedstat, err = h.schedulerStatsFromProcs()
         if err != nil {
-            klog.V(4).Infof("Could not get PIDs for container %d: %v", h.pid, err)
-        } else {
-            stats.Cpu.Schedstat, err = schedulerStatsFromProcs(h.rootFs, pids, h.pidMetricsCache)
-            if err != nil {
-                klog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
-            }
+            klog.V(4).Infof("Unable to get Process Scheduler Stats: %v", err)
         }
     }
@@ -314,9 +312,14 @@ func processStatsFromProcs(rootFs string, cgroupPath string, rootPid int) (info.
     return processStats, nil
 }
-func schedulerStatsFromProcs(rootFs string, pids []int, pidMetricsCache map[int]*info.CpuSchedstat) (info.CpuSchedstat, error) {
+func (h *Handler) schedulerStatsFromProcs() (info.CpuSchedstat, error) {
+    pids, err := h.cgroupManager.GetAllPids()
+    if err != nil {
+        return info.CpuSchedstat{}, fmt.Errorf("Could not get PIDs for container %d: %w", h.pid, err)
+    }
+    alivePids := make(map[int]struct{}, len(pids))
     for _, pid := range pids {
-        f, err := os.Open(path.Join(rootFs, "proc", strconv.Itoa(pid), "schedstat"))
+        f, err := os.Open(path.Join(h.rootFs, "proc", strconv.Itoa(pid), "schedstat"))
         if err != nil {
             return info.CpuSchedstat{}, fmt.Errorf("couldn't open scheduler statistics for process %d: %v", pid, err)
         }
@@ -325,14 +328,15 @@ func schedulerStatsFromProcs(rootFs string, pids []int, pidMetricsCache map[int]
         if err != nil {
             return info.CpuSchedstat{}, fmt.Errorf("couldn't read scheduler statistics for process %d: %v", pid, err)
         }
+        alivePids[pid] = struct{}{}
         rawMetrics := bytes.Split(bytes.TrimRight(contents, "\n"), []byte(" "))
         if len(rawMetrics) != 3 {
             return info.CpuSchedstat{}, fmt.Errorf("unexpected number of metrics in schedstat file for process %d", pid)
         }
-        cacheEntry, ok := pidMetricsCache[pid]
+        cacheEntry, ok := h.pidMetricsCache[pid]
         if !ok {
             cacheEntry = &info.CpuSchedstat{}
-            pidMetricsCache[pid] = cacheEntry
+            h.pidMetricsCache[pid] = cacheEntry
         }
         for i, rawMetric := range rawMetrics {
             metric, err := strconv.ParseUint(string(rawMetric), 10, 64)
@@ -349,11 +353,20 @@ func schedulerStatsFromProcs(rootFs string, pids []int, pidMetricsCache map[int]
             }
         }
     }
-    schedstats := info.CpuSchedstat{}
-    for _, v := range pidMetricsCache {
+    schedstats := h.pidMetricsSaved // copy
+    for p, v := range h.pidMetricsCache {
         schedstats.RunPeriods += v.RunPeriods
         schedstats.RunqueueTime += v.RunqueueTime
         schedstats.RunTime += v.RunTime
+        if _, alive := alivePids[p]; !alive {
+            // PID p is gone: accumulate its stats ...
+            h.pidMetricsSaved.RunPeriods += v.RunPeriods
+            h.pidMetricsSaved.RunqueueTime += v.RunqueueTime
+            h.pidMetricsSaved.RunTime += v.RunTime
+            // ... and remove its cache entry, to prevent
+            // pidMetricsCache from growing.
+            delete(h.pidMetricsCache, p)
+        }
     }
     return schedstats, nil
 }
@@ -383,7 +396,7 @@ func getReferencedKBytes(pids []int) (uint64, error) {
         if err != nil {
             klog.V(5).Infof("Cannot read %s file, err: %s", smapsFilePath, err)
             if os.IsNotExist(err) {
-                continue //smaps file does not exists for all PIDs
+                continue // smaps file does not exists for all PIDs
             }
             return 0, err
         }
@@ -426,7 +439,7 @@ func clearReferencedBytes(pids []int, cycles uint64, resetInterval uint64) error
     if cycles%resetInterval == 0 {
         for _, pid := range pids {
             clearRefsFilePath := fmt.Sprintf(clearRefsFilePathPattern, pid)
-            clerRefsFile, err := os.OpenFile(clearRefsFilePath, os.O_WRONLY, 0644)
+            clerRefsFile, err := os.OpenFile(clearRefsFilePath, os.O_WRONLY, 0o644)
             if err != nil {
                 // clear_refs file may not exist for all PIDs
                 continue
@@ -455,9 +468,7 @@ func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error)
     return ifaceStats, nil
 }
-var (
-    ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
-)
+var ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
 func isIgnoredDevice(ifName string) bool {
     for _, prefix := range ignoredDevicePrefixes {
@@ -615,11 +626,9 @@ func scanAdvancedTCPStats(advancedStats *info.TcpAdvancedStat, advancedTCPStatsF
     }
     return scanner.Err()
 }
 func scanTCPStats(tcpStatsFile string) (info.TcpStat, error) {
     var stats info.TcpStat
     data, err := ioutil.ReadFile(tcpStatsFile)
@@ -628,17 +637,17 @@ func scanTCPStats(tcpStatsFile string) (info.TcpStat, error) {
     }
     tcpStateMap := map[string]uint64{
-        "01": 0, //ESTABLISHED
-        "02": 0, //SYN_SENT
-        "03": 0, //SYN_RECV
-        "04": 0, //FIN_WAIT1
-        "05": 0, //FIN_WAIT2
-        "06": 0, //TIME_WAIT
-        "07": 0, //CLOSE
-        "08": 0, //CLOSE_WAIT
-        "09": 0, //LAST_ACK
-        "0A": 0, //LISTEN
-        "0B": 0, //CLOSING
+        "01": 0, // ESTABLISHED
+        "02": 0, // SYN_SENT
+        "03": 0, // SYN_RECV
+        "04": 0, // FIN_WAIT1
+        "05": 0, // FIN_WAIT2
+        "06": 0, // TIME_WAIT
+        "07": 0, // CLOSE
+        "08": 0, // CLOSE_WAIT
+        "09": 0, // LAST_ACK
+        "0A": 0, // LISTEN
+        "0B": 0, // CLOSING
     }
     reader := strings.NewReader(string(data))
@@ -779,14 +788,14 @@ func setCPUStats(s *cgroups.Stats, ret *info.ContainerStats, withPerCPU bool) {
 }
 func setDiskIoStats(s *cgroups.Stats, ret *info.ContainerStats) {
-    ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
-    ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
-    ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
-    ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
-    ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
-    ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
-    ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
-    ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)
+    ret.DiskIo.IoServiceBytes = diskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
+    ret.DiskIo.IoServiced = diskStatsCopy(s.BlkioStats.IoServicedRecursive)
+    ret.DiskIo.IoQueued = diskStatsCopy(s.BlkioStats.IoQueuedRecursive)
+    ret.DiskIo.Sectors = diskStatsCopy(s.BlkioStats.SectorsRecursive)
+    ret.DiskIo.IoServiceTime = diskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
+    ret.DiskIo.IoWaitTime = diskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
+    ret.DiskIo.IoMerged = diskStatsCopy(s.BlkioStats.IoMergedRecursive)
+    ret.DiskIo.IoTime = diskStatsCopy(s.BlkioStats.IoTimeRecursive)
 }
 func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
@@ -797,7 +806,7 @@ func setMemoryStats(s *cgroups.Stats, ret *info.ContainerStats) {
     if cgroups.IsCgroup2UnifiedMode() {
         ret.Memory.Cache = s.MemoryStats.Stats["file"]
         ret.Memory.RSS = s.MemoryStats.Stats["anon"]
-        ret.Memory.Swap = s.MemoryStats.SwapUsage.Usage
+        ret.Memory.Swap = s.MemoryStats.SwapUsage.Usage - s.MemoryStats.Usage.Usage
         ret.Memory.MappedFile = s.MemoryStats.Stats["file_mapped"]
     } else if s.MemoryStats.UseHierarchy {
         ret.Memory.Cache = s.MemoryStats.Stats["total_cache"]
@@ -896,7 +905,6 @@ func setThreadsStats(s *cgroups.Stats, ret *info.ContainerStats) {
         ret.Processes.ThreadsCurrent = s.PidsStats.Current
         ret.Processes.ThreadsMax = s.PidsStats.Limit
     }
-
 }
 func newContainerStats(libcontainerStats *libcontainer.Stats, includedMetrics container.MetricSet) *info.ContainerStats {
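Note: `pidMetricsSaved` keeps the reported scheduler totals monotonic: counters of PIDs that have exited are folded into a saved baseline before their cache entries are dropped. A self-contained sketch of that accumulation step (type and function names below are illustrative):

    package main

    import "fmt"

    // schedstat mirrors the three counters in /proc/<pid>/schedstat.
    type schedstat struct{ runTime, runqueueTime, runPeriods uint64 }

    // accumulate sums per-PID counters on top of `saved`, and moves the
    // counters of exited PIDs into `saved` so totals never go backwards.
    func accumulate(cache map[int]*schedstat, alive map[int]struct{}, saved *schedstat) schedstat {
        total := *saved // start from stats of already-exited PIDs
        for pid, s := range cache {
            total.runTime += s.runTime
            total.runqueueTime += s.runqueueTime
            total.runPeriods += s.runPeriods
            if _, ok := alive[pid]; !ok {
                saved.runTime += s.runTime
                saved.runqueueTime += s.runqueueTime
                saved.runPeriods += s.runPeriods
                delete(cache, pid) // keep the cache from growing
            }
        }
        return total
    }

    func main() {
        cache := map[int]*schedstat{42: {100, 10, 5}, 43: {50, 5, 2}}
        saved := schedstat{}
        total := accumulate(cache, map[int]struct{}{42: {}}, &saved) // PID 43 exited
        fmt.Printf("total=%+v saved=%+v cached=%d\n", total, saved, len(cache))
    }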


@@ -19,71 +19,49 @@ import (
     info "github.com/google/cadvisor/info/v1"
-    "github.com/google/cadvisor/container"
     "github.com/opencontainers/runc/libcontainer/cgroups"
+    "github.com/google/cadvisor/container"
     fs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
     fs2 "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
     configs "github.com/opencontainers/runc/libcontainer/configs"
     "k8s.io/klog/v2"
 )
-type CgroupSubsystems struct {
-    // Cgroup subsystem mounts.
-    // e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"]
-    Mounts []cgroups.Mount
-    // Cgroup subsystem to their mount location.
-    // e.g.: "cpu" -> "/sys/fs/cgroup/cpu"
-    MountPoints map[string]string
-}
-// Get information about the cgroup subsystems those we want
-func GetCgroupSubsystems(includedMetrics container.MetricSet) (CgroupSubsystems, error) {
+// GetCgroupSubsystems returns information about the cgroup subsystems that are
+// of interest as a map of cgroup controllers to their mount points.
+// For example, "cpu" -> "/sys/fs/cgroup/cpu".
+//
+// The incudeMetrics arguments specifies which metrics are requested,
+// and is used to filter out some cgroups and their mounts. If nil,
+// all supported cgroup subsystems are included.
+//
+// For cgroup v2, includedMetrics argument is unused, the only map key is ""
+// (empty string), and the value is the unified cgroup mount point.
+func GetCgroupSubsystems(includedMetrics container.MetricSet) (map[string]string, error) {
+    if cgroups.IsCgroup2UnifiedMode() {
+        return map[string]string{"": fs2.UnifiedMountpoint}, nil
+    }
     // Get all cgroup mounts.
     allCgroups, err := cgroups.GetCgroupMounts(true)
     if err != nil {
-        return CgroupSubsystems{}, err
+        return nil, err
     }
-    disableCgroups := map[string]struct{}{}
-    //currently we only support disable blkio subsystem
-    if !includedMetrics.Has(container.DiskIOMetrics) {
-        disableCgroups["blkio"] = struct{}{}
-        disableCgroups["io"] = struct{}{}
-    }
-    return getCgroupSubsystemsHelper(allCgroups, disableCgroups)
+    return getCgroupSubsystemsHelper(allCgroups, includedMetrics)
 }
-// Get information about all the cgroup subsystems.
-func GetAllCgroupSubsystems() (CgroupSubsystems, error) {
-    // Get all cgroup mounts.
-    allCgroups, err := cgroups.GetCgroupMounts(true)
-    if err != nil {
-        return CgroupSubsystems{}, err
-    }
-    emptyDisableCgroups := map[string]struct{}{}
-    return getCgroupSubsystemsHelper(allCgroups, emptyDisableCgroups)
-}
-func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount, disableCgroups map[string]struct{}) (CgroupSubsystems, error) {
+func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount, includedMetrics container.MetricSet) (map[string]string, error) {
     if len(allCgroups) == 0 {
-        return CgroupSubsystems{}, fmt.Errorf("failed to find cgroup mounts")
+        return nil, fmt.Errorf("failed to find cgroup mounts")
     }
     // Trim the mounts to only the subsystems we care about.
-    supportedCgroups := make([]cgroups.Mount, 0, len(allCgroups))
-    recordedMountpoints := make(map[string]struct{}, len(allCgroups))
     mountPoints := make(map[string]string, len(allCgroups))
     for _, mount := range allCgroups {
         for _, subsystem := range mount.Subsystems {
-            if _, exists := disableCgroups[subsystem]; exists {
-                continue
-            }
-            if _, ok := supportedSubsystems[subsystem]; !ok {
-                // Unsupported subsystem
+            if !needSubsys(subsystem, includedMetrics) {
                 continue
             }
             if _, ok := mountPoints[subsystem]; ok {
@@ -91,36 +69,44 @@ func getCgroupSubsystemsHelper(allCgroups []cgroups.Mount, disableCgroups map[st
                 klog.V(5).Infof("skipping %s, already using mount at %s", mount.Mountpoint, mountPoints[subsystem])
                 continue
             }
-            if _, ok := recordedMountpoints[mount.Mountpoint]; !ok {
-                // avoid appending the same mount twice in e.g. `cpu,cpuacct` case
-                supportedCgroups = append(supportedCgroups, mount)
-                recordedMountpoints[mount.Mountpoint] = struct{}{}
-            }
             mountPoints[subsystem] = mount.Mountpoint
         }
     }
-    return CgroupSubsystems{
-        Mounts: supportedCgroups,
-        MountPoints: mountPoints,
-    }, nil
+    return mountPoints, nil
 }
-// Cgroup subsystems we support listing (should be the minimal set we need stats from).
-var supportedSubsystems map[string]struct{} = map[string]struct{}{
-    "cpu": {},
-    "cpuacct": {},
-    "memory": {},
-    "hugetlb": {},
-    "pids": {},
-    "cpuset": {},
-    "blkio": {},
-    "io": {},
-    "devices": {},
-    "perf_event": {},
+// A map of cgroup subsystems we support listing (should be the minimal set
+// we need stats from) to a respective MetricKind.
+var supportedSubsystems = map[string]container.MetricKind{
+    "cpu": container.CpuUsageMetrics,
+    "cpuacct": container.CpuUsageMetrics,
+    "memory": container.MemoryUsageMetrics,
+    "hugetlb": container.HugetlbUsageMetrics,
+    "pids": container.ProcessMetrics,
+    "cpuset": container.CPUSetMetrics,
+    "blkio": container.DiskIOMetrics,
+    "io": container.DiskIOMetrics,
+    "devices": "",
+    "perf_event": container.PerfMetrics,
 }
-func DiskStatsCopy0(major, minor uint64) *info.PerDiskStats {
+// Check if this cgroup subsystem/controller is of use.
+func needSubsys(name string, metrics container.MetricSet) bool {
+    // Check if supported.
+    metric, supported := supportedSubsystems[name]
+    if !supported {
+        return false
+    }
+    // Check if needed.
+    if metrics == nil || metric == "" {
+        return true
+    }
+    return metrics.Has(metric)
+}
+func diskStatsCopy0(major, minor uint64) *info.PerDiskStats {
     disk := info.PerDiskStats{
         Major: major,
         Minor: minor,
@@ -129,12 +115,12 @@ func DiskStatsCopy0(major, minor uint64) *info.PerDiskStats {
     return &disk
 }
-type DiskKey struct {
+type diskKey struct {
     Major uint64
     Minor uint64
 }
-func DiskStatsCopy1(diskStat map[DiskKey]*info.PerDiskStats) []info.PerDiskStats {
+func diskStatsCopy1(diskStat map[diskKey]*info.PerDiskStats) []info.PerDiskStats {
     i := 0
     stat := make([]info.PerDiskStats, len(diskStat))
     for _, disk := range diskStat {
@@ -144,21 +130,21 @@ func DiskStatsCopy1(diskStat map[DiskKey]*info.PerDiskStats) []info.PerDiskStats
     return stat
 }
-func DiskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) {
+func diskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStats) {
     if len(blkioStats) == 0 {
         return
     }
-    diskStat := make(map[DiskKey]*info.PerDiskStats)
+    diskStat := make(map[diskKey]*info.PerDiskStats)
     for i := range blkioStats {
         major := blkioStats[i].Major
         minor := blkioStats[i].Minor
-        key := DiskKey{
+        key := diskKey{
             Major: major,
             Minor: minor,
         }
         diskp, ok := diskStat[key]
         if !ok {
-            diskp = DiskStatsCopy0(major, minor)
+            diskp = diskStatsCopy0(major, minor)
             diskStat[key] = diskp
         }
         op := blkioStats[i].Op
@@ -167,12 +153,12 @@ func DiskStatsCopy(blkioStats []cgroups.BlkioStatEntry) (stat []info.PerDiskStat
         }
         diskp.Stats[op] = blkioStats[i].Value
     }
-    return DiskStatsCopy1(diskStat)
+    return diskStatsCopy1(diskStat)
 }
 func NewCgroupManager(name string, paths map[string]string) (cgroups.Manager, error) {
     if cgroups.IsCgroup2UnifiedMode() {
-        path := paths["cpu"]
+        path := paths[""]
         return fs2.NewManager(nil, path, false)
     }
@@ -180,5 +166,4 @@ func NewCgroupManager(name string, paths map[string]string) (cgroups.Manager, er
         Name: name,
     }
     return fs.NewManager(&config, paths, false), nil
 }
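Note: with the `CgroupSubsystems` struct gone, callers get a plain controller-to-mountpoint map; on cgroup v2 the single key is "" and `NewCgroupManager` looks it up accordingly. A sketch of how a caller might use the new map-based API (assuming the import path from the diff; error handling trimmed):

    package main

    import (
        "fmt"

        "github.com/google/cadvisor/container/libcontainer"
    )

    func main() {
        // nil requests all supported subsystems, per the new doc comment.
        mounts, err := libcontainer.GetCgroupSubsystems(nil)
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        // cgroup v1: "cpu" -> "/sys/fs/cgroup/cpu", "memory" -> ..., etc.
        // cgroup v2: "" -> the unified mount point.
        for subsys, mp := range mounts {
            fmt.Printf("%q -> %s\n", subsys, mp)
        }
    }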


@@ -29,15 +29,17 @@ import (
     "k8s.io/klog/v2"
 )
-var dockerOnly = flag.Bool("docker_only", false, "Only report docker containers in addition to root stats")
-var disableRootCgroupStats = flag.Bool("disable_root_cgroup_stats", false, "Disable collecting root Cgroup stats")
+var (
+    DockerOnly = flag.Bool("docker_only", false, "Only report docker containers in addition to root stats")
+    disableRootCgroupStats = flag.Bool("disable_root_cgroup_stats", false, "Disable collecting root Cgroup stats")
+)
 type rawFactory struct {
     // Factory for machine information.
     machineInfoFactory info.MachineInfoFactory
     // Information about the cgroup subsystems.
-    cgroupSubsystems *libcontainer.CgroupSubsystems
+    cgroupSubsystems map[string]string
     // Information about mounted filesystems.
     fsInfo fs.FsInfo
@@ -56,7 +58,7 @@ func (f *rawFactory) String() string {
     return "raw"
 }
-func (f *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
+func (f *rawFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (container.ContainerHandler, error) {
     rootFs := "/"
     if !inHostNamespace {
         rootFs = "/rootfs"
@@ -69,7 +71,7 @@ func (f *rawFactory) CanHandleAndAccept(name string) (bool, bool, error) {
     if name == "/" {
         return true, true, nil
     }
-    if *dockerOnly && f.rawPrefixWhiteList[0] == "" {
+    if *DockerOnly && f.rawPrefixWhiteList[0] == "" {
         return true, false, nil
     }
     for _, prefix := range f.rawPrefixWhiteList {
@@ -89,7 +91,7 @@ func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, incl
     if err != nil {
         return fmt.Errorf("failed to get cgroup subsystems: %v", err)
     }
-    if len(cgroupSubsystems.Mounts) == 0 {
+    if len(cgroupSubsystems) == 0 {
         return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
     }
@@ -102,7 +104,7 @@
     factory := &rawFactory{
         machineInfoFactory: machineInfoFactory,
         fsInfo: fsInfo,
-        cgroupSubsystems: &cgroupSubsystems,
+        cgroupSubsystems: cgroupSubsystems,
         watcher: watcher,
         includedMetrics: includedMetrics,
         rawPrefixWhiteList: rawPrefixWhiteList,


@@ -24,6 +24,7 @@ import (
     "github.com/google/cadvisor/fs"
     info "github.com/google/cadvisor/info/v1"
     "github.com/google/cadvisor/machine"
+    "github.com/opencontainers/runc/libcontainer/cgroups"
     "k8s.io/klog/v2"
 )
@@ -48,13 +49,13 @@ func isRootCgroup(name string) bool {
     return name == "/"
 }
-func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) {
+func newRawContainerHandler(name string, cgroupSubsystems map[string]string, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *common.InotifyWatcher, rootFs string, includedMetrics container.MetricSet) (container.ContainerHandler, error) {
     cHints, err := common.GetContainerHintsFromFile(*common.ArgContainerHints)
     if err != nil {
         return nil, err
     }
-    cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems.MountPoints, name)
+    cgroupPaths := common.MakeCgroupPaths(cgroupSubsystems, name)
     cgroupManager, err := libcontainer.NewCgroupManager(name, cgroupPaths)
     if err != nil {
@@ -244,7 +245,11 @@ func (h *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
 }
 func (h *rawContainerHandler) GetCgroupPath(resource string) (string, error) {
-    path, ok := h.cgroupPaths[resource]
+    var res string
+    if !cgroups.IsCgroup2UnifiedMode() {
+        res = resource
+    }
+    path, ok := h.cgroupPaths[res]
     if !ok {
         return "", fmt.Errorf("could not find path for resource %q for container %q", resource, h.name)
     }


@@ -23,10 +23,11 @@ import (
     "path"
     "strings"
-    inotify "k8s.io/utils/inotify"
     "github.com/google/cadvisor/container/common"
     "github.com/google/cadvisor/container/libcontainer"
     "github.com/google/cadvisor/watcher"
+    inotify "k8s.io/utils/inotify"
     "k8s.io/klog/v2"
 )
@@ -35,8 +36,6 @@ type rawContainerWatcher struct {
     // Absolute path to the root of the cgroup hierarchies
     cgroupPaths map[string]string
-    cgroupSubsystems *libcontainer.CgroupSubsystems
     // Inotify event watcher.
     watcher *common.InotifyWatcher
@@ -45,11 +44,11 @@ type rawContainerWatcher struct {
 }
 func NewRawContainerWatcher() (watcher.ContainerWatcher, error) {
-    cgroupSubsystems, err := libcontainer.GetAllCgroupSubsystems()
+    cgroupSubsystems, err := libcontainer.GetCgroupSubsystems(nil)
     if err != nil {
         return nil, fmt.Errorf("failed to get cgroup subsystems: %v", err)
     }
-    if len(cgroupSubsystems.Mounts) == 0 {
+    if len(cgroupSubsystems) == 0 {
         return nil, fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
     }
@@ -59,10 +58,9 @@ func NewRawContainerWatcher() (watcher.ContainerWatcher, error) {
     }
     rawWatcher := &rawContainerWatcher{
-        cgroupPaths: common.MakeCgroupPaths(cgroupSubsystems.MountPoints, "/"),
-        cgroupSubsystems: &cgroupSubsystems,
-        watcher: watcher,
-        stopWatcher: make(chan error),
+        cgroupPaths: cgroupSubsystems,
+        watcher: watcher,
+        stopWatcher: make(chan error),
     }
     return rawWatcher, nil
@@ -195,8 +193,8 @@ func (w *rawContainerWatcher) processEvent(event *inotify.Event, events chan wat
     // Derive the container name from the path name.
     var containerName string
-    for _, mount := range w.cgroupSubsystems.Mounts {
-        mountLocation := path.Clean(mount.Mountpoint) + "/"
+    for _, mount := range w.cgroupPaths {
+        mountLocation := path.Clean(mount) + "/"
         if strings.HasPrefix(event.Name, mountLocation) {
             containerName = event.Name[len(mountLocation)-1:]
             break


@@ -32,7 +32,7 @@ func (f *systemdFactory) String() string {
     return "systemd"
 }
-func (f *systemdFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
+func (f *systemdFactory) NewContainerHandler(name string, metadataEnvAllowList []string, inHostNamespace bool) (container.ContainerHandler, error) {
     return nil, fmt.Errorf("Not yet supported")
 }


@@ -16,9 +16,10 @@
 package install
 import (
-    "k8s.io/klog/v2"
     "github.com/google/cadvisor/container"
     "github.com/google/cadvisor/container/systemd"
+    "k8s.io/klog/v2"
 )
 func init() {


@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+//go:build linux
 // +build linux
 // Provides Filesystem Stats
@@ -19,6 +20,7 @@ package fs
 import (
     "bufio"
+    "errors"
     "fmt"
     "io/ioutil"
     "os"
@@ -30,11 +32,12 @@ import (
     "strings"
     "syscall"
-    "github.com/google/cadvisor/devicemapper"
-    "github.com/google/cadvisor/utils"
     zfs "github.com/mistifyio/go-zfs"
     mount "github.com/moby/sys/mountinfo"
+    "github.com/google/cadvisor/devicemapper"
+    "github.com/google/cadvisor/utils"
     "k8s.io/klog/v2"
 )
@@ -56,6 +59,9 @@ const (
 // A pool for restricting the number of consecutive `du` and `find` tasks running.
 var pool = make(chan struct{}, maxConcurrentOps)
+// ErrDeviceNotInPartitionsMap is the error resulting if a device could not be found in the partitions map.
+var ErrDeviceNotInPartitionsMap = errors.New("could not find device in cached partitions map")
 func init() {
     for i := 0; i < maxConcurrentOps; i++ {
         releaseToken()
@@ -279,15 +285,17 @@ func (i *RealFsInfo) addSystemRootLabel(mounts []*mount.Info) {
 // addDockerImagesLabel attempts to determine which device contains the mount for docker images.
 func (i *RealFsInfo) addDockerImagesLabel(context Context, mounts []*mount.Info) {
-    dockerDev, dockerPartition, err := i.getDockerDeviceMapperInfo(context.Docker)
-    if err != nil {
-        klog.Warningf("Could not get Docker devicemapper device: %v", err)
-    }
-    if len(dockerDev) > 0 && dockerPartition != nil {
-        i.partitions[dockerDev] = *dockerPartition
-        i.labels[LabelDockerImages] = dockerDev
-    } else {
-        i.updateContainerImagesPath(LabelDockerImages, mounts, getDockerImagePaths(context))
+    if context.Docker.Driver != "" {
+        dockerDev, dockerPartition, err := i.getDockerDeviceMapperInfo(context.Docker)
+        if err != nil {
+            klog.Warningf("Could not get Docker devicemapper device: %v", err)
+        }
+        if len(dockerDev) > 0 && dockerPartition != nil {
+            i.partitions[dockerDev] = *dockerPartition
+            i.labels[LabelDockerImages] = dockerDev
+        } else {
+            i.updateContainerImagesPath(LabelDockerImages, mounts, getDockerImagePaths(context))
+        }
     }
 }
@@ -582,15 +590,20 @@ func (i *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
     }
     mnt, found := i.mountInfoFromDir(dir)
-    if found && mnt.FSType == "btrfs" && mnt.Major == 0 && strings.HasPrefix(mnt.Source, "/dev/") {
-        major, minor, err := getBtrfsMajorMinorIds(mnt)
-        if err != nil {
-            klog.Warningf("%s", err)
-        } else {
-            return &DeviceInfo{mnt.Source, uint(major), uint(minor)}, nil
+    if found && strings.HasPrefix(mnt.Source, "/dev/") {
+        major, minor := mnt.Major, mnt.Minor
+        if mnt.FSType == "btrfs" && major == 0 {
+            major, minor, err = getBtrfsMajorMinorIds(mnt)
+            if err != nil {
+                klog.Warningf("Unable to get btrfs mountpoint IDs: %v", err)
+            }
         }
+        return &DeviceInfo{mnt.Source, uint(major), uint(minor)}, nil
     }
-    return nil, fmt.Errorf("could not find device with major: %d, minor: %d in cached partitions map", major, minor)
+    return nil, fmt.Errorf("with major: %d, minor: %d: %w", major, minor, ErrDeviceNotInPartitionsMap)
 }
 func GetDirUsage(dir string) (UsageInfo, error) {
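Note: returning a wrapped sentinel (`%w` plus `ErrDeviceNotInPartitionsMap`) lets callers match the failure with `errors.Is` instead of parsing the message. A minimal standalone sketch of the pattern (not the vendored function):

    package main

    import (
        "errors"
        "fmt"
    )

    var errDeviceNotInPartitionsMap = errors.New("could not find device in cached partitions map")

    func lookupDevice(major, minor uint64) error {
        // Wrap with %w so the sentinel survives the added context.
        return fmt.Errorf("with major: %d, minor: %d: %w", major, minor, errDeviceNotInPartitionsMap)
    }

    func main() {
        err := lookupDevice(8, 1)
        if errors.Is(err, errDeviceNotInPartitionsMap) {
            fmt.Println("device not cached:", err)
        }
    }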


@@ -963,6 +963,8 @@ type ContainerStats struct {
     Resctrl ResctrlStats `json:"resctrl,omitempty"`
     CpuSet CPUSetStats `json:"cpuset,omitempty"`
+
+    OOMEvents uint64 `json:"oom_events,omitempty"`
 }
 func timeEq(t1, t2 time.Time, tolerance time.Duration) bool {


@@ -47,13 +47,16 @@ type Node struct {
 }
 type Core struct {
     Id int `json:"core_id"`
     Threads []int `json:"thread_ids"`
     Caches []Cache `json:"caches"`
-    SocketID int `json:"socket_id"`
+    UncoreCaches []Cache `json:"uncore_caches"`
+    SocketID int `json:"socket_id"`
 }
 type Cache struct {
+    // Id of memory cache
+    Id int `json:"id"`
     // Size of memory cache in bytes.
     Size uint64 `json:"size"`
     // Type of memory cache: data, instruction, or unified.
@@ -175,6 +178,9 @@ type MachineInfo struct {
     // The time of this information point.
     Timestamp time.Time `json:"timestamp"`
+    // Vendor id of CPU.
+    CPUVendorID string `json:"vendor_id"`
     // The number of cores in this machine.
     NumCores int `json:"num_cores"`
@@ -246,6 +252,7 @@ func (m *MachineInfo) Clone() *MachineInfo {
         }
     }
     copy := MachineInfo{
+        CPUVendorID: m.CPUVendorID,
         Timestamp: m.Timestamp,
         NumCores: m.NumCores,
         NumPhysicalCores: m.NumPhysicalCores,


@@ -263,7 +263,7 @@ type FsInfo struct {
 }
 type RequestOptions struct {
-    // Type of container identifier specified - "name", "dockerid", dockeralias"
+    // Type of container identifier specified - TypeName (default) or TypeDocker
     IdType string `json:"type"`
     // Number of stats to return
     Count int `json:"count"`


@@ -18,8 +18,9 @@ import (
     "fmt"
     "time"
-    "github.com/google/cadvisor/info/v1"
     "k8s.io/klog/v2"
+    v1 "github.com/google/cadvisor/info/v1"
 )
 func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {


@@ -18,7 +18,7 @@ import (
     // TODO(rjnagal): Move structs from v1.
     "time"
-    "github.com/google/cadvisor/info/v1"
+    v1 "github.com/google/cadvisor/info/v1"
 )
 type Attributes struct {


@@ -121,6 +121,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
     machineInfo := &info.MachineInfo{
         Timestamp: time.Now(),
+        CPUVendorID: GetCPUVendorID(cpuinfo),
         NumCores: numCores,
         NumPhysicalCores: GetPhysicalCores(cpuinfo),
         NumSockets: GetSockets(cpuinfo),


@@ -21,6 +21,7 @@ import (
     "os"
     "path"
     "regexp"
+
     // s390/s390x changes
     "runtime"
     "strconv"
@@ -43,6 +44,7 @@ var (
     cpuClockSpeedMHz = regexp.MustCompile(`(?:cpu MHz|CPU MHz|clock)\s*:\s*([0-9]+\.[0-9]+)(?:MHz)?`)
     memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)
     swapCapacityRegexp = regexp.MustCompile(`SwapTotal:\s*([0-9]+) kB`)
+    vendorIDRegexp = regexp.MustCompile(`vendor_id\s*:\s*(\w+)`)
     cpuBusPath = "/sys/bus/cpu/devices/"
     isMemoryController = regexp.MustCompile("mc[0-9]+")
@@ -54,6 +56,21 @@ var (
 const memTypeFileName = "dimm_mem_type"
 const sizeFileName = "size"
+// GetCPUVendorID returns "vendor_id" reading /proc/cpuinfo file.
+func GetCPUVendorID(procInfo []byte) string {
+    vendorID := ""
+    matches := vendorIDRegexp.FindSubmatch(procInfo)
+    if len(matches) != 2 {
+        klog.Warning("Cannot read vendor id correctly, set empty.")
+        return vendorID
+    }
+    vendorID = string(matches[1])
+    return vendorID
+}
 // GetPhysicalCores returns number of CPU cores reading /proc/cpuinfo file or if needed information from sysfs cpu path
 func GetPhysicalCores(procInfo []byte) int {
     numCores := getUniqueMatchesCount(string(procInfo), coreRegExp)
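Note: `GetCPUVendorID` is a plain regexp scan over /proc/cpuinfo. A quick standalone check of the same pattern against a sample fragment (the fragment itself is illustrative):

    package main

    import (
        "fmt"
        "regexp"
    )

    var vendorIDRegexp = regexp.MustCompile(`vendor_id\s*:\s*(\w+)`)

    func main() {
        cpuinfo := []byte("processor\t: 0\nvendor_id\t: GenuineIntel\ncpu family\t: 6\n")
        matches := vendorIDRegexp.FindSubmatch(cpuinfo)
        if len(matches) != 2 {
            // mirrors the vendored fallback: warn and report an empty vendor
            fmt.Println("cannot read vendor id, leaving it empty")
            return
        }
        fmt.Println("vendor:", string(matches[1])) // GenuineIntel
    }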


@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+//go:build freebsd || darwin || linux
 // +build freebsd darwin linux
 package machine


@@ -27,6 +27,7 @@ import (
     "strconv"
     "strings"
     "sync"
+    "sync/atomic"
     "time"
     "github.com/google/cadvisor/cache/memory"
@@ -102,6 +103,8 @@ type containerData struct {
     // resctrlCollector updates stats for resctrl controller.
     resctrlCollector stats.Collector
+
+    oomEvents uint64
 }
 // jitter returns a time.Duration between duration and duration + maxFactor * duration,
@@ -127,6 +130,7 @@ func (cd *containerData) Stop() error {
     }
     close(cd.stop)
     cd.perfCollector.Destroy()
+    cd.resctrlCollector.Destroy()
     return nil
 }
@@ -668,6 +672,9 @@ func (cd *containerData) updateStats() error {
             klog.V(2).Infof("Failed to add summary stats for %q: %v", cd.info.Name, err)
         }
     }
+
+    stats.OOMEvents = atomic.LoadUint64(&cd.oomEvents)
+
     var customStatsErr error
     cm := cd.collectorManager.(*collector.GenericCollectorManager)
     if len(cm.Collectors) > 0 {
@@ -721,7 +728,7 @@ func (cd *containerData) updateStats() error {
         return perfStatsErr
     }
     if resctrlStatsErr != nil {
-        klog.Errorf("error occurred while collecting resctrl stats for container %s: %s", cInfo.Name, err)
+        klog.Errorf("error occurred while collecting resctrl stats for container %s: %s", cInfo.Name, resctrlStatsErr)
         return resctrlStatsErr
     }
     return customStatsErr
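Note: `oomEvents` is written by the OOM watcher goroutine and read during housekeeping, so both sides must go through sync/atomic. A sketch of the counter pattern in isolation:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type containerData struct{ oomEvents uint64 }

    func main() {
        cd := &containerData{}
        var wg sync.WaitGroup
        for i := 0; i < 10; i++ { // ten concurrent "OOM events"
            wg.Add(1)
            go func() {
                defer wg.Done()
                atomic.AddUint64(&cd.oomEvents, 1) // writer side (the watcher)
            }()
        }
        wg.Wait()
        // Reader side (updateStats) uses an atomic load of the same word.
        fmt.Println("oom events:", atomic.LoadUint64(&cd.oomEvents))
    }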


@@ -24,18 +24,18 @@ import (
     "strconv"
     "strings"
     "sync"
+    "sync/atomic"
     "time"
     "github.com/google/cadvisor/accelerators"
     "github.com/google/cadvisor/cache/memory"
     "github.com/google/cadvisor/collector"
     "github.com/google/cadvisor/container"
-    "github.com/google/cadvisor/container/docker"
     "github.com/google/cadvisor/container/raw"
     "github.com/google/cadvisor/events"
     "github.com/google/cadvisor/fs"
     info "github.com/google/cadvisor/info/v1"
-    "github.com/google/cadvisor/info/v2"
+    v2 "github.com/google/cadvisor/info/v2"
     "github.com/google/cadvisor/machine"
     "github.com/google/cadvisor/nvm"
     "github.com/google/cadvisor/perf"
@@ -47,8 +47,6 @@ import (
     "github.com/google/cadvisor/watcher"
     "github.com/opencontainers/runc/libcontainer/cgroups"
-    "github.com/opencontainers/runc/libcontainer/cgroups/fs2"
-    "github.com/opencontainers/runc/libcontainer/intelrdt"
     "k8s.io/klog/v2"
     "k8s.io/utils/clock"
@@ -61,6 +59,14 @@ var eventStorageAgeLimit = flag.String("event_storage_age_limit", "default=24h",
 var eventStorageEventLimit = flag.String("event_storage_event_limit", "default=100000", "Max number of events to store (per type). Value is a comma separated list of key values, where the keys are event types (e.g.: creation, oom) or \"default\" and the value is an integer. Default is applied to all non-specified event types")
 var applicationMetricsCountLimit = flag.Int("application_metrics_count_limit", 100, "Max number of application metrics to store (per container)")
+// The namespace under which Docker aliases are unique.
+const DockerNamespace = "docker"
+var HousekeepingConfigFlags = HouskeepingConfig{
+    flag.Duration("max_housekeeping_interval", 60*time.Second, "Largest interval to allow between container housekeepings"),
+    flag.Bool("allow_dynamic_housekeeping", true, "Whether to allow the housekeeping interval to be dynamic"),
+}
 // The Manager interface defines operations for starting a manager and getting
 // container and machine information.
 type Manager interface {
@@ -129,12 +135,6 @@ type Manager interface {
     CloseEventChannel(watchID int)
-    // Get status information about docker.
-    DockerInfo() (info.DockerStatus, error)
-    // Get details about interesting docker images.
-    DockerImages() ([]info.DockerImage, error)
     // Returns debugging information. Map of lines per category.
     DebugInfo() map[string][]string
 }
@@ -146,7 +146,7 @@ type HouskeepingConfig = struct {
 }
 // New takes a memory storage and returns a new manager.
-func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig HouskeepingConfig, includedMetricsSet container.MetricSet, collectorHTTPClient *http.Client, rawContainerCgroupPathPrefixWhiteList []string, perfEventsFile string) (Manager, error) {
+func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, houskeepingConfig HouskeepingConfig, includedMetricsSet container.MetricSet, collectorHTTPClient *http.Client, rawContainerCgroupPathPrefixWhiteList, containerEnvMetadataWhiteList []string, perfEventsFile string, resctrlInterval time.Duration) (Manager, error) {
     if memoryCache == nil {
         return nil, fmt.Errorf("manager requires memory storage")
     }
@@ -203,6 +203,7 @@
         collectorHTTPClient: collectorHTTPClient,
         nvidiaManager: accelerators.NewNvidiaManager(includedMetricsSet),
         rawContainerCgroupPathPrefixWhiteList: rawContainerCgroupPathPrefixWhiteList,
+        containerEnvMetadataWhiteList: containerEnvMetadataWhiteList,
     }
     machineInfo, err := machine.Info(sysfs, fsInfo, inHostNamespace)
@@ -217,7 +218,7 @@
         return nil, err
     }
-    newManager.resctrlManager, err = resctrl.NewManager(selfContainer)
+    newManager.resctrlManager, err = resctrl.NewManager(resctrlInterval, resctrl.Setup, machineInfo.CPUVendorID, inHostNamespace)
     if err != nil {
         klog.V(4).Infof("Cannot gather resctrl metrics: %v", err)
     }
@@ -262,9 +263,11 @@ type manager struct {
     collectorHTTPClient *http.Client
     nvidiaManager stats.Manager
     perfManager stats.Manager
-    resctrlManager stats.Manager
+    resctrlManager resctrl.Manager
     // List of raw container cgroup path prefix whitelist.
     rawContainerCgroupPathPrefixWhiteList []string
+    // List of container env prefix whitelist, the matched container envs would be collected into metrics as extra labels.
+    containerEnvMetadataWhiteList []string
 }
 // Start the container manager.
@@ -327,7 +330,7 @@ func (m *manager) Start() error {
 func (m *manager) Stop() error {
     defer m.nvidiaManager.Destroy()
-    defer m.destroyPerfCollectors()
+    defer m.destroyCollectors()
     // Stop and wait on all quit channels.
     for i, c := range m.quitChannels {
         // Send the exit signal and wait on the thread to exit (by closing the channel).
@@ -345,9 +348,10 @@ func (m *manager) Stop() error {
     return nil
 }
-func (m *manager) destroyPerfCollectors() {
+func (m *manager) destroyCollectors() {
     for _, container := range m.containers {
         container.perfCollector.Destroy()
+        container.resctrlCollector.Destroy()
     }
 }
@@ -590,7 +594,7 @@ func (m *manager) getAllDockerContainers() map[string]*containerData {
     // Get containers in the Docker namespace.
     for name, cont := range m.containers {
-        if name.Namespace == docker.DockerNamespace {
+        if name.Namespace == DockerNamespace {
             containers[cont.info.Name] = cont
         }
     }
@@ -622,14 +626,14 @@ func (m *manager) getDockerContainer(containerName string) (*containerData, erro
     // Check for the container in the Docker container namespace.
     cont, ok := m.containers[namespacedContainerName{
-        Namespace: docker.DockerNamespace,
+        Namespace: DockerNamespace,
         Name: containerName,
     }]
     // Look for container by short prefix name if no exact match found.
     if !ok {
         for contName, c := range m.containers {
-            if contName.Namespace == docker.DockerNamespace && strings.HasPrefix(contName.Name, containerName) {
+            if contName.Namespace == DockerNamespace && strings.HasPrefix(contName.Name, containerName) {
                 if cont == nil {
                     cont = c
                 } else {
@@ -692,6 +696,10 @@ func (m *manager) GetRequestedContainersInfo(containerName string, options v2.Re
     for name, data := range containers {
         info, err := m.containerDataToContainerInfo(data, &query)
         if err != nil {
+            if err == memory.ErrDataNotFound {
+                klog.Warningf("Error getting data for container %s because of race condition", name)
+                continue
+            }
             errs.append(name, "containerDataToContainerInfo", err)
         }
         containersMap[name] = info
@@ -908,7 +916,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
         return nil
     }
-    handler, accept, err := container.NewContainerHandler(containerName, watchSource, m.inHostNamespace)
+    handler, accept, err := container.NewContainerHandler(containerName, watchSource, m.containerEnvMetadataWhiteList, m.inHostNamespace)
     if err != nil {
         return err
     }
@@ -928,13 +936,7 @@
         return err
     }
-    if cgroups.IsCgroup2UnifiedMode() {
-        perfCgroupPath := path.Join(fs2.UnifiedMountpoint, containerName)
-        cont.perfCollector, err = m.perfManager.GetCollector(perfCgroupPath)
-        if err != nil {
-            klog.Errorf("Perf event metrics will not be available for container %q: %v", containerName, err)
-        }
-    } else {
+    if !cgroups.IsCgroup2UnifiedMode() {
         devicesCgroupPath, err := handler.GetCgroupPath("devices")
         if err != nil {
             klog.Warningf("Error getting devices cgroup path: %v", err)
@@ -944,6 +946,8 @@
                 klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %s: %s", cont.info.Name, err)
             }
         }
+    }
+    if m.includedMetrics.Has(container.PerfMetrics) {
         perfCgroupPath, err := handler.GetCgroupPath("perf_event")
         if err != nil {
             klog.Warningf("Error getting perf_event cgroup path: %q", err)
@@ -956,14 +960,11 @@
     }
     if m.includedMetrics.Has(container.ResctrlMetrics) {
-        resctrlPath, err := intelrdt.GetIntelRdtPath(containerName)
+        cont.resctrlCollector, err = m.resctrlManager.GetCollector(containerName, func() ([]string, error) {
+            return cont.getContainerPids(m.inHostNamespace)
+        }, len(m.machineInfo.Topology))
         if err != nil {
-            klog.V(4).Infof("Error getting resctrl path: %q", err)
-        } else {
-            cont.resctrlCollector, err = m.resctrlManager.GetCollector(resctrlPath)
-            if err != nil {
-                klog.V(4).Infof("resctrl metrics will not be available for container %s: %s", cont.info.Name, err)
-            }
+            klog.V(4).Infof("resctrl metrics will not be available for container %s: %s", cont.info.Name, err)
         }
     }
@@ -1005,7 +1006,6 @@
     if err != nil {
         return err
     }
-
     // Start the container's housekeeping.
     return cont.Start()
 }
@@ -1237,6 +1237,24 @@ func (m *manager) watchForNewOoms() error {
             if err != nil {
                 klog.Errorf("failed to add OOM kill event for %q: %v", oomInstance.ContainerName, err)
             }
+            // Count OOM events for later collection by prometheus
+            request := v2.RequestOptions{
+                IdType: v2.TypeName,
+                Count: 1,
+            }
+            conts, err := m.getRequestedContainers(oomInstance.ContainerName, request)
+            if err != nil {
+                klog.V(2).Infof("failed getting container info for %q: %v", oomInstance.ContainerName, err)
+                continue
+            }
+            if len(conts) != 1 {
+                klog.V(2).Info("Expected the request to match only one container")
+                continue
+            }
+            for _, cont := range conts {
+                atomic.AddUint64(&cont.oomEvents, 1)
+            }
         }
     }()
     return nil
@@ -1304,14 +1322,6 @@ func parseEventsStoragePolicy() events.StoragePolicy {
     return policy
 }
-func (m *manager) DockerImages() ([]info.DockerImage, error) {
-    return docker.Images()
-}
-func (m *manager) DockerInfo() (info.DockerStatus, error) {
-    return docker.Status()
-}
 func (m *manager) DebugInfo() map[string][]string {
     debugInfo := container.DebugInfo()
@@ -1368,20 +1378,10 @@ func getVersionInfo() (*info.VersionInfo, error) {
     kernelVersion := machine.KernelVersion()
     osVersion := machine.ContainerOsVersion()
-    dockerVersion, err := docker.VersionString()
-    if err != nil {
-        return nil, err
-    }
-    dockerAPIVersion, err := docker.APIVersionString()
-    if err != nil {
-        return nil, err
-    }
     return &info.VersionInfo{
         KernelVersion: kernelVersion,
         ContainerOsVersion: osVersion,
-        DockerVersion: dockerVersion,
-        DockerAPIVersion: dockerAPIVersion,
         CadvisorVersion: version.Info["version"],
         CadvisorRevision: version.Info["revision"],
     }, nil
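Note: `HousekeepingConfigFlags` bundles the two housekeeping flags into the exported `HouskeepingConfig` struct so embedders can hand it straight to `New`. A standalone sketch of the flag-pointer-struct pattern (names below are illustrative):

    package main

    import (
        "flag"
        "fmt"
        "time"
    )

    // A struct of flag pointers: defaults are registered at package
    // init time and remain overridable on the command line.
    type housekeepingConfig struct {
        Interval *time.Duration
        Dynamic  *bool
    }

    var housekeepingFlags = housekeepingConfig{
        flag.Duration("max_housekeeping_interval", 60*time.Second, "largest interval between housekeepings"),
        flag.Bool("allow_dynamic_housekeeping", true, "allow the interval to be dynamic"),
    }

    func main() {
        flag.Parse()
        fmt.Println(*housekeepingFlags.Interval, *housekeepingFlags.Dynamic)
    }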


@ -1757,6 +1757,17 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc, includedMetri
}, },
}...) }...)
} }
if includedMetrics.Has(container.OOMMetrics) {
c.containerMetrics = append(c.containerMetrics, containerMetric{
name: "container_oom_events_total",
help: "Count of out of memory events observed for the container",
valueType: prometheus.CounterValue,
getValues: func(s *info.ContainerStats) metricValues {
return metricValues{{value: float64(s.OOMEvents), timestamp: s.Timestamp}}
},
})
}
return c return c
} }
@ -1825,7 +1836,7 @@ func DefaultContainerLabels(container *info.ContainerInfo) map[string]string {
} }
// BaseContainerLabels returns a ContainerLabelsFunc that exports the container // BaseContainerLabels returns a ContainerLabelsFunc that exports the container
// name, first alias, image name as well as white listed label values. // name, first alias, image name as well as all its white listed env and label values.
func BaseContainerLabels(whiteList []string) func(container *info.ContainerInfo) map[string]string { func BaseContainerLabels(whiteList []string) func(container *info.ContainerInfo) map[string]string {
whiteListMap := make(map[string]struct{}, len(whiteList)) whiteListMap := make(map[string]struct{}, len(whiteList))
for _, k := range whiteList { for _, k := range whiteList {
@ -1845,6 +1856,9 @@ func BaseContainerLabels(whiteList []string) func(container *info.ContainerInfo)
set[ContainerLabelPrefix+k] = v set[ContainerLabelPrefix+k] = v
} }
} }
for k, v := range container.Spec.Envs {
set[ContainerEnvPrefix+k] = v
}
return set return set
} }
} }
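
This change makes BaseContainerLabels export the collected environment variables alongside the whitelisted labels; per the updated doc comment, Spec.Envs itself only holds whitelisted env values. A small sketch of the resulting label-set construction, assuming container_label_/container_env_ prefix values (the exact constants are defined elsewhere in the metrics package):

package main

import "fmt"

const (
	containerLabelPrefix = "container_label_" // assumed prefix values
	containerEnvPrefix   = "container_env_"
)

// buildLabelSet applies the same rule as the hunk above: labels are
// filtered through the whitelist, while all collected envs are exported.
func buildLabelSet(labels, envs map[string]string, whitelist map[string]struct{}) map[string]string {
	set := map[string]string{}
	for k, v := range labels {
		if _, ok := whitelist[k]; ok {
			set[containerLabelPrefix+k] = v
		}
	}
	for k, v := range envs {
		set[containerEnvPrefix+k] = v
	}
	return set
}

func main() {
	set := buildLabelSet(
		map[string]string{"team": "infra", "internal": "x"},
		map[string]string{"PATH": "/usr/bin"},
		map[string]struct{}{"team": {}},
	)
	fmt.Println(set)
}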

View File

@ -17,9 +17,10 @@ package metrics
import ( import (
"strconv" "strconv"
"github.com/prometheus/client_golang/prometheus"
"github.com/google/cadvisor/container" "github.com/google/cadvisor/container"
info "github.com/google/cadvisor/info/v1" info "github.com/google/cadvisor/info/v1"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )
@ -334,6 +335,14 @@ func getCaches(machineInfo *info.MachineInfo) metricValues {
timestamp: machineInfo.Timestamp, timestamp: machineInfo.Timestamp,
}) })
} }
for _, cache := range core.UncoreCaches {
mValues = append(mValues,
metricValue{
value: float64(cache.Size),
labels: []string{nodeID, coreID, cache.Type, strconv.Itoa(cache.Level)},
timestamp: machineInfo.Timestamp,
})
}
} }
for _, cache := range node.Caches { for _, cache := range node.Caches {

View File

@ -1,3 +1,4 @@
//go:build libipmctl && cgo
// +build libipmctl,cgo // +build libipmctl,cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.
@ -21,9 +22,10 @@ package nvm
import "C" import "C"
import ( import (
"fmt" "fmt"
info "github.com/google/cadvisor/info/v1"
"sync" "sync"
info "github.com/google/cadvisor/info/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )

View File

@ -1,3 +1,4 @@
//go:build !libipmctl || !cgo
// +build !libipmctl !cgo // +build !libipmctl !cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.
@ -17,8 +18,9 @@
package nvm package nvm
import ( import (
info "github.com/google/cadvisor/info/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
info "github.com/google/cadvisor/info/v1"
) )
// GetInfo returns information specific for non-volatile memory modules. // GetInfo returns information specific for non-volatile memory modules.

View File

@ -1,3 +1,4 @@
//go:build libpfm && cgo
// +build libpfm,cgo // +build libpfm,cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.
@ -47,6 +48,10 @@ type collector struct {
onlineCPUs []int onlineCPUs []int
eventToCustomEvent map[Event]*CustomEvent eventToCustomEvent map[Event]*CustomEvent
uncore stats.Collector uncore stats.Collector
// Handle for mocking purposes.
perfEventOpen func(attr *unix.PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
ioctlSetInt func(fd int, req uint, value int) error
} }
type group struct { type group struct {
@ -76,7 +81,7 @@ func init() {
} }
func newCollector(cgroupPath string, events PerfEvents, onlineCPUs []int, cpuToSocket map[int]int) *collector { func newCollector(cgroupPath string, events PerfEvents, onlineCPUs []int, cpuToSocket map[int]int) *collector {
collector := &collector{cgroupPath: cgroupPath, events: events, onlineCPUs: onlineCPUs, cpuFiles: map[int]group{}, uncore: NewUncoreCollector(cgroupPath, events, cpuToSocket)} collector := &collector{cgroupPath: cgroupPath, events: events, onlineCPUs: onlineCPUs, cpuFiles: map[int]group{}, uncore: NewUncoreCollector(cgroupPath, events, cpuToSocket), perfEventOpen: unix.PerfEventOpen, ioctlSetInt: unix.IoctlSetInt}
mapEventsToCustomEvents(collector) mapEventsToCustomEvents(collector)
return collector return collector
} }
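
Replacing direct unix.PerfEventOpen / unix.IoctlSetInt calls with struct fields is a plain dependency-injection move so unit tests can stub the syscalls. A toy sketch of the pattern, with deliberately simplified signatures:

package main

import "fmt"

// collector stores its syscall wrappers as fields; production code wires
// them to the real unix functions, while tests swap in fakes.
type collector struct {
	perfEventOpen func(cpu, groupFd int) (fd int, err error)
	ioctlSetInt   func(fd int, req uint, value int) error
}

func newCollector() *collector {
	return &collector{
		// Stand-ins for unix.PerfEventOpen / unix.IoctlSetInt.
		perfEventOpen: func(cpu, groupFd int) (int, error) { return 42, nil },
		ioctlSetInt:   func(fd int, req uint, value int) error { return nil },
	}
}

func main() {
	c := newCollector()
	fd, err := c.perfEventOpen(0, -1)
	if err == nil {
		err = c.ioctlSetInt(fd, 0 /* e.g. PERF_EVENT_IOC_RESET */, 0)
	}
	fmt.Println("fd:", fd, "err:", err)
}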
@ -185,44 +190,30 @@ func (c *collector) setup() error {
c.cpuFilesLock.Lock() c.cpuFilesLock.Lock()
defer c.cpuFilesLock.Unlock() defer c.cpuFilesLock.Unlock()
cgroupFd := int(cgroup.Fd()) cgroupFd := int(cgroup.Fd())
for i, group := range c.events.Core.Events { groupIndex := 0
for _, group := range c.events.Core.Events {
// CPUs file descriptors of group leader needed for perf_event_open. // CPUs file descriptors of group leader needed for perf_event_open.
leaderFileDescriptors := make(map[int]int, len(c.onlineCPUs)) leaderFileDescriptors := make(map[int]int, len(c.onlineCPUs))
for _, cpu := range c.onlineCPUs { for _, cpu := range c.onlineCPUs {
leaderFileDescriptors[cpu] = groupLeaderFileDescriptor leaderFileDescriptors[cpu] = groupLeaderFileDescriptor
} }
for j, event := range group.events { leaderFileDescriptors, err := c.createLeaderFileDescriptors(group.events, cgroupFd, groupIndex, leaderFileDescriptors)
// First element is group leader. if err != nil {
isGroupLeader := j == 0 klog.Errorf("Cannot count perf event group %v: %v", group.events, err)
customEvent, ok := c.eventToCustomEvent[event] c.deleteGroup(groupIndex)
if ok { continue
config := c.createConfigFromRawEvent(customEvent) } else {
leaderFileDescriptors, err = c.registerEvent(eventInfo{string(customEvent.Name), config, cgroupFd, i, isGroupLeader}, leaderFileDescriptors) groupIndex++
if err != nil {
return err
}
} else {
config, err := c.createConfigFromEvent(event)
if err != nil {
return err
}
leaderFileDescriptors, err = c.registerEvent(eventInfo{string(event), config, cgroupFd, i, isGroupLeader}, leaderFileDescriptors)
if err != nil {
return err
}
// Clean memory allocated by C code.
C.free(unsafe.Pointer(config))
}
} }
// Group is prepared so we should reset and enable counting. // Group is prepared so we should reset and enable counting.
for _, fd := range leaderFileDescriptors { for _, fd := range leaderFileDescriptors {
err = unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_RESET, 0) err = c.ioctlSetInt(fd, unix.PERF_EVENT_IOC_RESET, 0)
if err != nil { if err != nil {
return err return err
} }
err = unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_ENABLE, 0) err = c.ioctlSetInt(fd, unix.PERF_EVENT_IOC_ENABLE, 0)
if err != nil { if err != nil {
return err return err
} }
@ -232,6 +223,35 @@ func (c *collector) setup() error {
return nil return nil
} }
func (c *collector) createLeaderFileDescriptors(events []Event, cgroupFd int, groupIndex int, leaderFileDescriptors map[int]int) (map[int]int, error) {
for j, event := range events {
// First element is group leader.
isGroupLeader := j == 0
customEvent, ok := c.eventToCustomEvent[event]
var err error
if ok {
config := c.createConfigFromRawEvent(customEvent)
leaderFileDescriptors, err = c.registerEvent(eventInfo{string(customEvent.Name), config, cgroupFd, groupIndex, isGroupLeader}, leaderFileDescriptors)
if err != nil {
return nil, fmt.Errorf("cannot register perf event: %v", err)
}
} else {
config, err := c.createConfigFromEvent(event)
if err != nil {
return nil, fmt.Errorf("cannot create config from perf event: %v", err)
}
leaderFileDescriptors, err = c.registerEvent(eventInfo{string(event), config, cgroupFd, groupIndex, isGroupLeader}, leaderFileDescriptors)
if err != nil {
return nil, fmt.Errorf("cannot register perf event: %v", err)
}
// Clean memory allocated by C code.
C.free(unsafe.Pointer(config))
}
}
return leaderFileDescriptors, nil
}
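
Besides extracting the helper, the rewritten setup() changes failure semantics: a group whose events cannot be registered is torn down with deleteGroup and skipped, instead of aborting the whole collector. A sketch of that skip-on-error loop, with registration faked:

package main

import "fmt"

// setupGroups registers each group's events; a failing group is logged
// and skipped, and groupIndex only advances for groups that succeed.
func setupGroups(groups [][]string, register func(string) error) int {
	groupIndex := 0
	for _, events := range groups {
		failed := false
		for _, ev := range events {
			if err := register(ev); err != nil {
				fmt.Printf("cannot count perf event group %v: %v\n", events, err)
				failed = true
				break
			}
		}
		if failed {
			continue // deleteGroup(groupIndex) would close any fds opened so far
		}
		groupIndex++
	}
	return groupIndex
}

func main() {
	ok := setupGroups(
		[][]string{{"cycles", "instructions"}, {"bogus-event"}},
		func(ev string) error {
			if ev == "bogus-event" {
				return fmt.Errorf("unsupported event")
			}
			return nil
		},
	)
	fmt.Println("groups counting:", ok)
}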
func readPerfEventAttr(name string, pfmGetOsEventEncoding func(string, unsafe.Pointer) error) (*unix.PerfEventAttr, error) { func readPerfEventAttr(name string, pfmGetOsEventEncoding func(string, unsafe.Pointer) error) (*unix.PerfEventAttr, error) {
perfEventAttrMemory := C.malloc(C.ulong(unsafe.Sizeof(unix.PerfEventAttr{}))) perfEventAttrMemory := C.malloc(C.ulong(unsafe.Sizeof(unix.PerfEventAttr{})))
// Fill memory with 0 values. // Fill memory with 0 values.
@ -279,13 +299,13 @@ func (c *collector) registerEvent(event eventInfo, leaderFileDescriptors map[int
setAttributes(event.config, event.isGroupLeader) setAttributes(event.config, event.isGroupLeader)
for _, cpu := range c.onlineCPUs { for _, cpu := range c.onlineCPUs {
fd, err := unix.PerfEventOpen(event.config, pid, cpu, leaderFileDescriptors[cpu], flags) fd, err := c.perfEventOpen(event.config, pid, cpu, leaderFileDescriptors[cpu], flags)
if err != nil { if err != nil {
return nil, fmt.Errorf("setting up perf event %#v failed: %q", event.config, err) return leaderFileDescriptors, fmt.Errorf("setting up perf event %#v failed: %q", event.config, err)
} }
perfFile := os.NewFile(uintptr(fd), event.name) perfFile := os.NewFile(uintptr(fd), event.name)
if perfFile == nil { if perfFile == nil {
return nil, fmt.Errorf("unable to create os.File from file descriptor %#v", fd) return leaderFileDescriptors, fmt.Errorf("unable to create os.File from file descriptor %#v", fd)
} }
c.addEventFile(event.groupIndex, event.name, cpu, perfFile) c.addEventFile(event.groupIndex, event.name, cpu, perfFile)
@ -333,6 +353,19 @@ func (c *collector) addEventFile(index int, name string, cpu int, perfFile *os.F
} }
} }
func (c *collector) deleteGroup(index int) {
for name, files := range c.cpuFiles[index].cpuFiles {
for cpu, file := range files {
klog.V(5).Infof("Closing perf event file descriptor for cgroup %q, event %q and CPU %d", c.cgroupPath, name, cpu)
err := file.Close()
if err != nil {
klog.Warningf("Unable to close perf event file descriptor for cgroup %q, event %q and CPU %d", c.cgroupPath, name, cpu)
}
}
}
delete(c.cpuFiles, index)
}
func createPerfEventAttr(event CustomEvent) *unix.PerfEventAttr { func createPerfEventAttr(event CustomEvent) *unix.PerfEventAttr {
length := len(event.Config) length := len(event.Config)
@ -369,17 +402,8 @@ func (c *collector) Destroy() {
c.cpuFilesLock.Lock() c.cpuFilesLock.Lock()
defer c.cpuFilesLock.Unlock() defer c.cpuFilesLock.Unlock()
for _, group := range c.cpuFiles { for i := range c.cpuFiles {
for name, files := range group.cpuFiles { c.deleteGroup(i)
for cpu, file := range files {
klog.V(5).Infof("Closing perf_event file descriptor for cgroup %q, event %q and CPU %d", c.cgroupPath, name, cpu)
err := file.Close()
if err != nil {
klog.Warningf("Unable to close perf_event file descriptor for cgroup %q, event %q and CPU %d", c.cgroupPath, name, cpu)
}
}
delete(group.cpuFiles, name)
}
} }
} }

View File

@ -1,3 +1,4 @@
//go:build !libpfm || !cgo
// +build !libpfm !cgo // +build !libpfm !cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.

View File

@ -1,3 +1,4 @@
//go:build libpfm && cgo
// +build libpfm,cgo // +build libpfm,cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.
@ -48,6 +49,10 @@ func NewManager(configFile string, topology []info.Node) (stats.Manager, error)
return nil, fmt.Errorf("unable to parse configuration file %q: %w", configFile, err) return nil, fmt.Errorf("unable to parse configuration file %q: %w", configFile, err)
} }
if len(config.Core.Events) == 0 && len(config.Uncore.Events) == 0 {
return nil, fmt.Errorf("there is no events in config file %q", configFile)
}
onlineCPUs := sysinfo.GetOnlineCPUs(topology) onlineCPUs := sysinfo.GetOnlineCPUs(topology)
cpuToSocket := make(map[int]int) cpuToSocket := make(map[int]int)

View File

@ -1,3 +1,4 @@
//go:build !libpfm || !cgo
// +build !libpfm !cgo // +build !libpfm !cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.

View File

@ -1,3 +1,4 @@
//go:build libpfm && cgo
// +build libpfm,cgo // +build libpfm,cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.

View File

@ -1,3 +1,4 @@
//go:build libpfm && cgo
// +build libpfm,cgo // +build libpfm,cgo
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2020 Google Inc. All Rights Reserved.
@ -158,6 +159,28 @@ func NewUncoreCollector(cgroupPath string, events PerfEvents, cpuToSocket map[in
return collector return collector
} }
func (c *uncoreCollector) createLeaderFileDescriptors(events []Event, groupIndex int, groupPMUs map[Event]uncorePMUs,
leaderFileDescriptors map[string]map[uint32]int) (map[string]map[uint32]int, error) {
var err error
for _, event := range events {
eventName, _ := parseEventName(string(event))
customEvent, ok := c.eventToCustomEvent[event]
if ok {
err = c.setupRawEvent(customEvent, groupPMUs[event], groupIndex, leaderFileDescriptors)
} else {
err = c.setupEvent(eventName, groupPMUs[event], groupIndex, leaderFileDescriptors)
}
if err != nil {
break
}
}
if err != nil {
c.deleteGroup(groupIndex)
return nil, fmt.Errorf("cannot create config from perf event: %v", err)
}
return leaderFileDescriptors, nil
}
func (c *uncoreCollector) setup(events PerfEvents, devicesPath string) error { func (c *uncoreCollector) setup(events PerfEvents, devicesPath string) error {
readUncorePMUs, err := getUncorePMUs(devicesPath) readUncorePMUs, err := getUncorePMUs(devicesPath)
if err != nil { if err != nil {
@ -190,21 +213,11 @@ func (c *uncoreCollector) setup(events PerfEvents, devicesPath string) error {
leaderFileDescriptors[pmu.name][cpu] = groupLeaderFileDescriptor leaderFileDescriptors[pmu.name][cpu] = groupLeaderFileDescriptor
} }
} }
leaderFileDescriptors, err = c.createLeaderFileDescriptors(group.events, i, groupPMUs, leaderFileDescriptors)
for _, event := range group.events { if err != nil {
eventName, _ := parseEventName(string(event)) klog.Error(err)
customEvent, ok := c.eventToCustomEvent[event] continue
if ok {
err = c.setupRawEvent(customEvent, groupPMUs[event], i, leaderFileDescriptors)
} else {
err = c.setupEvent(eventName, groupPMUs[event], i, leaderFileDescriptors)
}
if err != nil {
return err
}
} }
// Group is prepared so we should reset and enable counting. // Group is prepared so we should reset and enable counting.
for _, pmuCPUs := range leaderFileDescriptors { for _, pmuCPUs := range leaderFileDescriptors {
for _, fd := range pmuCPUs { for _, fd := range pmuCPUs {
@ -320,20 +333,8 @@ func (c *uncoreCollector) Destroy() {
c.cpuFilesLock.Lock() c.cpuFilesLock.Lock()
defer c.cpuFilesLock.Unlock() defer c.cpuFilesLock.Unlock()
for groupIndex, groupPMUs := range c.cpuFiles { for groupIndex := range c.cpuFiles {
for pmu, group := range groupPMUs { c.deleteGroup(groupIndex)
for name, cpus := range group.cpuFiles {
for cpu, file := range cpus {
klog.V(5).Infof("Closing uncore perf_event file descriptor for event %q, PMU %s and CPU %d", name, pmu, cpu)
err := file.Close()
if err != nil {
klog.Warningf("Unable to close perf_event file descriptor for event %q, PMU %s and CPU %d", name, pmu, cpu)
}
}
delete(group.cpuFiles, name)
}
delete(groupPMUs, pmu)
}
delete(c.cpuFiles, groupIndex) delete(c.cpuFiles, groupIndex)
} }
} }
@ -475,6 +476,24 @@ func (c *uncoreCollector) setupRawEvent(event *CustomEvent, pmus uncorePMUs, gro
return nil return nil
} }
func (c *uncoreCollector) deleteGroup(groupIndex int) {
groupPMUs := c.cpuFiles[groupIndex]
for pmu, group := range groupPMUs {
for name, cpus := range group.cpuFiles {
for cpu, file := range cpus {
klog.V(5).Infof("Closing uncore perf event file descriptor for event %q, PMU %s and CPU %d", name, pmu, cpu)
err := file.Close()
if err != nil {
klog.Warningf("Unable to close perf event file descriptor for event %q, PMU %s and CPU %d", name, pmu, cpu)
}
}
delete(group.cpuFiles, name)
}
delete(groupPMUs, pmu)
}
delete(c.cpuFiles, groupIndex)
}
func readPerfUncoreStat(file readerCloser, group group, cpu int, pmu string, cpuToSocket map[int]int) ([]info.PerfUncoreStat, error) { func readPerfUncoreStat(file readerCloser, group group, cpu int, pmu string, cpuToSocket map[int]int) ([]info.PerfUncoreStat, error) {
values, err := getPerfValues(file, group) values, err := getPerfValues(file, group)
if err != nil { if err != nil {

View File

@ -1,6 +1,7 @@
//go:build linux
// +build linux // +build linux
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2021 Google Inc. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -18,57 +19,153 @@
package resctrl package resctrl
import ( import (
info "github.com/google/cadvisor/info/v1" "fmt"
"github.com/google/cadvisor/stats" "os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/opencontainers/runc/libcontainer/configs" "k8s.io/klog/v2"
"github.com/opencontainers/runc/libcontainer/intelrdt"
info "github.com/google/cadvisor/info/v1"
) )
const noInterval = 0
type collector struct { type collector struct {
resctrl intelrdt.Manager id string
stats.NoopDestroy interval time.Duration
getContainerPids func() ([]string, error)
resctrlPath string
running bool
destroyed bool
numberOfNUMANodes int
vendorID string
mu sync.Mutex
inHostNamespace bool
} }
func newCollector(id string, resctrlPath string) *collector { func newCollector(id string, getContainerPids func() ([]string, error), interval time.Duration, numberOfNUMANodes int, vendorID string, inHostNamespace bool) *collector {
collector := &collector{ return &collector{id: id, interval: interval, getContainerPids: getContainerPids, numberOfNUMANodes: numberOfNUMANodes,
resctrl: intelrdt.NewManager( vendorID: vendorID, mu: sync.Mutex{}, inHostNamespace: inHostNamespace}
&configs.Config{
IntelRdt: &configs.IntelRdt{},
},
id,
resctrlPath,
),
}
return collector
} }
func (c *collector) UpdateStats(stats *info.ContainerStats) error { func (c *collector) setup() error {
stats.Resctrl = info.ResctrlStats{} var err error
c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
resctrlStats, err := c.resctrl.GetStats() if c.interval != noInterval {
if err != nil { if err != nil {
return err klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
} } else {
c.running = true
numberOfNUMANodes := len(*resctrlStats.MBMStats) }
go func() {
stats.Resctrl.MemoryBandwidth = make([]info.MemoryBandwidthStats, 0, numberOfNUMANodes) for {
stats.Resctrl.Cache = make([]info.CacheStats, 0, numberOfNUMANodes) time.Sleep(c.interval)
c.mu.Lock()
for _, numaNodeStats := range *resctrlStats.MBMStats { if c.destroyed {
stats.Resctrl.MemoryBandwidth = append(stats.Resctrl.MemoryBandwidth, break
info.MemoryBandwidthStats{ }
TotalBytes: numaNodeStats.MBMTotalBytes, klog.V(5).Infof("Trying to check %q containers control group.", c.id)
LocalBytes: numaNodeStats.MBMLocalBytes, if c.running {
}) err = c.checkMonitoringGroup()
} if err != nil {
c.running = false
for _, numaNodeStats := range *resctrlStats.CMTStats { klog.Errorf("Failed to check %q resctrl collector control group: %s \n Trying again in next intervals.", c.id, err)
stats.Resctrl.Cache = append(stats.Resctrl.Cache, }
info.CacheStats{LLCOccupancy: numaNodeStats.LLCOccupancy}) } else {
c.resctrlPath, err = prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
if err != nil {
c.running = false
klog.Errorf("Failed to setup container %q resctrl collector: %s \n Trying again in next intervals.", c.id, err)
}
}
c.mu.Unlock()
}
}()
} else {
// There is no interval set; if setup fails, stop.
if err != nil {
return fmt.Errorf("failed to setup container %q resctrl collector: %w", c.id, err)
}
c.running = true
} }
return nil return nil
} }
func (c *collector) checkMonitoringGroup() error {
newPath, err := prepareMonitoringGroup(c.id, c.getContainerPids, c.inHostNamespace)
if err != nil {
return fmt.Errorf("couldn't obtain mon_group path: %v", err)
}
// Check if container moved between control groups.
if newPath != c.resctrlPath {
err = c.clear()
if err != nil {
return fmt.Errorf("couldn't clear previous monitoring group: %w", err)
}
c.resctrlPath = newPath
}
return nil
}
func (c *collector) UpdateStats(stats *info.ContainerStats) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.running {
stats.Resctrl = info.ResctrlStats{}
resctrlStats, err := getIntelRDTStatsFrom(c.resctrlPath, c.vendorID)
if err != nil {
return err
}
stats.Resctrl.MemoryBandwidth = make([]info.MemoryBandwidthStats, 0, c.numberOfNUMANodes)
stats.Resctrl.Cache = make([]info.CacheStats, 0, c.numberOfNUMANodes)
for _, numaNodeStats := range *resctrlStats.MBMStats {
stats.Resctrl.MemoryBandwidth = append(stats.Resctrl.MemoryBandwidth,
info.MemoryBandwidthStats{
TotalBytes: numaNodeStats.MBMTotalBytes,
LocalBytes: numaNodeStats.MBMLocalBytes,
})
}
for _, numaNodeStats := range *resctrlStats.CMTStats {
stats.Resctrl.Cache = append(stats.Resctrl.Cache,
info.CacheStats{LLCOccupancy: numaNodeStats.LLCOccupancy})
}
}
return nil
}
func (c *collector) Destroy() {
c.mu.Lock()
defer c.mu.Unlock()
c.running = false
err := c.clear()
if err != nil {
klog.Errorf("trying to destroy %q resctrl collector but: %v", c.id, err)
}
c.destroyed = true
}
func (c *collector) clear() error {
// Not allowed to remove root or undefined resctrl directory.
if c.id != rootContainer && c.resctrlPath != "" {
// Remove only own prepared mon group.
if strings.HasPrefix(filepath.Base(c.resctrlPath), monGroupPrefix) {
err := os.RemoveAll(c.resctrlPath)
if err != nil {
return fmt.Errorf("couldn't clear mon_group: %v", err)
}
}
}
return nil
}
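
The rewritten collector drops the libcontainer intelrdt.Manager in favor of a polling design: when an interval is set, a goroutine periodically re-resolves the container's monitoring group and flips `running` off on failure, so UpdateStats never reads a stale resctrl path. A stripped-down sketch of that loop with the resctrl calls stubbed out:

package main

import (
	"fmt"
	"sync"
	"time"
)

// poller keeps the same invariants as the collector above: `running`
// gates stat collection and `destroyed` stops the goroutine.
type poller struct {
	mu        sync.Mutex
	running   bool
	destroyed bool
}

func (p *poller) loop(interval time.Duration, check func() error) {
	for {
		time.Sleep(interval)
		p.mu.Lock()
		if p.destroyed {
			p.mu.Unlock()
			return
		}
		if err := check(); err != nil {
			p.running = false // UpdateStats becomes a no-op until recovery
			fmt.Println("check failed, retrying next interval:", err)
		} else {
			p.running = true
		}
		p.mu.Unlock()
	}
}

func main() {
	p := &poller{running: true}
	go p.loop(10*time.Millisecond, func() error { return nil }) // stand-in for checkMonitoringGroup
	time.Sleep(50 * time.Millisecond)
	p.mu.Lock()
	p.destroyed = true
	p.mu.Unlock()
}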

View File

@ -1,6 +1,7 @@
//go:build linux
// +build linux // +build linux
// Copyright 2020 Google Inc. All Rights Reserved. // Copyright 2021 Google Inc. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@ -18,31 +19,61 @@
package resctrl package resctrl
import ( import (
"os" "errors"
"time"
"k8s.io/klog/v2"
"github.com/google/cadvisor/container/raw"
"github.com/google/cadvisor/stats" "github.com/google/cadvisor/stats"
"github.com/opencontainers/runc/libcontainer/intelrdt"
) )
type manager struct { type Manager interface {
id string Destroy()
stats.NoopDestroy GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error)
} }
func (m manager) GetCollector(resctrlPath string) (stats.Collector, error) { type manager struct {
if _, err := os.Stat(resctrlPath); err != nil { stats.NoopDestroy
interval time.Duration
vendorID string
inHostNamespace bool
}
func (m *manager) GetCollector(containerName string, getContainerPids func() ([]string, error), numberOfNUMANodes int) (stats.Collector, error) {
collector := newCollector(containerName, getContainerPids, m.interval, numberOfNUMANodes, m.vendorID, m.inHostNamespace)
err := collector.setup()
if err != nil {
return &stats.NoopCollector{}, err return &stats.NoopCollector{}, err
} }
collector := newCollector(m.id, resctrlPath)
return collector, nil return collector, nil
} }
func NewManager(id string) (stats.Manager, error) { func NewManager(interval time.Duration, setup func() error, vendorID string, inHostNamespace bool) (Manager, error) {
err := setup()
if intelrdt.IsMBMEnabled() || intelrdt.IsCMTEnabled() { if err != nil {
return &manager{id: id}, nil return &NoopManager{}, err
} }
return &stats.NoopManager{}, nil if !isResctrlInitialized {
return &NoopManager{}, errors.New("the resctrl isn't initialized")
}
if !(enabledCMT || enabledMBM) {
return &NoopManager{}, errors.New("there are no monitoring features available")
}
if !*raw.DockerOnly {
klog.Warning("--docker_only should be set when collecting Resctrl metrics! See the runtime docs.")
}
return &manager{interval: interval, vendorID: vendorID, inHostNamespace: inHostNamespace}, nil
}
type NoopManager struct {
stats.NoopDestroy
}
func (np *NoopManager) GetCollector(_ string, _ func() ([]string, error), _ int) (stats.Collector, error) {
return &stats.NoopCollector{}, nil
} }
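
A hedged usage sketch of the new Manager API, assuming the vendored package is importable (Linux-only, and resctrl must actually be mounted for Setup to succeed); the vendorID and container name are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/google/cadvisor/resctrl"
)

func main() {
	// NewManager runs the setup func first and degrades to a NoopManager
	// (plus an error) when resctrl is unavailable.
	mgr, err := resctrl.NewManager(10*time.Second, resctrl.Setup, "GenuineIntel", true)
	if err != nil {
		fmt.Println("resctrl unavailable, got noop manager:", err)
		return
	}
	getPids := func() ([]string, error) { return []string{"1234"}, nil } // illustrative pids
	collector, err := mgr.GetCollector("/docker/abc123", getPids, 2)
	if err != nil {
		fmt.Println("collector setup failed:", err)
		return
	}
	_ = collector // collector.UpdateStats(...) fills stats.Resctrl on each housekeeping pass
}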

366
vendor/github.com/google/cadvisor/resctrl/utils.go generated vendored Normal file
View File

@ -0,0 +1,366 @@
//go:build linux
// +build linux
// Copyright 2021 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Utilities.
package resctrl
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/runc/libcontainer/intelrdt"
)
const (
cpuCgroup = "cpu"
rootContainer = "/"
monitoringGroupDir = "mon_groups"
processTask = "task"
cpusFileName = "cpus"
cpusListFileName = "cpus_list"
schemataFileName = "schemata"
tasksFileName = "tasks"
infoDirName = "info"
monDataDirName = "mon_data"
monGroupsDirName = "mon_groups"
noPidsPassedError = "there are no pids passed"
noContainerNameError = "there is no container name passed"
noControlGroupFoundError = "couldn't find control group matching container"
llcOccupancyFileName = "llc_occupancy"
mbmLocalBytesFileName = "mbm_local_bytes"
mbmTotalBytesFileName = "mbm_total_bytes"
containerPrefix = '/'
minContainerNameLen = 2 // "/<container_name>" e.g. "/a"
unavailable = "Unavailable"
monGroupPrefix = "cadvisor"
)
var (
rootResctrl = ""
pidsPath = ""
processPath = "/proc"
enabledMBM = false
enabledCMT = false
isResctrlInitialized = false
groupDirectories = map[string]struct{}{
cpusFileName: {},
cpusListFileName: {},
infoDirName: {},
monDataDirName: {},
monGroupsDirName: {},
schemataFileName: {},
tasksFileName: {},
}
)
func Setup() error {
var err error
rootResctrl, err = intelrdt.GetIntelRdtPath(rootContainer)
if err != nil {
return fmt.Errorf("unable to initialize resctrl: %v", err)
}
if cgroups.IsCgroup2UnifiedMode() {
pidsPath = fs2.UnifiedMountpoint
} else {
pidsPath = filepath.Join(fs2.UnifiedMountpoint, cpuCgroup)
}
enabledMBM = intelrdt.IsMBMEnabled()
enabledCMT = intelrdt.IsCMTEnabled()
isResctrlInitialized = true
return nil
}
func prepareMonitoringGroup(containerName string, getContainerPids func() ([]string, error), inHostNamespace bool) (string, error) {
if containerName == rootContainer {
return rootResctrl, nil
}
pids, err := getContainerPids()
if err != nil {
return "", err
}
if len(pids) == 0 {
return "", fmt.Errorf("couldn't obtain %q container pids: there is no pids in cgroup", containerName)
}
// Firstly, find the control group to which the container belongs.
// Consider the root group.
controlGroupPath, err := findGroup(rootResctrl, pids, true, false)
if err != nil {
return "", fmt.Errorf("%q %q: %q", noControlGroupFoundError, containerName, err)
}
if controlGroupPath == "" {
return "", fmt.Errorf("%q %q", noControlGroupFoundError, containerName)
}
// Check if there is any monitoring group.
monGroupPath, err := findGroup(filepath.Join(controlGroupPath, monGroupsDirName), pids, false, true)
if err != nil {
return "", fmt.Errorf("couldn't find monitoring group matching %q container: %v", containerName, err)
}
// Prepare a new one if it doesn't exist.
if monGroupPath == "" {
// Remove leading prefix.
// e.g. /my/container -> my/container
if len(containerName) >= minContainerNameLen && containerName[0] == containerPrefix {
containerName = containerName[1:]
}
// Add own prefix and use `-` instead of `/`.
// e.g. my/container -> cadvisor-my-container
properContainerName := fmt.Sprintf("%s-%s", monGroupPrefix, strings.Replace(containerName, "/", "-", -1))
monGroupPath = filepath.Join(controlGroupPath, monitoringGroupDir, properContainerName)
err = os.MkdirAll(monGroupPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("couldn't create monitoring group directory for %q container: %w", containerName, err)
}
if !inHostNamespace {
processPath = "/rootfs/proc"
}
for _, pid := range pids {
processThreads, err := getAllProcessThreads(filepath.Join(processPath, pid, processTask))
if err != nil {
return "", err
}
for _, thread := range processThreads {
err = intelrdt.WriteIntelRdtTasks(monGroupPath, thread)
if err != nil {
secondError := os.Remove(monGroupPath)
if secondError != nil {
return "", fmt.Errorf(
"coudn't assign pids to %q container monitoring group: %w \n couldn't clear %q monitoring group: %v",
containerName, err, containerName, secondError)
}
return "", fmt.Errorf("coudn't assign pids to %q container monitoring group: %w", containerName, err)
}
}
}
}
return monGroupPath, nil
}
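
The naming convention built here is what clear() later relies on: only monitoring groups carrying the cadvisor prefix are ever removed. A tiny sketch of the mangling:

package main

import (
	"fmt"
	"strings"
)

// monGroupName strips the leading '/', replaces the remaining separators
// with '-', and prepends the prefix, mirroring the code above.
func monGroupName(containerName string) string {
	if len(containerName) >= 2 && containerName[0] == '/' {
		containerName = containerName[1:]
	}
	return "cadvisor-" + strings.ReplaceAll(containerName, "/", "-")
}

func main() {
	fmt.Println(monGroupName("/kubepods/pod42")) // cadvisor-kubepods-pod42
}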
func getPids(containerName string) ([]int, error) {
if len(containerName) == 0 {
// No container name passed.
return nil, fmt.Errorf(noContainerNameError)
}
pids, err := cgroups.GetAllPids(filepath.Join(pidsPath, containerName))
if err != nil {
return nil, fmt.Errorf("couldn't obtain pids for %q container: %v", containerName, err)
}
return pids, nil
}
// getAllProcessThreads obtains all available process threads from a directory.
// e.g. ls /proc/4215/task/ -> 4215, 4216, 4217, 4218
// the func will return [4215, 4216, 4217, 4218].
func getAllProcessThreads(path string) ([]int, error) {
processThreads := make([]int, 0)
threadDirs, err := ioutil.ReadDir(path)
if err != nil {
return processThreads, err
}
for _, dir := range threadDirs {
pid, err := strconv.Atoi(dir.Name())
if err != nil {
return nil, fmt.Errorf("couldn't parse %q dir: %v", dir.Name(), err)
}
processThreads = append(processThreads, pid)
}
return processThreads, nil
}
// findGroup returns the path of a control/monitoring group in which the pids are.
func findGroup(group string, pids []string, includeGroup bool, exclusive bool) (string, error) {
if len(pids) == 0 {
return "", fmt.Errorf(noPidsPassedError)
}
availablePaths := make([]string, 0)
if includeGroup {
availablePaths = append(availablePaths, group)
}
files, err := ioutil.ReadDir(group)
for _, file := range files {
if _, ok := groupDirectories[file.Name()]; !ok {
availablePaths = append(availablePaths, filepath.Join(group, file.Name()))
}
}
if err != nil {
return "", fmt.Errorf("couldn't obtain groups paths: %w", err)
}
for _, path := range availablePaths {
groupFound, err := arePIDsInGroup(path, pids, exclusive)
if err != nil {
return "", err
}
if groupFound {
return path, nil
}
}
return "", nil
}
// arePIDsInGroup returns true if all of the pids are within the control group.
func arePIDsInGroup(path string, pids []string, exclusive bool) (bool, error) {
if len(pids) == 0 {
return false, fmt.Errorf("couldn't obtain pids from %q path: %v", path, noPidsPassedError)
}
tasks, err := readTasksFile(filepath.Join(path, tasksFileName))
if err != nil {
return false, err
}
any := false
for _, pid := range pids {
_, ok := tasks[pid]
if !ok {
// Some pids are missing from the group.
if any {
return false, fmt.Errorf("all pids should be in the group")
}
return false, nil
}
any = true
}
// Check if there should be only passed pids in group.
if exclusive {
if len(tasks) != len(pids) {
return false, fmt.Errorf("group should have container pids only")
}
}
return true, nil
}
// readTasksFile returns pids map from given tasks path.
func readTasksFile(tasksPath string) (map[string]struct{}, error) {
tasks := make(map[string]struct{})
tasksFile, err := os.Open(tasksPath)
if err != nil {
return tasks, fmt.Errorf("couldn't read tasks file from %q path: %w", tasksPath, err)
}
defer tasksFile.Close()
scanner := bufio.NewScanner(tasksFile)
for scanner.Scan() {
tasks[scanner.Text()] = struct{}{}
}
if err := scanner.Err(); err != nil {
return tasks, fmt.Errorf("couldn't obtain pids from %q path: %w", tasksPath, err)
}
return tasks, nil
}
func readStatFrom(path string, vendorID string) (uint64, error) {
context, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
contextString := string(bytes.TrimSpace(context))
if contextString == unavailable {
err := fmt.Errorf("\"Unavailable\" value from file %q", path)
if vendorID == "AuthenticAMD" {
kernelBugzillaLink := "https://bugzilla.kernel.org/show_bug.cgi?id=213311"
err = fmt.Errorf("%v, possible bug: %q", err, kernelBugzillaLink)
}
return 0, err
}
stat, err := strconv.ParseUint(contextString, 10, 64)
if err != nil {
return stat, fmt.Errorf("unable to parse %q as a uint from file %q", string(context), path)
}
return stat, nil
}
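
A compact sketch of the same read-and-parse pattern, minus the AMD-specific bugzilla hint (os.ReadFile stands in for the vendored ioutil call):

package main

import (
	"bytes"
	"fmt"
	"os"
	"strconv"
)

// readStat mirrors readStatFrom: resctrl stat files hold either a decimal
// counter or the literal string "Unavailable".
func readStat(path string) (uint64, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	s := string(bytes.TrimSpace(raw))
	if s == "Unavailable" {
		return 0, fmt.Errorf("%q reported Unavailable", path)
	}
	return strconv.ParseUint(s, 10, 64)
}

func main() {
	v, err := readStat("/sys/fs/resctrl/mon_data/mon_L3_00/llc_occupancy")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("llc_occupancy:", v)
}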
func getIntelRDTStatsFrom(path string, vendorID string) (intelrdt.Stats, error) {
stats := intelrdt.Stats{}
statsDirectories, err := filepath.Glob(filepath.Join(path, monDataDirName, "*"))
if err != nil {
return stats, err
}
if len(statsDirectories) == 0 {
return stats, fmt.Errorf("there is no mon_data stats directories: %q", path)
}
var cmtStats []intelrdt.CMTNumaNodeStats
var mbmStats []intelrdt.MBMNumaNodeStats
for _, dir := range statsDirectories {
if enabledCMT {
llcOccupancy, err := readStatFrom(filepath.Join(dir, llcOccupancyFileName), vendorID)
if err != nil {
return stats, err
}
cmtStats = append(cmtStats, intelrdt.CMTNumaNodeStats{LLCOccupancy: llcOccupancy})
}
if enabledMBM {
mbmTotalBytes, err := readStatFrom(filepath.Join(dir, mbmTotalBytesFileName), vendorID)
if err != nil {
return stats, err
}
mbmLocalBytes, err := readStatFrom(filepath.Join(dir, mbmLocalBytesFileName), vendorID)
if err != nil {
return stats, err
}
mbmStats = append(mbmStats, intelrdt.MBMNumaNodeStats{
MBMTotalBytes: mbmTotalBytes,
MBMLocalBytes: mbmLocalBytes,
})
}
}
stats.CMTStats = &cmtStats
stats.MBMStats = &mbmStats
return stats, nil
}

View File

@ -16,8 +16,9 @@
package stats package stats
import ( import (
v1 "github.com/google/cadvisor/info/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
v1 "github.com/google/cadvisor/info/v1"
) )
type NoopManager struct { type NoopManager struct {

View File

@ -25,7 +25,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/google/cadvisor/info/v1" v1 "github.com/google/cadvisor/info/v1"
info "github.com/google/cadvisor/info/v2" info "github.com/google/cadvisor/info/v2"
) )

View File

@ -17,8 +17,9 @@
package cloudinfo package cloudinfo
import ( import (
info "github.com/google/cadvisor/info/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
info "github.com/google/cadvisor/info/v1"
) )
type CloudInfo interface { type CloudInfo interface {

View File

@ -19,8 +19,9 @@ import (
info "github.com/google/cadvisor/info/v1" info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/utils/cpuload/netlink"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"github.com/google/cadvisor/utils/cpuload/netlink"
) )
type CpuLoadReader interface { type CpuLoadReader interface {

View File

@ -21,8 +21,9 @@ import (
"os" "os"
"syscall" "syscall"
info "github.com/google/cadvisor/info/v1"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
info "github.com/google/cadvisor/info/v1"
) )
var ( var (

View File

@ -64,6 +64,8 @@ var (
) )
type CacheInfo struct { type CacheInfo struct {
// cache id
Id int
// size in bytes // size in bytes
Size uint64 Size uint64
// cache type - instruction, data, unified // cache type - instruction, data, unified
@ -292,14 +294,24 @@ func getCPUCount(cache string) (count int, err error) {
return return
} }
func (fs *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) { func (fs *realSysFs) GetCacheInfo(cpu int, name string) (CacheInfo, error) {
cachePath := fmt.Sprintf("%s%d/cache/%s", cacheDir, id, name) cachePath := fmt.Sprintf("%s%d/cache/%s", cacheDir, cpu, name)
out, err := ioutil.ReadFile(path.Join(cachePath, "/size")) out, err := ioutil.ReadFile(path.Join(cachePath, "/id"))
if err != nil {
return CacheInfo{}, err
}
var id int
n, err := fmt.Sscanf(string(out), "%d", &id)
if err != nil || n != 1 {
return CacheInfo{}, err
}
out, err = ioutil.ReadFile(path.Join(cachePath, "/size"))
if err != nil { if err != nil {
return CacheInfo{}, err return CacheInfo{}, err
} }
var size uint64 var size uint64
n, err := fmt.Sscanf(string(out), "%dK", &size) n, err = fmt.Sscanf(string(out), "%dK", &size)
if err != nil || n != 1 { if err != nil || n != 1 {
return CacheInfo{}, err return CacheInfo{}, err
} }
@ -325,6 +337,7 @@ func (fs *realSysFs) GetCacheInfo(id int, name string) (CacheInfo, error) {
return CacheInfo{}, err return CacheInfo{}, err
} }
return CacheInfo{ return CacheInfo{
Id: id,
Size: size, Size: size,
Level: level, Level: level,
Type: cacheType, Type: cacheType,
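
GetCacheInfo now also reads the sysfs `id` file next to `size`, so each cache carries its hardware id. A sketch of just that read, following the standard /sys/devices/system/cpu layout:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// readCacheID reads /sys/devices/system/cpu/cpu<N>/cache/<index>/id,
// the file the hunk above starts consuming.
func readCacheID(cpu int, cache string) (int, error) {
	p := filepath.Join(fmt.Sprintf("/sys/devices/system/cpu/cpu%d/cache", cpu), cache, "id")
	out, err := os.ReadFile(p)
	if err != nil {
		return 0, err
	}
	var id int
	if _, err := fmt.Sscanf(strings.TrimSpace(string(out)), "%d", &id); err != nil {
		return 0, err
	}
	return id, nil
}

func main() {
	id, err := readCacheID(0, "index3")
	if err != nil {
		fmt.Println("no cache id available:", err)
		return
	}
	fmt.Println("L3 cache id for cpu0:", id)
}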

View File

@ -1,3 +1,4 @@
//go:build !x86
// +build !x86 // +build !x86
// Copyright 2021 Google Inc. All Rights Reserved. // Copyright 2021 Google Inc. All Rights Reserved.

View File

@ -1,3 +1,4 @@
//go:build x86
// +build x86 // +build x86
// Copyright 2021 Google Inc. All Rights Reserved. // Copyright 2021 Google Inc. All Rights Reserved.

View File

@ -332,20 +332,34 @@ func addCacheInfo(sysFs sysfs.SysFs, node *info.Node) error {
for _, cache := range caches { for _, cache := range caches {
c := info.Cache{ c := info.Cache{
Id: cache.Id,
Size: cache.Size, Size: cache.Size,
Level: cache.Level, Level: cache.Level,
Type: cache.Type, Type: cache.Type,
} }
if cache.Cpus == numThreadsPerNode && cache.Level > cacheLevel2 { if cache.Level > cacheLevel2 {
// Add a node-level cache. if cache.Cpus == numThreadsPerNode {
cacheFound := false // Add a node level cache.
for _, nodeCache := range node.Caches { cacheFound := false
if nodeCache == c { for _, nodeCache := range node.Caches {
cacheFound = true if nodeCache == c {
cacheFound = true
}
}
if !cacheFound {
node.Caches = append(node.Caches, c)
}
} else {
// Add uncore cache, for architectures in which the L3 cache is only shared among some cores.
uncoreCacheFound := false
for _, uncoreCache := range node.Cores[coreID].UncoreCaches {
if uncoreCache == c {
uncoreCacheFound = true
}
}
if !uncoreCacheFound {
node.Cores[coreID].UncoreCaches = append(node.Cores[coreID].UncoreCaches, c)
} }
}
if !cacheFound {
node.Caches = append(node.Caches, c)
} }
} else if cache.Cpus == numThreadsPerCore { } else if cache.Cpus == numThreadsPerCore {
// Add core level cache // Add core level cache

22
vendor/modules.txt vendored
View File

@ -179,18 +179,14 @@ github.com/container-storage-interface/spec/lib/go/csi
github.com/containerd/cgroups/stats/v1 github.com/containerd/cgroups/stats/v1
# github.com/containerd/console v1.0.2 => github.com/containerd/console v1.0.2 # github.com/containerd/console v1.0.2 => github.com/containerd/console v1.0.2
github.com/containerd/console github.com/containerd/console
# github.com/containerd/containerd v1.4.9 => github.com/containerd/containerd v1.4.9 # github.com/containerd/containerd v1.4.11 => github.com/containerd/containerd v1.4.11
github.com/containerd/containerd/api/services/containers/v1 github.com/containerd/containerd/api/services/containers/v1
github.com/containerd/containerd/api/services/tasks/v1 github.com/containerd/containerd/api/services/tasks/v1
github.com/containerd/containerd/api/services/version/v1 github.com/containerd/containerd/api/services/version/v1
github.com/containerd/containerd/api/types github.com/containerd/containerd/api/types
github.com/containerd/containerd/api/types/task github.com/containerd/containerd/api/types/task
github.com/containerd/containerd/containers
github.com/containerd/containerd/errdefs github.com/containerd/containerd/errdefs
github.com/containerd/containerd/identifiers
github.com/containerd/containerd/log github.com/containerd/containerd/log
github.com/containerd/containerd/namespaces
github.com/containerd/containerd/pkg/dialer
github.com/containerd/containerd/platforms github.com/containerd/containerd/platforms
# github.com/containerd/ttrpc v1.0.2 => github.com/containerd/ttrpc v1.0.2 # github.com/containerd/ttrpc v1.0.2 => github.com/containerd/ttrpc v1.0.2
github.com/containerd/ttrpc github.com/containerd/ttrpc
@ -236,7 +232,7 @@ github.com/daviddengcn/go-colortext
github.com/docker/distribution/digestset github.com/docker/distribution/digestset
github.com/docker/distribution/reference github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/api/errcode
# github.com/docker/docker v20.10.2+incompatible => github.com/docker/docker v20.10.2+incompatible # github.com/docker/docker v20.10.7+incompatible => github.com/docker/docker v20.10.7+incompatible
## explicit ## explicit
github.com/docker/docker/api github.com/docker/docker/api
github.com/docker/docker/api/types github.com/docker/docker/api/types
@ -362,7 +358,7 @@ github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers github.com/golang/protobuf/ptypes/wrappers
# github.com/google/btree v1.0.1 => github.com/google/btree v1.0.1 # github.com/google/btree v1.0.1 => github.com/google/btree v1.0.1
github.com/google/btree github.com/google/btree
# github.com/google/cadvisor v0.39.2 => github.com/google/cadvisor v0.39.2 # github.com/google/cadvisor v0.43.0 => github.com/google/cadvisor v0.43.0
## explicit ## explicit
github.com/google/cadvisor/accelerators github.com/google/cadvisor/accelerators
github.com/google/cadvisor/cache/memory github.com/google/cadvisor/cache/memory
@ -371,7 +367,12 @@ github.com/google/cadvisor/collector
github.com/google/cadvisor/container github.com/google/cadvisor/container
github.com/google/cadvisor/container/common github.com/google/cadvisor/container/common
github.com/google/cadvisor/container/containerd github.com/google/cadvisor/container/containerd
github.com/google/cadvisor/container/containerd/containers
github.com/google/cadvisor/container/containerd/errdefs
github.com/google/cadvisor/container/containerd/identifiers
github.com/google/cadvisor/container/containerd/install github.com/google/cadvisor/container/containerd/install
github.com/google/cadvisor/container/containerd/namespaces
github.com/google/cadvisor/container/containerd/pkg/dialer
github.com/google/cadvisor/container/crio github.com/google/cadvisor/container/crio
github.com/google/cadvisor/container/crio/install github.com/google/cadvisor/container/crio/install
github.com/google/cadvisor/container/docker github.com/google/cadvisor/container/docker
@ -733,6 +734,7 @@ github.com/tmc/grpc-websocket-proxy/wsproxy
github.com/vishvananda/netlink github.com/vishvananda/netlink
github.com/vishvananda/netlink/nl github.com/vishvananda/netlink/nl
# github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae => github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae # github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae => github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
## explicit
github.com/vishvananda/netns github.com/vishvananda/netns
# github.com/vmware/govmomi v0.20.3 => github.com/vmware/govmomi v0.20.3 # github.com/vmware/govmomi v0.20.3 => github.com/vmware/govmomi v0.20.3
## explicit ## explicit
@ -2474,7 +2476,7 @@ sigs.k8s.io/yaml
# github.com/container-storage-interface/spec => github.com/container-storage-interface/spec v1.5.0 # github.com/container-storage-interface/spec => github.com/container-storage-interface/spec v1.5.0
# github.com/containerd/cgroups => github.com/containerd/cgroups v1.0.1 # github.com/containerd/cgroups => github.com/containerd/cgroups v1.0.1
# github.com/containerd/console => github.com/containerd/console v1.0.2 # github.com/containerd/console => github.com/containerd/console v1.0.2
# github.com/containerd/containerd => github.com/containerd/containerd v1.4.9 # github.com/containerd/containerd => github.com/containerd/containerd v1.4.11
# github.com/containerd/continuity => github.com/containerd/continuity v0.1.0 # github.com/containerd/continuity => github.com/containerd/continuity v0.1.0
# github.com/containerd/fifo => github.com/containerd/fifo v1.0.0 # github.com/containerd/fifo => github.com/containerd/fifo v1.0.0
# github.com/containerd/go-runc => github.com/containerd/go-runc v1.0.0 # github.com/containerd/go-runc => github.com/containerd/go-runc v1.0.0
@ -2493,7 +2495,7 @@ sigs.k8s.io/yaml
# github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd # github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd
# github.com/dnaeon/go-vcr => github.com/dnaeon/go-vcr v1.0.1 # github.com/dnaeon/go-vcr => github.com/dnaeon/go-vcr v1.0.1
# github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible # github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
# github.com/docker/docker => github.com/docker/docker v20.10.2+incompatible # github.com/docker/docker => github.com/docker/docker v20.10.7+incompatible
# github.com/docker/go-connections => github.com/docker/go-connections v0.4.0 # github.com/docker/go-connections => github.com/docker/go-connections v0.4.0
# github.com/docker/go-units => github.com/docker/go-units v0.4.0 # github.com/docker/go-units => github.com/docker/go-units v0.4.0
# github.com/docopt/docopt-go => github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 # github.com/docopt/docopt-go => github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
@ -2538,7 +2540,7 @@ sigs.k8s.io/yaml
# github.com/golang/protobuf => github.com/golang/protobuf v1.5.2 # github.com/golang/protobuf => github.com/golang/protobuf v1.5.2
# github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e # github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e
# github.com/google/btree => github.com/google/btree v1.0.1 # github.com/google/btree => github.com/google/btree v1.0.1
# github.com/google/cadvisor => github.com/google/cadvisor v0.39.2 # github.com/google/cadvisor => github.com/google/cadvisor v0.43.0
# github.com/google/go-cmp => github.com/google/go-cmp v0.5.5 # github.com/google/go-cmp => github.com/google/go-cmp v0.5.5
# github.com/google/gofuzz => github.com/google/gofuzz v1.1.0 # github.com/google/gofuzz => github.com/google/gofuzz v1.1.0
# github.com/google/martian/v3 => github.com/google/martian/v3 v3.1.0 # github.com/google/martian/v3 => github.com/google/martian/v3 v3.1.0