Mirror of https://github.com/k3s-io/kubernetes.git
update cadvisor dependency to v0.35.0

commit a445c97a0e (parent 15475b4321)
go.mod (6 lines changed)

@@ -39,7 +39,6 @@ require (
 	github.com/coredns/corefile-migration v1.0.4
 	github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
 	github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
-	github.com/coreos/rkt v1.30.0 // indirect
 	github.com/cpuguy83/go-md2man v1.0.10
 	github.com/cyphar/filepath-securejoin v0.2.2 // indirect
 	github.com/davecgh/go-spew v1.1.1
@@ -64,7 +63,7 @@ require (
 	github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d
 	github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903
 	github.com/golang/mock v1.2.0
-	github.com/google/cadvisor v0.34.0
+	github.com/google/cadvisor v0.35.0
 	github.com/google/go-cmp v0.3.0
 	github.com/google/gofuzz v1.0.0
 	github.com/google/uuid v1.1.1
@@ -235,7 +234,6 @@ replace (
 	github.com/coreos/go-semver => github.com/coreos/go-semver v0.3.0
 	github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
 	github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
-	github.com/coreos/rkt => github.com/coreos/rkt v1.30.0
 	github.com/cpuguy83/go-md2man => github.com/cpuguy83/go-md2man v1.0.10
 	github.com/creack/pty => github.com/creack/pty v1.1.7
 	github.com/cyphar/filepath-securejoin => github.com/cyphar/filepath-securejoin v0.2.2
@@ -318,7 +316,7 @@ replace (
 	github.com/golangplus/fmt => github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995
 	github.com/golangplus/testing => github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e
 	github.com/google/btree => github.com/google/btree v1.0.0
-	github.com/google/cadvisor => github.com/google/cadvisor v0.34.0
+	github.com/google/cadvisor => github.com/google/cadvisor v0.35.0
 	github.com/google/go-cmp => github.com/google/go-cmp v0.3.0
 	github.com/google/go-github => github.com/google/go-github v17.0.0+incompatible
 	github.com/google/go-querystring => github.com/google/go-querystring v1.0.0
go.sum (6 lines changed)

@@ -114,8 +114,6 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/rkt v1.30.0 h1:Kkt6sYeEGKxA3Y7SCrY+nHoXkWed6Jr2BBY42GqMymM=
-github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U=
 github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -243,8 +241,8 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e h1:KhcknUwkWHKZ
 github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/cadvisor v0.34.0 h1:No7G6U/TasplR9uNqyc5Jj0Bet5VSYsK5xLygOf4pUw=
-github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48=
+github.com/google/cadvisor v0.35.0 h1:qivoEm+iGqTrd0CKSmQidxfOxUxkNZovvYs/8G6B6ao=
+github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
vendor/github.com/google/cadvisor/container/common/BUILD (generated, vendored; 1 line changed)

@@ -17,6 +17,7 @@ go_library(
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/google/cadvisor/utils:go_default_library",
         "//vendor/github.com/karrick/godirwalk:go_default_library",
+        "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/inotify:go_default_library",
vendor/github.com/google/cadvisor/container/common/helpers.go (generated, vendored; 57 lines changed)

@@ -19,6 +19,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -27,6 +28,7 @@ import (
 	info "github.com/google/cadvisor/info/v1"
 	"github.com/google/cadvisor/utils"
 	"github.com/karrick/godirwalk"
+	"github.com/opencontainers/runc/libcontainer/cgroups"
 	"github.com/pkg/errors"

 	"k8s.io/klog"
@@ -47,6 +49,25 @@ func DebugInfo(watches map[string][]string) map[string][]string {
 	return out
 }

+// findFileInAncestorDir returns the path to the parent directory that contains the specified file.
+// "" is returned if the lookup reaches the limit.
+func findFileInAncestorDir(current, file, limit string) (string, error) {
+	for {
+		fpath := path.Join(current, file)
+		_, err := os.Stat(fpath)
+		if err == nil {
+			return current, nil
+		}
+		if !os.IsNotExist(err) {
+			return "", err
+		}
+		if current == limit {
+			return "", nil
+		}
+		current = filepath.Dir(current)
+	}
+}
+
 func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoFactory, hasNetwork, hasFilesystem bool) (info.ContainerSpec, error) {
 	var spec info.ContainerSpec

@@ -100,7 +121,12 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 	if ok {
 		if utils.FileExists(cpusetRoot) {
 			spec.HasCpu = true
-			mask := readString(cpusetRoot, "cpuset.cpus")
+			mask := ""
+			if cgroups.IsCgroup2UnifiedMode() {
+				mask = readString(cpusetRoot, "cpuset.cpus.effective")
+			} else {
+				mask = readString(cpusetRoot, "cpuset.cpus")
+			}
 			spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores)
 		}
 	}
@@ -108,11 +134,24 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 	// Memory
 	memoryRoot, ok := cgroupPaths["memory"]
 	if ok {
-		if utils.FileExists(memoryRoot) {
-			spec.HasMemory = true
-			spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
-			spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
-			spec.Memory.Reservation = readUInt64(memoryRoot, "memory.soft_limit_in_bytes")
+		if !cgroups.IsCgroup2UnifiedMode() {
+			if utils.FileExists(memoryRoot) {
+				spec.HasMemory = true
+				spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
+				spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
+				spec.Memory.Reservation = readUInt64(memoryRoot, "memory.soft_limit_in_bytes")
+			}
+		} else {
+			memoryRoot, err := findFileInAncestorDir(memoryRoot, "memory.max", "/sys/fs/cgroup")
+			if err != nil {
+				return spec, err
+			}
+			if memoryRoot != "" {
+				spec.HasMemory = true
+				spec.Memory.Reservation = readUInt64(memoryRoot, "memory.high")
+				spec.Memory.Limit = readUInt64(memoryRoot, "memory.max")
+				spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.swap.max")
+			}
 		}
 	}

@@ -128,7 +167,11 @@ func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoF
 	spec.HasNetwork = hasNetwork
 	spec.HasFilesystem = hasFilesystem

-	if blkioRoot, ok := cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
+	ioControllerName := "blkio"
+	if cgroups.IsCgroup2UnifiedMode() {
+		ioControllerName = "io"
+	}
+	if blkioRoot, ok := cgroupPaths[ioControllerName]; ok && utils.FileExists(blkioRoot) {
 		spec.HasDiskIo = true
 	}
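On cgroup v2 hosts the memory interface files (memory.max, memory.high, memory.swap.max) may only exist on an ancestor of the cgroup directory cAdvisor starts from, which is why the hunk above walks upward with findFileInAncestorDir before reading limits. A minimal standalone sketch of the same ancestor walk; the main wrapper and the sample path are illustrative assumptions, not part of this commit:

package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
)

// findFileInAncestorDir mirrors the helper added in the diff above: starting at
// current, walk toward limit and return the first directory containing file.
func findFileInAncestorDir(current, file, limit string) (string, error) {
	for {
		if _, err := os.Stat(path.Join(current, file)); err == nil {
			return current, nil
		} else if !os.IsNotExist(err) {
			return "", err
		}
		if current == limit {
			return "", nil // reached the limit without finding the file
		}
		current = filepath.Dir(current)
	}
}

func main() {
	// Hypothetical container cgroup directory on a cgroup v2 (unified) host.
	cgroupDir := "/sys/fs/cgroup/kubepods/burstable/pod1234"
	dir, err := findFileInAncestorDir(cgroupDir, "memory.max", "/sys/fs/cgroup")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if dir == "" {
		fmt.Println("no ancestor exposes memory.max; memory limits unavailable")
		return
	}
	fmt.Println("read memory.max / memory.high / memory.swap.max from:", dir)
}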
vendor/github.com/google/cadvisor/container/container.go (generated, vendored; 1 line changed)

@@ -32,7 +32,6 @@ type ContainerType int
 const (
 	ContainerTypeRaw ContainerType = iota
 	ContainerTypeDocker
-	ContainerTypeRkt
 	ContainerTypeSystemd
 	ContainerTypeCrio
 	ContainerTypeContainerd
vendor/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored; 2 lines changed)

@@ -47,6 +47,7 @@ func GetCgroupSubsystems(includedMetrics container.MetricSet) (CgroupSubsystems,
 	//currently we only support disable blkio subsystem
 	if !includedMetrics.Has(container.DiskIOMetrics) {
 		disableCgroups["blkio"] = struct{}{}
+		disableCgroups["io"] = struct{}{}
 	}
 	return getCgroupSubsystemsHelper(allCgroups, disableCgroups)
 }
@@ -109,6 +110,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
 	"pids":    {},
 	"cpuset":  {},
 	"blkio":   {},
+	"io":      {},
 	"devices": {},
 }
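Because the disk controller is spelled "blkio" on cgroup v1 but "io" on the cgroup v2 unified hierarchy, the hunk above disables both names when disk I/O metrics are turned off. A hedged standalone sketch of that gating; the MetricKind and MetricSet types here are simplified stand-ins for cAdvisor's container.MetricSet, not its real definitions:

package main

import "fmt"

type MetricKind string

const DiskIOMetrics MetricKind = "diskIO"

// MetricSet mimics the shape of cAdvisor's metric set: a set of kinds with a Has() lookup.
type MetricSet map[MetricKind]struct{}

func (m MetricSet) Has(k MetricKind) bool { _, ok := m[k]; return ok }

func main() {
	included := MetricSet{} // disk I/O metrics not requested

	disableCgroups := map[string]struct{}{}
	if !included.Has(DiskIOMetrics) {
		// Disable both spellings of the disk controller, as in the hunk above:
		// "blkio" for cgroup v1 hierarchies and "io" for the v2 unified hierarchy.
		disableCgroups["blkio"] = struct{}{}
		disableCgroups["io"] = struct{}{}
	}
	fmt.Println("disabled controllers:", len(disableCgroups))
}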
vendor/github.com/google/cadvisor/fs/fs.go (generated, vendored; 40 lines changed)

@@ -41,7 +41,6 @@ import (
 const (
 	LabelSystemRoot   = "root"
 	LabelDockerImages = "docker-images"
-	LabelRktImages    = "rkt-images"
 	LabelCrioImages   = "crio-images"
 )

@@ -118,7 +117,6 @@ func NewFsInfo(context Context) (FsInfo, error) {
 		fsInfo.mounts[mount.Mountpoint] = mount
 	}

-	fsInfo.addRktImagesLabel(context, mounts)
 	// need to call this before the log line below printing out the partitions, as this function may
 	// add a "partition" for devicemapper to fsInfo.partitions
 	fsInfo.addDockerImagesLabel(context, mounts)
@@ -167,19 +165,22 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma

 	supportedFsType := map[string]bool{
 		// all ext systems are checked through prefix.
-		"btrfs": true,
-		"tmpfs": true,
-		"xfs":   true,
-		"zfs":   true,
+		"btrfs":   true,
+		"overlay": true,
+		"tmpfs":   true,
+		"xfs":     true,
+		"zfs":     true,
 	}

 	for _, mount := range mounts {
 		if !strings.HasPrefix(mount.Fstype, "ext") && !supportedFsType[mount.Fstype] {
 			continue
 		}
-		// Avoid bind mounts.
+		// Avoid bind mounts, exclude tmpfs.
 		if _, ok := partitions[mount.Source]; ok {
-			continue
+			if mount.Fstype != "tmpfs" {
+				continue
+			}
 		}

 		hasPrefix := false
@@ -193,6 +194,10 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma
 			continue
 		}

+		// using mountpoint to replace device once fstype it tmpfs
+		if mount.Fstype == "tmpfs" {
+			mount.Source = mount.Mountpoint
+		}
 		// btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
 		// instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
 		if mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
@@ -205,6 +210,11 @@ func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) ma
 			}
 		}

+		// overlay fix: Making mount source unique for all overlay mounts, using the mount's major and minor ids.
+		if mount.Fstype == "overlay" {
+			mount.Source = fmt.Sprintf("%s_%d-%d", mount.Source, mount.Major, mount.Minor)
+		}
+
 		partitions[mount.Source] = partition{
 			fsType:     mount.Fstype,
 			mountpoint: mount.Mountpoint,
@@ -290,20 +300,6 @@ func (self *RealFsInfo) addCrioImagesLabel(context Context, mounts []*mount.Info
 	}
 }

-func (self *RealFsInfo) addRktImagesLabel(context Context, mounts []*mount.Info) {
-	if context.RktPath != "" {
-		rktPath := context.RktPath
-		rktImagesPaths := map[string]struct{}{
-			"/": {},
-		}
-		for rktPath != "/" && rktPath != "." {
-			rktImagesPaths[rktPath] = struct{}{}
-			rktPath = filepath.Dir(rktPath)
-		}
-		self.updateContainerImagesPath(LabelRktImages, mounts, rktImagesPaths)
-	}
-}
-
 // Generate a list of possible mount points for docker image management from the docker root directory.
 // Right now, we look for each type of supported graph driver directories, but we can do better by parsing
 // some of the context from `docker info`.
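The overlay change above keys each overlay mount by its source plus major:minor device numbers, because every overlay mount reports the literal string "overlay" as its source and multiple overlay mounts would otherwise collapse into a single entry in the partitions map. A hedged sketch of that keying with made-up mount data; the mountEntry type and the sample values are illustrative, not cAdvisor's:

package main

import "fmt"

// mountEntry is a stand-in for the fields cAdvisor reads from /proc/self/mountinfo.
type mountEntry struct {
	Source     string
	Mountpoint string
	Fstype     string
	Major      int
	Minor      int
}

func main() {
	mounts := []mountEntry{
		{Source: "overlay", Mountpoint: "/var/lib/docker/overlay2/aaa/merged", Fstype: "overlay", Major: 0, Minor: 51},
		{Source: "overlay", Mountpoint: "/var/lib/docker/overlay2/bbb/merged", Fstype: "overlay", Major: 0, Minor: 52},
	}

	partitions := map[string]mountEntry{}
	for _, m := range mounts {
		key := m.Source
		// Same uniquifying scheme as the hunk above: append major-minor for overlay mounts.
		if m.Fstype == "overlay" {
			key = fmt.Sprintf("%s_%d-%d", m.Source, m.Major, m.Minor)
		}
		partitions[key] = m
	}
	fmt.Println(len(partitions), "distinct overlay partitions") // prints 2, not 1
}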
vendor/github.com/google/cadvisor/fs/types.go (generated, vendored; 5 lines changed)

@@ -20,9 +20,8 @@ import (

 type Context struct {
 	// docker root directory.
-	Docker  DockerContext
-	RktPath string
-	Crio    CrioContext
+	Docker DockerContext
+	Crio   CrioContext
 }

 type DockerContext struct {
vendor/github.com/google/cadvisor/info/v1/machine.go (generated, vendored; 7 lines changed)

@@ -38,9 +38,10 @@ type FsInfo struct {
 type Node struct {
 	Id int `json:"node_id"`
 	// Per-node memory
-	Memory uint64  `json:"memory"`
-	Cores  []Core  `json:"cores"`
-	Caches []Cache `json:"caches"`
+	Memory    uint64          `json:"memory"`
+	HugePages []HugePagesInfo `json:"hugepages"`
+	Cores     []Core          `json:"cores"`
+	Caches    []Cache         `json:"caches"`
 }

 type Core struct {
vendor/github.com/google/cadvisor/machine/info.go (generated, vendored; 46 lines changed)

@@ -17,10 +17,8 @@ package machine
 import (
 	"bytes"
 	"flag"
-	"fmt"
 	"io/ioutil"
 	"path/filepath"
-	"strconv"
 	"strings"

 	"github.com/docker/docker/pkg/parsers/operatingsystem"
@@ -54,45 +52,6 @@ func getInfoFromFiles(filePaths string) string {
 	return ""
 }

-// GetHugePagesInfo returns information about pre-allocated huge pages
-func GetHugePagesInfo() ([]info.HugePagesInfo, error) {
-	var hugePagesInfo []info.HugePagesInfo
-	files, err := ioutil.ReadDir(hugepagesDirectory)
-	if err != nil {
-		// treat as non-fatal since kernels and machine can be
-		// configured to disable hugepage support
-		return hugePagesInfo, nil
-	}
-	for _, st := range files {
-		nameArray := strings.Split(st.Name(), "-")
-		pageSizeArray := strings.Split(nameArray[1], "kB")
-		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-
-		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
-		val, err := ioutil.ReadFile(numFile)
-		if err != nil {
-			return hugePagesInfo, err
-		}
-		var numPages uint64
-		// we use sscanf as the file as a new-line that trips up ParseUint
-		// it returns the number of tokens successfully parsed, so if
-		// n != 1, it means we were unable to parse a number from the file
-		n, err := fmt.Sscanf(string(val), "%d", &numPages)
-		if err != nil || n != 1 {
-			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
-		}
-
-		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
-			NumPages: numPages,
-			PageSize: pageSize,
-		})
-	}
-	return hugePagesInfo, nil
-}
-
 func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.MachineInfo, error) {
 	rootFs := "/"
 	if !inHostNamespace {
@@ -100,6 +59,9 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
 	}

 	cpuinfo, err := ioutil.ReadFile(filepath.Join(rootFs, "/proc/cpuinfo"))
+	if err != nil {
+		return nil, err
+	}
 	clockSpeed, err := GetClockSpeed(cpuinfo)
 	if err != nil {
 		return nil, err
@@ -110,7 +72,7 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
 		return nil, err
 	}

-	hugePagesInfo, err := GetHugePagesInfo()
+	hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/google/cadvisor/machine/machine.go (generated, vendored; 52 lines changed)

@@ -23,6 +23,7 @@ import (
 	"regexp"
 	"strconv"
+	"strings"

 	// s390/s390x changes
 	"runtime"

@@ -49,6 +50,7 @@ var (

 const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
 const cpuBusPath = "/sys/bus/cpu/devices/"
+const nodePath = "/sys/devices/system/node"

 // GetClockSpeed returns the CPU clock speed, given a []byte formatted as the /proc/cpuinfo file.
 func GetClockSpeed(procInfo []byte) (uint64, error) {
@@ -191,6 +193,47 @@ func getNodeIdFromCpuBus(cpuBusPath string, threadId int) (int, error) {
 	return nodeId, nil
 }

+// GetHugePagesInfo returns information about pre-allocated huge pages
+// hugepagesDirectory should be top directory of hugepages
+// Such as: /sys/kernel/mm/hugepages/
+func GetHugePagesInfo(hugepagesDirectory string) ([]info.HugePagesInfo, error) {
+	var hugePagesInfo []info.HugePagesInfo
+	files, err := ioutil.ReadDir(hugepagesDirectory)
+	if err != nil {
+		// treat as non-fatal since kernels and machine can be
+		// configured to disable hugepage support
+		return hugePagesInfo, nil
+	}
+	for _, st := range files {
+		nameArray := strings.Split(st.Name(), "-")
+		pageSizeArray := strings.Split(nameArray[1], "kB")
+		pageSize, err := strconv.ParseUint(string(pageSizeArray[0]), 10, 64)
+		if err != nil {
+			return hugePagesInfo, err
+		}
+
+		numFile := hugepagesDirectory + st.Name() + "/nr_hugepages"
+		val, err := ioutil.ReadFile(numFile)
+		if err != nil {
+			return hugePagesInfo, err
+		}
+		var numPages uint64
+		// we use sscanf as the file as a new-line that trips up ParseUint
+		// it returns the number of tokens successfully parsed, so if
+		// n != 1, it means we were unable to parse a number from the file
+		n, err := fmt.Sscanf(string(val), "%d", &numPages)
+		if err != nil || n != 1 {
+			return hugePagesInfo, fmt.Errorf("could not parse file %v contents %q", numFile, string(val))
+		}
+
+		hugePagesInfo = append(hugePagesInfo, info.HugePagesInfo{
+			NumPages: numPages,
+			PageSize: pageSize,
+		})
+	}
+	return hugePagesInfo, nil
+}
+
 func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
 	nodes := []info.Node{}

@@ -352,6 +395,15 @@ func addNode(nodes *[]info.Node, id int) (int, error) {
 		}
 		node.Memory = uint64(m)
 	}
+	// Look for per-node hugepages info using node id
+	// Such as: /sys/devices/system/node/node%d/hugepages
+	hugepagesDirectory := fmt.Sprintf("%s/node%d/hugepages/", nodePath, id)
+	hugePagesInfo, err := GetHugePagesInfo(hugepagesDirectory)
+	if err != nil {
+		return -1, err
+	}
+	node.HugePages = hugePagesInfo
+
 	*nodes = append(*nodes, node)
 	idx = len(*nodes) - 1
 }
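With GetHugePagesInfo now taking the hugepages directory as a parameter, the same parser serves both the machine-wide path and the per-NUMA-node paths built in addNode above. A hedged standalone sketch of the name parsing it relies on, using a hard-coded example entry instead of a real sysfs read:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Directory entries under /sys/kernel/mm/hugepages/ (or
	// /sys/devices/system/node/node<N>/hugepages/) are named like this:
	entry := "hugepages-2048kB"

	// Same split logic as GetHugePagesInfo in the hunk above:
	// "hugepages-2048kB" -> ["hugepages", "2048kB"] -> "2048".
	nameArray := strings.Split(entry, "-")
	pageSizeArray := strings.Split(nameArray[1], "kB")
	pageSize, err := strconv.ParseUint(pageSizeArray[0], 10, 64)
	if err != nil {
		panic(err)
	}

	// The page count comes from <entry>/nr_hugepages; fmt.Sscanf is used there
	// because the file ends with a newline that trips up strconv.ParseUint.
	var numPages uint64
	n, err := fmt.Sscanf("4\n", "%d", &numPages)
	if err != nil || n != 1 {
		panic("could not parse nr_hugepages contents")
	}

	fmt.Printf("page size: %d kB, pre-allocated pages: %d\n", pageSize, numPages)
}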
vendor/github.com/google/cadvisor/manager/container.go (generated, vendored; 3 lines changed)

@@ -185,6 +185,9 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) {
 	if cgroups == "-" {
 		return "/", nil
 	}
+	if strings.HasPrefix(cgroups, "0::") {
+		return cgroups[3:], nil
+	}
 	matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
 	if len(matches) != 2 {
 		klog.V(3).Infof("failed to get memory cgroup path from %q", cgroups)
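The "0::" prefix handled above is how /proc/<pid>/cgroup reports membership on the cgroup v2 unified hierarchy: hierarchy ID 0, an empty controller list, then the path. A small hedged sketch of reading both forms outside cAdvisor; the sample lines are illustrative, and cAdvisor itself uses a regexp for the v1 case rather than the SplitN shown here:

package main

import (
	"fmt"
	"strings"
)

// cgroupPathFromProcLine extracts the cgroup path from one line of
// /proc/<pid>/cgroup, handling the cgroup v2 form "0::<path>" the same way
// getCgroupPath does in the hunk above.
func cgroupPathFromProcLine(line string) (string, bool) {
	if strings.HasPrefix(line, "0::") {
		return line[3:], true
	}
	// cgroup v1 lines look like "4:memory:/kubepods/...".
	parts := strings.SplitN(line, ":", 3)
	if len(parts) == 3 {
		return parts[2], true
	}
	return "", false
}

func main() {
	for _, line := range []string{
		"0::/kubepods.slice/kubepods-burstable.slice", // cgroup v2 unified hierarchy
		"4:memory:/kubepods/burstable/pod1234",        // cgroup v1 memory controller
	} {
		if p, ok := cgroupPathFromProcLine(line); ok {
			fmt.Println(p)
		}
	}
}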
vendor/github.com/google/cadvisor/manager/manager.go (generated, vendored; 17 lines changed)

@@ -918,13 +918,15 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
 	if err != nil {
 		return err
 	}
-	devicesCgroupPath, err := handler.GetCgroupPath("devices")
-	if err != nil {
-		klog.Warningf("Error getting devices cgroup path: %v", err)
-	} else {
-		cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
+	if !cgroups.IsCgroup2UnifiedMode() {
+		devicesCgroupPath, err := handler.GetCgroupPath("devices")
 		if err != nil {
-			klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
+			klog.Warningf("Error getting devices cgroup path: %v", err)
+		} else {
+			cont.nvidiaCollector, err = m.nvidiaManager.GetCollector(devicesCgroupPath)
+			if err != nil {
+				klog.V(4).Infof("GPU metrics may be unavailable/incomplete for container %q: %v", cont.info.Name, err)
+			}
 		}
 	}

@@ -1119,9 +1121,6 @@ func (self *manager) watchForNewContainers(quit chan error) error {
 		switch {
 		case event.EventType == watcher.ContainerAdd:
 			switch event.WatchSource {
-			// the Rkt and Raw watchers can race, and if Raw wins, we want Rkt to override and create a new handler for Rkt containers
-			case watcher.Rkt:
-				err = self.overrideContainer(event.Name, event.WatchSource)
 			default:
 				err = self.createContainer(event.Name, event.WatchSource)
 			}
vendor/github.com/google/cadvisor/utils/cpuload/netlink/reader.go (generated, vendored; 2 lines changed)

@@ -66,10 +66,10 @@ func (self *NetlinkReader) GetCpuLoad(name string, path string) (info.LoadStats,
 	}

 	cfd, err := os.Open(path)
-	defer cfd.Close()
 	if err != nil {
 		return info.LoadStats{}, fmt.Errorf("failed to open cgroup path %s: %q", path, err)
 	}
+	defer cfd.Close()

 	stats, err := getLoadStats(self.familyId, cfd, self.conn)
 	if err != nil {
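Moving defer cfd.Close() after the error check is the idiomatic ordering: os.Open returns a nil *os.File on failure, and while Close on a nil *os.File only returns os.ErrInvalid rather than panicking, deferring before the check registers cleanup for a handle that was never opened. A minimal hedged illustration of the safe ordering; the file name and helper are arbitrary examples:

package main

import (
	"fmt"
	"io"
	"os"
)

// openAndCount shows the ordering the hunk above switches to: check the error
// from os.Open first, and only defer Close once the handle is known valid.
func openAndCount(path string) (int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, fmt.Errorf("failed to open %s: %w", path, err)
	}
	defer f.Close() // f is guaranteed non-nil here

	return io.Copy(io.Discard, f) // count the bytes without keeping them
}

func main() {
	// /proc/self/cgroup is just a convenient file that exists on Linux.
	n, err := openAndCount("/proc/self/cgroup")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("read %d bytes\n", n)
}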
vendor/github.com/google/cadvisor/watcher/watcher.go (generated, vendored; 1 line changed)

@@ -28,7 +28,6 @@ type ContainerWatchSource int

 const (
 	Raw ContainerWatchSource = iota
-	Rkt
 )

 // ContainerEvent represents a
vendor/modules.txt (vendored; 2 lines changed)

@@ -342,7 +342,7 @@ github.com/golang/protobuf/ptypes/timestamp
 github.com/golang/protobuf/ptypes/wrappers
 # github.com/google/btree v1.0.0 => github.com/google/btree v1.0.0
 github.com/google/btree
-# github.com/google/cadvisor v0.34.0 => github.com/google/cadvisor v0.34.0
+# github.com/google/cadvisor v0.35.0 => github.com/google/cadvisor v0.35.0
 github.com/google/cadvisor/accelerators
 github.com/google/cadvisor/cache/memory
 github.com/google/cadvisor/client/v2