mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-10-12 01:59:05 +00:00)

Commit: update cadvisor godeps
68  vendor/github.com/google/cadvisor/container/common/helpers.go  (generated, vendored)
@@ -223,3 +223,71 @@ func ListContainers(name string, cgroupPaths map[string]string, listType contain
 	return ret, nil
 }
+
+// AssignDeviceNamesToDiskStats assigns the Device field on the provided DiskIoStats by looking up
+// the device major and minor identifiers in the provided device namer.
+func AssignDeviceNamesToDiskStats(namer DeviceNamer, stats *info.DiskIoStats) {
+	assignDeviceNamesToPerDiskStats(
+		namer,
+		stats.IoMerged,
+		stats.IoQueued,
+		stats.IoServiceBytes,
+		stats.IoServiceTime,
+		stats.IoServiced,
+		stats.IoTime,
+		stats.IoWaitTime,
+		stats.Sectors,
+	)
+}
+
+// assignDeviceNamesToPerDiskStats looks up device names for the provided stats, caching names
+// if necessary.
+func assignDeviceNamesToPerDiskStats(namer DeviceNamer, diskStats ...[]info.PerDiskStats) {
+	devices := make(deviceIdentifierMap)
+	for _, stats := range diskStats {
+		for i, stat := range stats {
+			stats[i].Device = devices.Find(stat.Major, stat.Minor, namer)
+		}
+	}
+}
+
+// DeviceNamer returns string names for devices by their major and minor id.
+type DeviceNamer interface {
+	// DeviceName returns the name of the device by its major and minor ids, or false if no
+	// such device is recognized.
+	DeviceName(major, minor uint64) (string, bool)
+}
+
+type MachineInfoNamer info.MachineInfo
+
+func (n *MachineInfoNamer) DeviceName(major, minor uint64) (string, bool) {
+	for _, info := range n.DiskMap {
+		if info.Major == major && info.Minor == minor {
+			return "/dev/" + info.Name, true
+		}
+	}
+	for _, info := range n.Filesystems {
+		if info.DeviceMajor == major && info.DeviceMinor == minor {
+			return info.Device, true
+		}
+	}
+	return "", false
+}
+
+type deviceIdentifier struct {
+	major uint64
+	minor uint64
+}
+
+type deviceIdentifierMap map[deviceIdentifier]string
+
+// Find locates the device name by device identifier from the provided namer, caching the result as necessary.
+func (m deviceIdentifierMap) Find(major, minor uint64, namer DeviceNamer) string {
+	d := deviceIdentifier{major, minor}
+	if s, ok := m[d]; ok {
+		return s
+	}
+	s, _ := namer.DeviceName(major, minor)
+	m[d] = s
+	return s
+}
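The new DeviceNamer interface decouples stat labeling from any particular source of device metadata: anything with a DeviceName(major, minor) method can label PerDiskStats entries. A minimal sketch of how the pieces compose, using a hypothetical staticNamer in place of MachineInfoNamer (import paths assume the cadvisor packages vendored here; the sample major/minor values are made up):

package main

import (
	"fmt"

	"github.com/google/cadvisor/container/common"
	info "github.com/google/cadvisor/info/v1"
)

// staticNamer is a hypothetical DeviceNamer backed by a fixed table.
type staticNamer map[[2]uint64]string

func (s staticNamer) DeviceName(major, minor uint64) (string, bool) {
	name, ok := s[[2]uint64{major, minor}]
	return name, ok
}

func main() {
	stats := info.DiskIoStats{
		IoServiced: []info.PerDiskStats{{Major: 8, Minor: 0}},
	}
	namer := staticNamer{{8, 0}: "/dev/sda"}
	// Every PerDiskStats slice in stats gets its Device field filled in,
	// with lookups memoized per (major, minor) pair.
	common.AssignDeviceNamesToDiskStats(namer, &stats)
	fmt.Println(stats.IoServiced[0].Device) // /dev/sda
}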
5  vendor/github.com/google/cadvisor/container/docker/docker.go  (generated, vendored)
@@ -37,7 +37,10 @@ func Status() (v1.DockerStatus, error) {
 	if err != nil {
 		return v1.DockerStatus{}, err
 	}
+	return StatusFromDockerInfo(dockerInfo), nil
+}
+
+func StatusFromDockerInfo(dockerInfo dockertypes.Info) v1.DockerStatus {
 	out := v1.DockerStatus{}
 	out.Version = VersionString()
 	out.APIVersion = APIVersionString()
@@ -53,7 +56,7 @@ func Status() (v1.DockerStatus, error) {
 	for _, v := range dockerInfo.DriverStatus {
 		out.DriverStatus[v[0]] = v[1]
 	}
-	return out, nil
+	return out
 }
 
 func Images() ([]v1.DockerImage, error) {
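Splitting the conversion out of Status() makes it a pure function of an already-fetched dockertypes.Info, which Register() uses below to derive the thin pool name without a second daemon round trip. A self-contained sketch of just the DriverStatus flattening that StatusFromDockerInfo performs (Docker reports driver status as ordered [2]string pairs; the sample pairs are hypothetical):

package main

import "fmt"

func main() {
	driverStatus := [][2]string{
		{"Pool Name", "docker-thinpool"},
		{"Data file", "/dev/loop0"},
	}
	// Flatten the ordered key/value pairs into a lookup map, as the
	// StatusFromDockerInfo loop above does.
	out := map[string]string{}
	for _, v := range driverStatus {
		out[v[0]] = v[1]
	}
	fmt.Println(out["Pool Name"]) // docker-thinpool
}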
12  vendor/github.com/google/cadvisor/container/docker/factory.go  (generated, vendored)
@@ -84,6 +84,7 @@ const (
 	devicemapperStorageDriver storageDriver = "devicemapper"
 	aufsStorageDriver         storageDriver = "aufs"
 	overlayStorageDriver      storageDriver = "overlay"
+	overlay2StorageDriver     storageDriver = "overlay2"
 	zfsStorageDriver          storageDriver = "zfs"
 )
 
@@ -107,6 +108,7 @@ type dockerFactory struct {
 	ignoreMetrics container.MetricSet
 
+	thinPoolName    string
 	thinPoolWatcher *devicemapper.ThinPoolWatcher
 
 	zfsWatcher *zfs.ZfsWatcher
@@ -136,6 +138,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
 		metadataEnvs,
 		self.dockerVersion,
 		self.ignoreMetrics,
+		self.thinPoolName,
 		self.thinPoolWatcher,
 		self.zfsWatcher,
 	)
@@ -323,12 +326,18 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		return fmt.Errorf("failed to get cgroup subsystems: %v", err)
 	}
 
-	var thinPoolWatcher *devicemapper.ThinPoolWatcher
+	var (
+		thinPoolWatcher *devicemapper.ThinPoolWatcher
+		thinPoolName    string
+	)
 	if storageDriver(dockerInfo.Driver) == devicemapperStorageDriver {
 		thinPoolWatcher, err = startThinPoolWatcher(dockerInfo)
 		if err != nil {
 			glog.Errorf("devicemapper filesystem stats will not be reported: %v", err)
 		}
+
+		status := StatusFromDockerInfo(*dockerInfo)
+		thinPoolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
 	}
 
 	var zfsWatcher *zfs.ZfsWatcher
@@ -350,6 +359,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics c
 		storageDriver:   storageDriver(dockerInfo.Driver),
 		storageDir:      RootDir(),
 		ignoreMetrics:   ignoreMetrics,
+		thinPoolName:    thinPoolName,
 		thinPoolWatcher: thinPoolWatcher,
 		zfsWatcher:      zfsWatcher,
 	}
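With this change the thin pool name is resolved once at Register() time and threaded through dockerFactory into every handler, rather than each handler querying the daemon via Status(). A distilled, self-contained sketch of that registration-time lookup, with a stand-in struct for the vendored dockertypes.Info (the real code builds a status map first and indexes it by dockerutil.DriverStatusPoolName):

package main

import "fmt"

// dockerInfo stands in for the vendored dockertypes.Info.
type dockerInfo struct {
	Driver       string
	DriverStatus [][2]string
}

// poolNameFor mirrors the devicemapper branch added to Register above:
// only that driver has a thin pool to name.
func poolNameFor(i dockerInfo) string {
	if i.Driver != "devicemapper" {
		return ""
	}
	for _, v := range i.DriverStatus {
		if v[0] == "Pool Name" { // dockerutil.DriverStatusPoolName in the real code
			return v[1]
		}
	}
	return ""
}

func main() {
	i := dockerInfo{Driver: "devicemapper", DriverStatus: [][2]string{{"Pool Name", "docker-thinpool"}}}
	fmt.Println(poolNameFor(i)) // docker-thinpool
}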
43  vendor/github.com/google/cadvisor/container/docker/handler.go  (generated, vendored)
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"path"
+	"strconv"
 	"strings"
 	"time"
 
@@ -112,6 +113,9 @@ type dockerContainerHandler struct {
 
 	// zfs watcher
 	zfsWatcher *zfs.ZfsWatcher
+
+	// container restart count
+	restartCount int
 }
 
 var _ container.ContainerHandler = &dockerContainerHandler{}
@@ -146,6 +150,7 @@ func newDockerContainerHandler(
 	metadataEnvs []string,
 	dockerVersion []int,
 	ignoreMetrics container.MetricSet,
+	thinPoolName string,
 	thinPoolWatcher *devicemapper.ThinPoolWatcher,
 	zfsWatcher *zfs.ZfsWatcher,
 ) (container.ContainerHandler, error) {
@@ -180,18 +185,18 @@ func newDockerContainerHandler(
 		return nil, err
 	}
 
-	// Determine the rootfs storage dir OR the pool name to determine the device
+	// Determine the rootfs storage dir OR the pool name to determine the device.
+	// For devicemapper, we only need the thin pool name, and that is passed in to this call
 	var (
 		rootfsStorageDir string
-		poolName         string
 		zfsFilesystem    string
 		zfsParent        string
 	)
 	switch storageDriver {
 	case aufsStorageDriver:
 		rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
-	case overlayStorageDriver:
-		rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
+	case overlayStorageDriver, overlay2StorageDriver:
+		rootfsStorageDir = path.Join(storageDir, string(storageDriver), rwLayerID)
 	case zfsStorageDriver:
 		status, err := Status()
 		if err != nil {
@@ -199,13 +204,6 @@ func newDockerContainerHandler(
 		}
 		zfsParent = status.DriverStatus[dockerutil.DriverStatusParentDataset]
 		zfsFilesystem = path.Join(zfsParent, rwLayerID)
-	case devicemapperStorageDriver:
-		status, err := Status()
-		if err != nil {
-			return nil, fmt.Errorf("unable to determine docker status: %v", err)
-		}
-
-		poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
 	}
 
 	// TODO: extract object mother method
@@ -219,7 +217,7 @@ func newDockerContainerHandler(
 		storageDriver:    storageDriver,
 		fsInfo:           fsInfo,
 		rootFs:           rootFs,
-		poolName:         poolName,
+		poolName:         thinPoolName,
 		zfsFilesystem:    zfsFilesystem,
 		rootfsStorageDir: rootfsStorageDir,
 		envs:             make(map[string]string),
@@ -248,6 +246,7 @@ func newDockerContainerHandler(
 	handler.image = ctnr.Config.Image
 	handler.networkMode = ctnr.HostConfig.NetworkMode
 	handler.deviceID = ctnr.GraphDriver.Data["DeviceId"]
+	handler.restartCount = ctnr.RestartCount
 
 	// Obtain the IP address for the container.
 	// If the NetworkMode starts with 'container:' then we need to use the IP address of the container specified.
@@ -383,6 +382,10 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 	spec, err := common.GetSpec(self.cgroupPaths, self.machineInfoFactory, self.needNet(), hasFilesystem)
 
 	spec.Labels = self.labels
+	// Only add the restartcount label if it's greater than 0
+	if self.restartCount > 0 {
+		spec.Labels["restartcount"] = strconv.Itoa(self.restartCount)
+	}
 	spec.Envs = self.envs
 	spec.Image = self.image
 
@@ -390,6 +393,15 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 }
 
 func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
+	mi, err := self.machineInfoFactory.GetMachineInfo()
+	if err != nil {
+		return err
+	}
+
+	if !self.ignoreMetrics.Has(container.DiskIOMetrics) {
+		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
+	}
+
 	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
@@ -399,7 +411,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		// Device has to be the pool name to correlate with the device name as
 		// set in the machine info filesystems.
 		device = self.poolName
-	case aufsStorageDriver, overlayStorageDriver:
+	case aufsStorageDriver, overlayStorageDriver, overlay2StorageDriver:
 		deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
 		if err != nil {
 			return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
@@ -411,11 +423,6 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return nil
 	}
 
-	mi, err := self.machineInfoFactory.GetMachineInfo()
-	if err != nil {
-		return err
-	}
-
 	var (
 		limit  uint64
 		fsType string
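The overlay and overlay2 branches now share one path computation, differing only in the driver segment spliced into the path. A runnable sketch of the resulting rootfs layout, assuming Docker's default storage root (RootDir() in the real code; the layer ID is illustrative):

package main

import (
	"fmt"
	"path"
)

func main() {
	storageDir := "/var/lib/docker" // illustrative; RootDir() in the real code
	rwLayerID := "3f1b2e"           // illustrative read-write layer ID
	for _, driver := range []string{"overlay", "overlay2"} {
		// Matches: path.Join(storageDir, string(storageDriver), rwLayerID)
		fmt.Println(path.Join(storageDir, driver, rwLayerID))
	}
	// Output:
	// /var/lib/docker/overlay/3f1b2e
	// /var/lib/docker/overlay2/3f1b2e
}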
1  vendor/github.com/google/cadvisor/container/factory.go  (generated, vendored)
@@ -48,6 +48,7 @@ const (
 	DiskUsageMetrics       MetricKind = "disk"
 	NetworkUsageMetrics    MetricKind = "network"
 	NetworkTcpUsageMetrics MetricKind = "tcp"
+	NetworkUdpUsageMetrics MetricKind = "udp"
 	AppMetrics             MetricKind = "app"
 )
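The new metric kind plugs into the existing ignore-set mechanism: callers that do not want per-container UDP stats add it to their MetricSet, exactly as with the tcp kind. A minimal sketch, assuming the MetricSet Add/Has helpers behave at this revision as the Has calls in the diffs above imply:

package main

import (
	"fmt"

	"github.com/google/cadvisor/container"
)

func main() {
	// Disable UDP collection only; TCP stays enabled.
	ignore := container.MetricSet{}
	ignore.Add(container.NetworkUdpUsageMetrics)
	fmt.Println(ignore.Has(container.NetworkUdpUsageMetrics)) // true
	fmt.Println(ignore.Has(container.NetworkTcpUsageMetrics)) // false
}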
84  vendor/github.com/google/cadvisor/container/libcontainer/helpers.go  (generated, vendored)
@@ -17,6 +17,7 @@ package libcontainer
 import (
 	"bufio"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"path"
@@ -118,6 +119,21 @@ func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int, ignoreMetri
 			stats.Network.Tcp6 = t6
 		}
 	}
+	if !ignoreMetrics.Has(container.NetworkUdpUsageMetrics) {
+		u, err := udpStatsFromProc(rootFs, pid, "net/udp")
+		if err != nil {
+			glog.V(2).Infof("Unable to get udp stats from pid %d: %v", pid, err)
+		} else {
+			stats.Network.Udp = u
+		}
+
+		u6, err := udpStatsFromProc(rootFs, pid, "net/udp6")
+		if err != nil {
+			glog.V(2).Infof("Unable to get udp6 stats from pid %d: %v", pid, err)
+		} else {
+			stats.Network.Udp6 = u6
+		}
+	}
 
 	// For backwards compatibility.
 	if len(stats.Network.Interfaces) > 0 {
@@ -291,6 +307,74 @@ func scanTcpStats(tcpStatsFile string) (info.TcpStat, error) {
 	return stats, nil
 }
 
+func udpStatsFromProc(rootFs string, pid int, file string) (info.UdpStat, error) {
+	var err error
+	var udpStats info.UdpStat
+
+	udpStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), file)
+
+	r, err := os.Open(udpStatsFile)
+	if err != nil {
+		return udpStats, fmt.Errorf("failure opening %s: %v", udpStatsFile, err)
+	}
+
+	udpStats, err = scanUdpStats(r)
+	if err != nil {
+		return udpStats, fmt.Errorf("couldn't read udp stats: %v", err)
+	}
+
+	return udpStats, nil
+}
+
+func scanUdpStats(r io.Reader) (info.UdpStat, error) {
+	var stats info.UdpStat
+
+	scanner := bufio.NewScanner(r)
+	scanner.Split(bufio.ScanLines)
+
+	// Discard header line
+	if b := scanner.Scan(); !b {
+		return stats, scanner.Err()
+	}
+
+	listening := uint64(0)
+	dropped := uint64(0)
+	rxQueued := uint64(0)
+	txQueued := uint64(0)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		// Format: sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
+
+		listening++
+
+		fs := strings.Fields(line)
+		if len(fs) != 13 {
+			continue
+		}
+
+		rx, tx := uint64(0), uint64(0)
+		fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
+		rxQueued += rx
+		txQueued += tx
+
+		d, err := strconv.Atoi(string(fs[12]))
+		if err != nil {
+			continue
+		}
+		dropped += uint64(d)
+	}
+
+	stats = info.UdpStat{
+		Listen:   listening,
+		Dropped:  dropped,
+		RxQueued: rxQueued,
+		TxQueued: txQueued,
+	}
+
+	return stats, nil
+}
+
 func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
 	pids, err := cgroupManager.GetPids()
 	if err != nil {
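scanUdpStats counts every socket line toward Listen, sums the hex queue field, and accumulates the trailing drops column, skipping lines that do not split into exactly 13 fields. Since the function is unexported, here is a runnable sketch that reproduces its loop against a fabricated /proc/net/udp excerpt:

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Fabricated two-line /proc/net/udp excerpt: header plus one socket entry.
	sample := `  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
 6073: 00000000:0016 00000000:0000 0A 00000001:00000002 00:00000000 00000000     0        0 10738 2 ffff880000000000 3`

	scanner := bufio.NewScanner(strings.NewReader(sample))
	scanner.Scan() // discard the header line, as scanUdpStats does

	var listen, dropped, rxQueued, txQueued uint64
	for scanner.Scan() {
		listen++ // every socket line counts, even malformed ones
		fs := strings.Fields(scanner.Text())
		if len(fs) != 13 {
			continue
		}
		// fs[4] holds the colon-separated hex queue sizes.
		var rx, tx uint64
		fmt.Sscanf(fs[4], "%X:%X", &rx, &tx)
		rxQueued += rx
		txQueued += tx
		// fs[12] is the drops column.
		if d, err := strconv.Atoi(fs[12]); err == nil {
			dropped += uint64(d)
		}
	}
	fmt.Println(listen, dropped, rxQueued, txQueued) // 1 3 1 2
}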
27  vendor/github.com/google/cadvisor/container/raw/handler.go  (generated, vendored)
@@ -197,6 +197,7 @@ func fsToFsStats(fs *fs.Fs) info.FsStats {
 }
 
 func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
+	var allFs []fs.Fs
 	// Get Filesystem information only for the root cgroup.
 	if isRootCgroup(self.name) {
 		filesystems, err := self.fsInfo.GetGlobalFsInfo()
@@ -207,6 +208,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 			fs := filesystems[i]
 			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
 		}
+		allFs = filesystems
 	} else if len(self.externalMounts) > 0 {
 		var mountSet map[string]struct{}
 		mountSet = make(map[string]struct{})
@@ -221,7 +223,10 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 			fs := filesystems[i]
 			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
 		}
+		allFs = filesystems
 	}
+
+	common.AssignDeviceNamesToDiskStats(&fsNamer{fs: allFs, factory: self.machineInfoFactory}, &stats.DiskIo)
 	return nil
 }
 
@@ -272,3 +277,25 @@ func (self *rawContainerHandler) Exists() bool {
 func (self *rawContainerHandler) Type() container.ContainerType {
 	return container.ContainerTypeRaw
 }
+
+type fsNamer struct {
+	fs      []fs.Fs
+	factory info.MachineInfoFactory
+	info    common.DeviceNamer
+}
+
+func (n *fsNamer) DeviceName(major, minor uint64) (string, bool) {
+	for _, info := range n.fs {
+		if uint64(info.Major) == major && uint64(info.Minor) == minor {
+			return info.Device, true
+		}
+	}
+	if n.info == nil {
+		mi, err := n.factory.GetMachineInfo()
+		if err != nil {
+			return "", false
+		}
+		n.info = (*common.MachineInfoNamer)(mi)
+	}
+	return n.info.DeviceName(major, minor)
+}
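fsNamer consults the filesystems already collected during this stats pass first, and only falls back to machine info on a miss, memoizing the fallback so GetMachineInfo runs at most once per getFsStats call. A distilled, runnable sketch of that lazy-fallback pattern, with hypothetical stand-in types:

package main

import "fmt"

// buildCount tracks how often the expensive fallback (GetMachineInfo in the
// real code) gets constructed. Both namer types here are hypothetical.
var buildCount int

type staticNamer map[[2]uint64]string

func (s staticNamer) DeviceName(major, minor uint64) (string, bool) {
	name, ok := s[[2]uint64{major, minor}]
	return name, ok
}

type lazyNamer struct {
	fallback staticNamer
}

func (l *lazyNamer) DeviceName(major, minor uint64) (string, bool) {
	if l.fallback == nil {
		buildCount++ // stands in for factory.GetMachineInfo()
		l.fallback = staticNamer{{8, 0}: "/dev/sda"}
	}
	return l.fallback.DeviceName(major, minor)
}

func main() {
	n := &lazyNamer{}
	n.DeviceName(8, 0)
	n.DeviceName(8, 0)
	fmt.Println(buildCount) // 1: the fallback is built once, then reused
}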
13  vendor/github.com/google/cadvisor/container/rkt/handler.go  (generated, vendored)
@@ -202,6 +202,15 @@ func (handler *rktContainerHandler) GetSpec() (info.ContainerSpec, error) {
 }
 
 func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error {
+	mi, err := handler.machineInfoFactory.GetMachineInfo()
+	if err != nil {
+		return err
+	}
+
+	if !handler.ignoreMetrics.Has(container.DiskIOMetrics) {
+		common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
+	}
+
 	if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
 		return nil
 	}
@@ -211,10 +220,6 @@ func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error
 		return err
 	}
 
-	mi, err := handler.machineInfoFactory.GetMachineInfo()
-	if err != nil {
-		return err
-	}
 	var limit uint64 = 0
 
 	// Use capacity as limit.
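As in the docker handler, the disk-IO device naming now runs before the disk-usage early return, so IO stats keep their device names even when DiskUsageMetrics is ignored. A small self-contained sketch of that gating order, with illustrative metric names standing in for the container.MetricKind constants:

package main

import "fmt"

type metricSet map[string]struct{}

func (m metricSet) Has(k string) bool { _, ok := m[k]; return ok }

// getFsStats mirrors the control flow of the hunk above.
func getFsStats(ignore metricSet) []string {
	var steps []string
	if !ignore.Has("diskIO") {
		steps = append(steps, "assign device names")
	}
	if ignore.Has("disk") {
		return steps // usage ignored, but IO device names were still assigned
	}
	steps = append(steps, "collect usage stats")
	return steps
}

func main() {
	fmt.Println(getFsStats(metricSet{"disk": {}})) // [assign device names]
}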