Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-10-12 18:18:51 +00:00)
Move deps from _workspace/ to vendor/
godep restore
pushd $GOPATH/src/github.com/appc/spec
git co master
popd
go get go4.org/errorutil
rm -rf Godeps
godep save ./...
git add vendor
git add -f $(git ls-files --other vendor/)
git co -- Godeps/LICENSES Godeps/.license_file_state Godeps/OWNERS
vendor/github.com/google/cadvisor/manager/container.go (new file, generated, vendored, 583 lines)
@@ -0,0 +1,583 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package manager

import (
	"flag"
	"fmt"
	"io/ioutil"
	"math"
	"math/rand"
	"os/exec"
	"path"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/google/cadvisor/cache/memory"
	"github.com/google/cadvisor/collector"
	"github.com/google/cadvisor/container"
	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/info/v2"
	"github.com/google/cadvisor/summary"
	"github.com/google/cadvisor/utils/cpuload"

	units "github.com/docker/go-units"
	"github.com/golang/glog"
)

// Housekeeping interval.
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")

var cgroupPathRegExp = regexp.MustCompile(`devices[^:]*:(.*?)[,;$]`)

type containerInfo struct {
	info.ContainerReference
	Subcontainers []info.ContainerReference
	Spec          info.ContainerSpec
}

type containerData struct {
	handler                  container.ContainerHandler
	info                     containerInfo
	memoryCache              *memory.InMemoryCache
	lock                     sync.Mutex
	loadReader               cpuload.CpuLoadReader
	summaryReader            *summary.StatsSummary
	loadAvg                  float64 // smoothed load average seen so far.
	housekeepingInterval     time.Duration
	maxHousekeepingInterval  time.Duration
	allowDynamicHousekeeping bool
	lastUpdatedTime          time.Time
	lastErrorTime            time.Time

	// Decay value used for load average smoothing. Interval length of 10 seconds is used.
	loadDecay float64

	// Whether to log the usage of this container when it is updated.
	logUsage bool

	// Tells the container to stop.
	stop chan bool

	// Runs custom metric collectors.
	collectorManager collector.CollectorManager
}

// jitter returns a time.Duration between duration and duration + maxFactor * duration,
// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a
// suggested default value will be chosen.
func jitter(duration time.Duration, maxFactor float64) time.Duration {
	if maxFactor <= 0.0 {
		maxFactor = 1.0
	}
	wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
	return wait
}

func (c *containerData) Start() error {
	go c.housekeeping()
	return nil
}

func (c *containerData) Stop() error {
	err := c.memoryCache.RemoveContainer(c.info.Name)
	if err != nil {
		return err
	}
	c.stop <- true
	return nil
}

func (c *containerData) allowErrorLogging() bool {
	if time.Since(c.lastErrorTime) > time.Minute {
		c.lastErrorTime = time.Now()
		return true
	}
	return false
}

func (c *containerData) GetInfo() (*containerInfo, error) {
	// Get spec and subcontainers.
	if time.Since(c.lastUpdatedTime) > 5*time.Second {
		err := c.updateSpec()
		if err != nil {
			return nil, err
		}
		err = c.updateSubcontainers()
		if err != nil {
			return nil, err
		}
		c.lastUpdatedTime = time.Now()
	}
	// Make a copy of the info for the user.
	c.lock.Lock()
	defer c.lock.Unlock()
	return &c.info, nil
}

func (c *containerData) DerivedStats() (v2.DerivedStats, error) {
	if c.summaryReader == nil {
		return v2.DerivedStats{}, fmt.Errorf("derived stats not enabled for container %q", c.info.Name)
	}
	return c.summaryReader.DerivedStats()
}

func (c *containerData) getCgroupPath(cgroups string) (string, error) {
	if cgroups == "-" {
		return "/", nil
	}
	matches := cgroupPathRegExp.FindSubmatch([]byte(cgroups))
	if len(matches) != 2 {
		glog.V(3).Infof("failed to get devices cgroup path from %q", cgroups)
		// return root in case of failures - devices hierarchy might not be enabled.
		return "/", nil
	}
	return string(matches[1]), nil
}

// Returns contents of a file inside the container root.
// Takes in a path relative to container root.
func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte, error) {
	pids, err := c.getContainerPids(inHostNamespace)
	if err != nil {
		return nil, err
	}
	// TODO(rjnagal): Optimize by just reading container's cgroup.proc file when in host namespace.
	rootfs := "/"
	if !inHostNamespace {
		rootfs = "/rootfs"
	}
	for _, pid := range pids {
		filePath := path.Join(rootfs, "/proc", pid, "/root", filepath)
		glog.V(3).Infof("Trying path %q", filePath)
		data, err := ioutil.ReadFile(filePath)
		if err == nil {
			return data, err
		}
	}
	// No process paths could be found. Declare config non-existent.
	return nil, fmt.Errorf("file %q does not exist.", filepath)
}

// Return output for ps command in host /proc with specified format
func (c *containerData) getPsOutput(inHostNamespace bool, format string) ([]byte, error) {
	args := []string{}
	command := "ps"
	if !inHostNamespace {
		command = "/usr/sbin/chroot"
		args = append(args, "/rootfs", "ps")
	}
	args = append(args, "-e", "-o", format)
	out, err := exec.Command(command, args...).Output()
	if err != nil {
		return nil, fmt.Errorf("failed to execute %q command: %v", command, err)
	}
	return out, err
}

// Get pids of processes in this container.
// A slightly lighterweight call than GetProcessList if other details are not required.
func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error) {
	format := "pid,cgroup"
	out, err := c.getPsOutput(inHostNamespace, format)
	if err != nil {
		return nil, err
	}
	expectedFields := 2
	lines := strings.Split(string(out), "\n")
	pids := []string{}
	for _, line := range lines[1:] {
		if len(line) == 0 {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) < expectedFields {
			return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
		}
		pid := fields[0]
		cgroup, err := c.getCgroupPath(fields[1])
		if err != nil {
			return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[1], err)
		}
		if c.info.Name == cgroup {
			pids = append(pids, pid)
		}
	}
	return pids, nil
}

func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
	// report all processes for root.
	isRoot := c.info.Name == "/"
	format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup"
	out, err := c.getPsOutput(inHostNamespace, format)
	if err != nil {
		return nil, err
	}
	expectedFields := 12
	processes := []v2.ProcessInfo{}
	lines := strings.Split(string(out), "\n")
	for _, line := range lines[1:] {
		if len(line) == 0 {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) < expectedFields {
			return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
		}
		pid, err := strconv.Atoi(fields[1])
		if err != nil {
			return nil, fmt.Errorf("invalid pid %q: %v", fields[1], err)
		}
		ppid, err := strconv.Atoi(fields[2])
		if err != nil {
			return nil, fmt.Errorf("invalid ppid %q: %v", fields[2], err)
		}
		percentCpu, err := strconv.ParseFloat(fields[4], 32)
		if err != nil {
			return nil, fmt.Errorf("invalid cpu percent %q: %v", fields[4], err)
		}
		percentMem, err := strconv.ParseFloat(fields[5], 32)
		if err != nil {
			return nil, fmt.Errorf("invalid memory percent %q: %v", fields[5], err)
		}
		rss, err := strconv.ParseUint(fields[6], 0, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid rss %q: %v", fields[6], err)
		}
		// convert to bytes
		rss *= 1024
		vs, err := strconv.ParseUint(fields[7], 0, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid virtual size %q: %v", fields[7], err)
		}
		// convert to bytes
		vs *= 1024
		cgroup, err := c.getCgroupPath(fields[11])
		if err != nil {
			return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[11], err)
		}
		// Remove the ps command we just ran from cadvisor container.
		// Not necessary, but makes the cadvisor page look cleaner.
		if !inHostNamespace && cadvisorContainer == cgroup && fields[10] == "ps" {
			continue
		}
		var cgroupPath string
		if isRoot {
			cgroupPath = cgroup
		}

		if isRoot || c.info.Name == cgroup {
			processes = append(processes, v2.ProcessInfo{
				User:          fields[0],
				Pid:           pid,
				Ppid:          ppid,
				StartTime:     fields[3],
				PercentCpu:    float32(percentCpu),
				PercentMemory: float32(percentMem),
				RSS:           rss,
				VirtualSize:   vs,
				Status:        fields[8],
				RunningTime:   fields[9],
				Cmd:           fields[10],
				CgroupPath:    cgroupPath,
			})
		}
	}
	return processes, nil
}

func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (*containerData, error) {
	if memoryCache == nil {
		return nil, fmt.Errorf("nil memory storage")
	}
	if handler == nil {
		return nil, fmt.Errorf("nil container handler")
	}
	ref, err := handler.ContainerReference()
	if err != nil {
		return nil, err
	}

	cont := &containerData{
		handler:                  handler,
		memoryCache:              memoryCache,
		housekeepingInterval:     *HousekeepingInterval,
		maxHousekeepingInterval:  maxHousekeepingInterval,
		allowDynamicHousekeeping: allowDynamicHousekeeping,
		loadReader:               loadReader,
		logUsage:                 logUsage,
		loadAvg:                  -1.0, // negative value indicates uninitialized.
		stop:                     make(chan bool, 1),
		collectorManager:         collectorManager,
	}
	cont.info.ContainerReference = ref

	cont.loadDecay = math.Exp(float64(-cont.housekeepingInterval.Seconds() / 10))

	err = cont.updateSpec()
	if err != nil {
		return nil, err
	}
	cont.summaryReader, err = summary.New(cont.info.Spec)
	if err != nil {
		cont.summaryReader = nil
		glog.Warningf("Failed to create summary reader for %q: %v", ref.Name, err)
	}

	return cont, nil
}

// Determine when the next housekeeping should occur.
func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Time {
	if self.allowDynamicHousekeeping {
		var empty time.Time
		stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2)
		if err != nil {
			if self.allowErrorLogging() {
				glog.Warningf("Failed to get RecentStats(%q) while determining the next housekeeping: %v", self.info.Name, err)
			}
		} else if len(stats) == 2 {
			// TODO(vishnuk): Use no processes as a signal.
			// Raise the interval if usage hasn't changed in the last housekeeping.
			if stats[0].StatsEq(stats[1]) && (self.housekeepingInterval < self.maxHousekeepingInterval) {
				self.housekeepingInterval *= 2
				if self.housekeepingInterval > self.maxHousekeepingInterval {
					self.housekeepingInterval = self.maxHousekeepingInterval
				}
			} else if self.housekeepingInterval != *HousekeepingInterval {
				// Lower interval back to the baseline.
				self.housekeepingInterval = *HousekeepingInterval
			}
		}
	}

	return lastHousekeeping.Add(jitter(self.housekeepingInterval, 1.0))
}

// TODO(vmarmol): Implement stats collecting as a custom collector.
func (c *containerData) housekeeping() {
	// Start any background goroutines - must be cleaned up in c.handler.Cleanup().
	c.handler.Start()

	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if *HousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = *HousekeepingInterval / 2
	}

	// Housekeep every second.
	glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
	lastHousekeeping := time.Now()
	for {
		select {
		case <-c.stop:
			// Cleanup container resources before stopping housekeeping.
			c.handler.Cleanup()
			// Stop housekeeping when signaled.
			return
		default:
			// Perform housekeeping.
			start := time.Now()
			c.housekeepingTick()

			// Log if housekeeping took too long.
			duration := time.Since(start)
			if duration >= longHousekeeping {
				glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
			}
		}

		// Log usage if asked to do so.
		if c.logUsage {
			const numSamples = 60
			var empty time.Time
			stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples)
			if err != nil {
				if c.allowErrorLogging() {
					glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
				}
			} else if len(stats) < numSamples {
				// Ignore, not enough stats yet.
			} else {
				usageCpuNs := uint64(0)
				for i := range stats {
					if i > 0 {
						usageCpuNs += (stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total)
					}
				}
				usageMemory := stats[numSamples-1].Memory.Usage

				instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds())
				usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
				usageInHuman := units.HumanSize(float64(usageMemory))
				glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
			}
		}

		next := c.nextHousekeeping(lastHousekeeping)

		// Schedule the next housekeeping. Sleep until that time.
		if time.Now().Before(next) {
			time.Sleep(next.Sub(time.Now()))
		} else {
			next = time.Now()
		}
		lastHousekeeping = next
	}
}

func (c *containerData) housekeepingTick() {
	err := c.updateStats()
	if err != nil {
		if c.allowErrorLogging() {
			glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err)
		}
	}
}

func (c *containerData) updateSpec() error {
	spec, err := c.handler.GetSpec()
	if err != nil {
		// Ignore errors if the container is dead.
		if !c.handler.Exists() {
			return nil
		}
		return err
	}

	customMetrics, err := c.collectorManager.GetSpec()
	if err != nil {
		return err
	}
	if len(customMetrics) > 0 {
		spec.HasCustomMetrics = true
		spec.CustomMetrics = customMetrics
	}
	c.lock.Lock()
	defer c.lock.Unlock()
	c.info.Spec = spec
	return nil
}

// Calculate new smoothed load average using the new sample of runnable threads.
// The decay used ensures that the load will stabilize on a new constant value within
// 10 seconds.
func (c *containerData) updateLoad(newLoad uint64) {
	if c.loadAvg < 0 {
		c.loadAvg = float64(newLoad) // initialize to the first seen sample for faster stabilization.
	} else {
		c.loadAvg = c.loadAvg*c.loadDecay + float64(newLoad)*(1.0-c.loadDecay)
	}
}

func (c *containerData) updateStats() error {
	stats, statsErr := c.handler.GetStats()
	if statsErr != nil {
		// Ignore errors if the container is dead.
		if !c.handler.Exists() {
			return nil
		}

		// Stats may be partially populated, push those before we return an error.
		statsErr = fmt.Errorf("%v, continuing to push stats", statsErr)
	}
	if stats == nil {
		return statsErr
	}
	if c.loadReader != nil {
		// TODO(vmarmol): Cache this path.
		path, err := c.handler.GetCgroupPath("cpu")
		if err == nil {
			loadStats, err := c.loadReader.GetCpuLoad(c.info.Name, path)
			if err != nil {
				return fmt.Errorf("failed to get load stat for %q - path %q, error %s", c.info.Name, path, err)
			}
			stats.TaskStats = loadStats
			c.updateLoad(loadStats.NrRunning)
			// convert to 'milliLoad' to avoid floats and preserve precision.
			stats.Cpu.LoadAverage = int32(c.loadAvg * 1000)
		}
	}
	if c.summaryReader != nil {
		err := c.summaryReader.AddSample(*stats)
		if err != nil {
			// Ignore summary errors for now.
			glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
		}
	}
	var customStatsErr error
	cm := c.collectorManager.(*collector.GenericCollectorManager)
	if len(cm.Collectors) > 0 {
		if cm.NextCollectionTime.Before(time.Now()) {
			customStats, err := c.updateCustomStats()
			if customStats != nil {
				stats.CustomMetrics = customStats
			}
			if err != nil {
				customStatsErr = err
			}
		}
	}

	ref, err := c.handler.ContainerReference()
	if err != nil {
		// Ignore errors if the container is dead.
		if !c.handler.Exists() {
			return nil
		}
		return err
	}
	err = c.memoryCache.AddStats(ref, stats)
	if err != nil {
		return err
	}
	if statsErr != nil {
		return statsErr
	}
	return customStatsErr
}

func (c *containerData) updateCustomStats() (map[string][]info.MetricVal, error) {
	_, customStats, customStatsErr := c.collectorManager.Collect()
	if customStatsErr != nil {
		if !c.handler.Exists() {
			return customStats, nil
		}
		customStatsErr = fmt.Errorf("%v, continuing to push custom stats", customStatsErr)
	}
	return customStats, customStatsErr
}

func (c *containerData) updateSubcontainers() error {
	var subcontainers info.ContainerReferenceSlice
	subcontainers, err := c.handler.ListContainers(container.ListSelf)
	if err != nil {
		// Ignore errors if the container is dead.
		if !c.handler.Exists() {
			return nil
		}
		return err
	}
	sort.Sort(subcontainers)
	c.lock.Lock()
	defer c.lock.Unlock()
	c.info.Subcontainers = subcontainers
	return nil
}
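Two details of the housekeeping loop above are easy to miss: nextHousekeeping jitters each interval by up to 100% so that many containers do not converge on the same tick, and updateLoad smooths the runnable-thread count with an exponential decay of exp(-interval/10s), which is why the comment says the load stabilizes within roughly ten seconds. Here is a minimal standalone sketch of that smoothing, not part of the vendored code; the sample values are hypothetical:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Decay as computed in newContainerData: exp(-interval/10s).
	interval := 1 * time.Second
	decay := math.Exp(-interval.Seconds() / 10) // ~0.905 for a 1s interval

	loadAvg := -1.0 // negative means "uninitialized", as in containerData.
	for _, sample := range []uint64{4, 4, 0, 0, 0} { // hypothetical NrRunning samples
		if loadAvg < 0 {
			loadAvg = float64(sample) // first sample initializes directly
		} else {
			loadAvg = loadAvg*decay + float64(sample)*(1.0-decay)
		}
		// Same 'milliLoad' conversion updateStats applies before storing.
		fmt.Printf("sample=%d loadAvg=%.3f milliLoad=%d\n", sample, loadAvg, int32(loadAvg*1000))
	}
}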
vendor/github.com/google/cadvisor/manager/machine.go (new file, generated, vendored, 183 lines)
@@ -0,0 +1,183 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package manager

import (
	"bytes"
	"flag"
	"io/ioutil"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/google/cadvisor/container/docker"
	"github.com/google/cadvisor/fs"
	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/utils/cloudinfo"
	"github.com/google/cadvisor/utils/machine"
	"github.com/google/cadvisor/utils/sysfs"
	"github.com/google/cadvisor/utils/sysinfo"
	version "github.com/google/cadvisor/version"

	"github.com/golang/glog"
)

var machineIdFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.")
var bootIdFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. Use the first one that exists.")

func getInfoFromFiles(filePaths string) string {
	if len(filePaths) == 0 {
		return ""
	}
	for _, file := range strings.Split(filePaths, ",") {
		id, err := ioutil.ReadFile(file)
		if err == nil {
			return strings.TrimSpace(string(id))
		}
	}
	glog.Infof("Couldn't collect info from any of the files in %q", filePaths)
	return ""
}

func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.MachineInfo, error) {
	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
	}

	cpuinfo, err := ioutil.ReadFile(filepath.Join(rootFs, "/proc/cpuinfo"))
	clockSpeed, err := machine.GetClockSpeed(cpuinfo)
	if err != nil {
		return nil, err
	}

	memoryCapacity, err := machine.GetMachineMemoryCapacity()
	if err != nil {
		return nil, err
	}

	filesystems, err := fsInfo.GetGlobalFsInfo()
	if err != nil {
		glog.Errorf("Failed to get global filesystem information: %v", err)
	}

	diskMap, err := sysinfo.GetBlockDeviceInfo(sysFs)
	if err != nil {
		glog.Errorf("Failed to get disk map: %v", err)
	}

	netDevices, err := sysinfo.GetNetworkDevices(sysFs)
	if err != nil {
		glog.Errorf("Failed to get network devices: %v", err)
	}

	topology, numCores, err := machine.GetTopology(sysFs, string(cpuinfo))
	if err != nil {
		glog.Errorf("Failed to get topology information: %v", err)
	}

	systemUUID, err := sysinfo.GetSystemUUID(sysFs)
	if err != nil {
		glog.Errorf("Failed to get system UUID: %v", err)
	}

	realCloudInfo := cloudinfo.NewRealCloudInfo()
	cloudProvider := realCloudInfo.GetCloudProvider()
	instanceType := realCloudInfo.GetInstanceType()
	instanceID := realCloudInfo.GetInstanceID()

	machineInfo := &info.MachineInfo{
		NumCores:       numCores,
		CpuFrequency:   clockSpeed,
		MemoryCapacity: memoryCapacity,
		DiskMap:        diskMap,
		NetworkDevices: netDevices,
		Topology:       topology,
		MachineID:      getInfoFromFiles(filepath.Join(rootFs, *machineIdFilePath)),
		SystemUUID:     systemUUID,
		BootID:         getInfoFromFiles(filepath.Join(rootFs, *bootIdFilePath)),
		CloudProvider:  cloudProvider,
		InstanceType:   instanceType,
		InstanceID:     instanceID,
	}

	for _, fs := range filesystems {
		machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: fs.Inodes})
	}

	return machineInfo, nil
}

func getVersionInfo() (*info.VersionInfo, error) {

	kernel_version := getKernelVersion()
	container_os := getContainerOsVersion()
	docker_version := getDockerVersion()

	return &info.VersionInfo{
		KernelVersion:      kernel_version,
		ContainerOsVersion: container_os,
		DockerVersion:      docker_version,
		CadvisorVersion:    version.Info["version"],
		CadvisorRevision:   version.Info["revision"],
	}, nil
}

func getContainerOsVersion() string {
	container_os := "Unknown"
	os_release, err := ioutil.ReadFile("/etc/os-release")
	if err == nil {
		// We might be running in a busybox or some hand-crafted image.
		// It's useful to know why cadvisor didn't come up.
		for _, line := range strings.Split(string(os_release), "\n") {
			parsed := strings.Split(line, "\"")
			if len(parsed) == 3 && parsed[0] == "PRETTY_NAME=" {
				container_os = parsed[1]
				break
			}
		}
	}
	return container_os
}

func getDockerVersion() string {
	docker_version := "Unknown"
	client, err := docker.Client()
	if err == nil {
		version, err := client.Version()
		if err == nil {
			docker_version = version.Get("Version")
		}
	}
	return docker_version
}

func getKernelVersion() string {
	uname := &syscall.Utsname{}

	if err := syscall.Uname(uname); err != nil {
		return "Unknown"
	}

	release := make([]byte, len(uname.Release))
	i := 0
	for _, c := range uname.Release {
		release[i] = byte(c)
		i++
	}
	release = release[:bytes.IndexByte(release, 0)]

	return string(release)
}
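getInfoFromFiles above is a simple first-match lookup: split the flag value on commas and return the trimmed contents of the first readable file. A self-contained sketch of the same pattern, not part of the vendored code; the paths are the defaults of the machine_id_file flag, and the output depends on the host:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// firstReadable mirrors getInfoFromFiles: try each comma-separated path in
// order and return the first file that can be read, trimmed of whitespace.
func firstReadable(filePaths string) string {
	for _, file := range strings.Split(filePaths, ",") {
		if id, err := ioutil.ReadFile(file); err == nil {
			return strings.TrimSpace(string(id))
		}
	}
	return "" // none of the candidates were readable
}

func main() {
	// Defaults of the machine_id_file flag defined in machine.go above.
	fmt.Println(firstReadable("/etc/machine-id,/var/lib/dbus/machine-id"))
}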
vendor/github.com/google/cadvisor/manager/manager.go (new file, generated, vendored, 1236 lines)
File diff suppressed because it is too large
vendor/github.com/google/cadvisor/manager/manager_mock.go (new file, generated, vendored, 119 lines)
@@ -0,0 +1,119 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build test

package manager

import (
	"github.com/google/cadvisor/events"
	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/info/v2"

	"github.com/stretchr/testify/mock"
)

type ManagerMock struct {
	mock.Mock
}

func (c *ManagerMock) Start() error {
	args := c.Called()
	return args.Error(0)
}

func (c *ManagerMock) Stop() error {
	args := c.Called()
	return args.Error(0)
}

func (c *ManagerMock) GetContainerInfo(name string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
	args := c.Called(name, query)
	return args.Get(0).(*info.ContainerInfo), args.Error(1)
}

func (c *ManagerMock) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
	args := c.Called(containerName, query)
	return args.Get(0).([]*info.ContainerInfo), args.Error(1)
}

func (c *ManagerMock) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
	args := c.Called(query)
	return args.Get(0).(map[string]info.ContainerInfo), args.Error(1)
}

func (c *ManagerMock) DockerContainer(name string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
	args := c.Called(name, query)
	return args.Get(0).(info.ContainerInfo), args.Error(1)
}

func (c *ManagerMock) GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error) {
	args := c.Called(containerName, options)
	return args.Get(0).(map[string]v2.ContainerSpec), args.Error(1)
}

func (c *ManagerMock) GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error) {
	args := c.Called(containerName, options)
	return args.Get(0).(map[string]v2.DerivedStats), args.Error(1)
}

func (c *ManagerMock) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
	args := c.Called(containerName, options)
	return args.Get(0).(map[string]*info.ContainerInfo), args.Error(1)
}

func (c *ManagerMock) Exists(name string) bool {
	args := c.Called(name)
	return args.Get(0).(bool)
}

func (c *ManagerMock) WatchForEvents(queryuest *events.Request, passedChannel chan *info.Event) error {
	args := c.Called(queryuest, passedChannel)
	return args.Error(0)
}

func (c *ManagerMock) GetPastEvents(queryuest *events.Request) ([]*info.Event, error) {
	args := c.Called(queryuest)
	return args.Get(0).([]*info.Event), args.Error(1)
}

func (c *ManagerMock) GetMachineInfo() (*info.MachineInfo, error) {
	args := c.Called()
	return args.Get(0).(*info.MachineInfo), args.Error(1)
}

func (c *ManagerMock) GetVersionInfo() (*info.VersionInfo, error) {
	args := c.Called()
	return args.Get(0).(*info.VersionInfo), args.Error(1)
}

func (c *ManagerMock) GetFsInfo() ([]v2.FsInfo, error) {
	args := c.Called()
	return args.Get(0).([]v2.FsInfo), args.Error(1)
}

func (c *ManagerMock) GetProcessList(name string, options v2.RequestOptions) ([]v2.ProcessInfo, error) {
	args := c.Called()
	return args.Get(0).([]v2.ProcessInfo), args.Error(1)
}

func (c *ManagerMock) DockerInfo() (DockerStatus, error) {
	args := c.Called()
	return args.Get(0).(DockerStatus), args.Error(1)
}

func (c *ManagerMock) DockerImages() ([]DockerImage, error) {
	args := c.Called()
	return args.Get(0).([]DockerImage), args.Error(1)
}
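ManagerMock embeds testify's mock.Mock, so every method just records the call and returns whatever the test programmed via On(...).Return(...). A hedged sketch of how a test might use it follows; it is not part of the commit, the test name and expected values are made up, and since the file carries a "+build test" tag the test file would need the same tag:

// +build test

package manager

import (
	"testing"

	info "github.com/google/cadvisor/info/v1"
	"github.com/stretchr/testify/assert"
)

func TestManagerMockSketch(t *testing.T) {
	m := new(ManagerMock)
	// Program the canned return values for GetMachineInfo.
	m.On("GetMachineInfo").Return(&info.MachineInfo{NumCores: 4}, nil)

	mi, err := m.GetMachineInfo()
	assert.NoError(t, err)
	assert.Equal(t, 4, mi.NumCores)
	m.AssertExpectations(t) // fails the test if an expected call never happened
}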