update cadvisor dependency to v0.28.2
vendor/github.com/google/cadvisor/manager/BUILD (generated, vendored): 2 lines changed
@@ -15,6 +15,7 @@ go_library(
         "//vendor/github.com/google/cadvisor/cache/memory:go_default_library",
         "//vendor/github.com/google/cadvisor/collector:go_default_library",
         "//vendor/github.com/google/cadvisor/container:go_default_library",
+        "//vendor/github.com/google/cadvisor/container/containerd:go_default_library",
         "//vendor/github.com/google/cadvisor/container/crio:go_default_library",
         "//vendor/github.com/google/cadvisor/container/docker:go_default_library",
         "//vendor/github.com/google/cadvisor/container/raw:go_default_library",
@@ -34,6 +35,7 @@ go_library(
         "//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library",
         "//vendor/github.com/google/cadvisor/version:go_default_library",
         "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library",
+        "//vendor/k8s.io/utils/clock:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/manager/container.go (generated, vendored): 108 lines changed
@@ -40,6 +40,7 @@ import (
 
 	units "github.com/docker/go-units"
 	"github.com/golang/glog"
+	"k8s.io/utils/clock"
 )
 
 // Housekeeping interval.
@@ -65,8 +66,11 @@ type containerData struct {
 	housekeepingInterval     time.Duration
 	maxHousekeepingInterval  time.Duration
 	allowDynamicHousekeeping bool
-	lastUpdatedTime          time.Time
+	infoLastUpdatedTime      time.Time
+	statsLastUpdatedTime     time.Time
 	lastErrorTime            time.Time
+	// used to track time
+	clock clock.Clock
 
 	// Decay value used for load average smoothing. Interval length of 10 seconds is used.
 	loadDecay float64
@@ -77,6 +81,9 @@ type containerData struct {
 	// Tells the container to stop.
 	stop chan bool
 
+	// Tells the container to immediately collect stats
+	onDemandChan chan chan struct{}
+
 	// Runs custom metric collectors.
 	collectorManager collector.CollectorManager
 
@@ -110,16 +117,43 @@ func (c *containerData) Stop() error {
 }
 
 func (c *containerData) allowErrorLogging() bool {
-	if time.Since(c.lastErrorTime) > time.Minute {
-		c.lastErrorTime = time.Now()
+	if c.clock.Since(c.lastErrorTime) > time.Minute {
+		c.lastErrorTime = c.clock.Now()
 		return true
 	}
 	return false
 }
 
+// OnDemandHousekeeping performs housekeeping on the container and blocks until it has completed.
+// It is designed to be used in conjunction with periodic housekeeping, and will cause the timer for
+// periodic housekeeping to reset. This should be used sparingly, as calling OnDemandHousekeeping frequently
+// can have serious performance costs.
+func (c *containerData) OnDemandHousekeeping(maxAge time.Duration) {
+	if c.clock.Since(c.statsLastUpdatedTime) > maxAge {
+		housekeepingFinishedChan := make(chan struct{})
+		c.onDemandChan <- housekeepingFinishedChan
+		select {
+		case <-c.stop:
+		case <-housekeepingFinishedChan:
+		}
+	}
+}
+
+// notifyOnDemand notifies all calls to OnDemandHousekeeping that housekeeping is finished
+func (c *containerData) notifyOnDemand() {
+	for {
+		select {
+		case finishedChan := <-c.onDemandChan:
+			close(finishedChan)
+		default:
+			return
+		}
+	}
+}
+
 func (c *containerData) GetInfo(shouldUpdateSubcontainers bool) (*containerInfo, error) {
 	// Get spec and subcontainers.
-	if time.Since(c.lastUpdatedTime) > 5*time.Second {
+	if c.clock.Since(c.infoLastUpdatedTime) > 5*time.Second {
 		err := c.updateSpec()
 		if err != nil {
 			return nil, err
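The on-demand path above is a channel-of-channels handshake: each caller of OnDemandHousekeeping sends a fresh ack channel into onDemandChan and blocks until the housekeeping goroutine closes it, and notifyOnDemand closes every ack still queued after a tick. A minimal, self-contained sketch of the same pattern (standalone names, not cadvisor's types):

```go
package main

import (
	"fmt"
	"time"
)

// onDemand carries one ack channel per caller; the worker closes the ack
// when the requested cycle has finished (the idea behind onDemandChan).
var onDemand = make(chan chan struct{}, 100)

// requestCycle blocks until the worker acknowledges one full cycle.
func requestCycle() {
	done := make(chan struct{})
	onDemand <- done
	<-done
}

// notifyAll closes every queued ack, mirroring notifyOnDemand above.
func notifyAll() {
	for {
		select {
		case done := <-onDemand:
			close(done)
		default:
			return
		}
	}
}

func main() {
	go func() {
		for {
			select {
			case done := <-onDemand:
				fmt.Println("cycle triggered on demand")
				close(done) // ack the caller that asked
			case <-time.After(200 * time.Millisecond):
				fmt.Println("periodic cycle")
			}
			notifyAll() // ack anyone who queued up while the cycle was running
		}
	}()

	requestCycle()
	requestCycle()
}
```

Closing the ack channel (rather than sending on it) lets any number of waiters unblock at once, which is why a single notify loop can satisfy every pending request.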
@@ -130,7 +164,7 @@ func (c *containerData) GetInfo(shouldUpdateSubcontainers bool) (*containerInfo,
 				return nil, err
 			}
 		}
-		c.lastUpdatedTime = time.Now()
+		c.infoLastUpdatedTime = c.clock.Now()
 	}
 	// Make a copy of the info for the user.
 	c.lock.Lock()
@@ -310,7 +344,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
 	return processes, nil
 }
 
-func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (*containerData, error) {
+func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, clock clock.Clock) (*containerData, error) {
 	if memoryCache == nil {
 		return nil, fmt.Errorf("nil memory storage")
 	}
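newContainerData now receives a clock.Clock instead of calling time.Now directly; production code passes clock.RealClock{} (see the manager.go hunk further down), while tests can substitute a fake and advance time manually. A self-contained sketch of why that helps, using a hand-rolled fake clock rather than the real k8s.io/utils/clock package so it stands alone; the interface below only mirrors the subset the diff relies on:

```go
package main

import (
	"fmt"
	"time"
)

// Clock mirrors the small subset of k8s.io/utils/clock used in the diff.
type Clock interface {
	Now() time.Time
	Since(t time.Time) time.Duration
}

// fakeClock is advanced manually, so a test controls "elapsed" time.
type fakeClock struct{ now time.Time }

func (f *fakeClock) Now() time.Time                  { return f.now }
func (f *fakeClock) Since(t time.Time) time.Duration { return f.now.Sub(t) }
func (f *fakeClock) Step(d time.Duration)            { f.now = f.now.Add(d) }

// rateLimitedLogger allows one log per minute, like allowErrorLogging above.
type rateLimitedLogger struct {
	clock         Clock
	lastErrorTime time.Time
}

func (l *rateLimitedLogger) allow() bool {
	if l.clock.Since(l.lastErrorTime) > time.Minute {
		l.lastErrorTime = l.clock.Now()
		return true
	}
	return false
}

func main() {
	fc := &fakeClock{now: time.Unix(0, 0)}
	l := &rateLimitedLogger{clock: fc}

	fc.Step(2 * time.Minute)
	fmt.Println(l.allow()) // true: more than a minute has "passed"
	fmt.Println(l.allow()) // false: second call within the same simulated minute
}
```

With the wall clock, that second assertion would need a real one-minute sleep; with an injected clock it runs instantly and deterministically.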
@@ -332,6 +366,8 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
 		loadAvg:          -1.0, // negative value indicates uninitialized.
 		stop:             make(chan bool, 1),
 		collectorManager: collectorManager,
+		onDemandChan:     make(chan chan struct{}, 100),
+		clock:            clock,
 	}
 	cont.info.ContainerReference = ref
 
@@ -362,7 +398,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
 }
 
 // Determine when the next housekeeping should occur.
-func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Time {
+func (self *containerData) nextHousekeepingInterval() time.Duration {
 	if self.allowDynamicHousekeeping {
 		var empty time.Time
 		stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2)
@@ -385,7 +421,7 @@ func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Tim
 		}
 	}
 
-	return lastHousekeeping.Add(jitter(self.housekeepingInterval, 1.0))
+	return jitter(self.housekeepingInterval, 1.0)
 }
 
 // TODO(vmarmol): Implement stats collecting as a custom collector.
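nextHousekeepingInterval now returns a duration to feed into the timer rather than an absolute deadline. The value is randomized by jitter, a helper defined elsewhere in this file and not shown in the diff; a generic sketch of the idea (an assumption about the shape, not cadvisor's exact implementation):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitterDuration returns base plus a random extra of up to maxFactor*base,
// so that many containers do not run housekeeping in lockstep.
func jitterDuration(base time.Duration, maxFactor float64) time.Duration {
	if maxFactor <= 0.0 {
		return base
	}
	return base + time.Duration(rand.Float64()*maxFactor*float64(base))
}

func main() {
	// With maxFactor 1.0, as used above, the result lands in [1s, 2s).
	fmt.Println(jitterDuration(time.Second, 1.0))
}
```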
@@ -411,24 +447,19 @@ func (c *containerData) housekeeping() {
 
 	// Housekeep every second.
 	glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
-	lastHousekeeping := time.Now()
+	houseKeepingTimer := c.clock.NewTimer(0 * time.Second)
+	defer houseKeepingTimer.Stop()
 	for {
-		select {
-		case <-c.stop:
-			// Stop housekeeping when signaled.
+		if !c.housekeepingTick(houseKeepingTimer.C(), longHousekeeping) {
 			return
-		default:
-			// Perform housekeeping.
-			start := time.Now()
-			c.housekeepingTick()
-
-			// Log if housekeeping took too long.
-			duration := time.Since(start)
-			if duration >= longHousekeeping {
-				glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
-			}
 		}
-
+		// Stop and drain the timer so that it is safe to reset it
+		if !houseKeepingTimer.Stop() {
+			select {
+			case <-houseKeepingTimer.C():
+			default:
+			}
+		}
 		// Log usage if asked to do so.
 		if c.logUsage {
 			const numSamples = 60
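The loop now reuses one timer instead of sleeping, and it must stop and drain that timer before calling Reset: Reset on a timer whose tick is still buffered in its channel would make the next wait fire immediately. That matters here because the loop can also wake up for reasons other than the timer (stop or an on-demand request), leaving an unconsumed tick behind. A standalone sketch of the same idiom with a plain time.Timer and a hypothetical extra wake-up source (the diff goes through the clock.Clock wrapper, but the rule is identical):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	wake := make(chan struct{}, 1) // extra wake-up source, standing in for onDemandChan
	wake <- struct{}{}

	t := time.NewTimer(0) // fires immediately, like NewTimer(0 * time.Second) above
	defer t.Stop()

	for i := 0; i < 3; i++ {
		// The loop can wake up either on the timer or on demand.
		select {
		case <-t.C:
			fmt.Println("woke on timer", i)
		case <-wake:
			fmt.Println("woke on demand", i)
		}

		// Stop and drain a possibly pending tick before Reset; otherwise a
		// stale tick left in t.C would make the reset timer fire right away.
		if !t.Stop() {
			select {
			case <-t.C:
			default:
			}
		}
		t.Reset(50 * time.Millisecond)
	}
}
```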
@@ -455,26 +486,35 @@ func (c *containerData) housekeeping() {
 				glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
 			}
 		}
-
-		next := c.nextHousekeeping(lastHousekeeping)
-
-		// Schedule the next housekeeping. Sleep until that time.
-		if time.Now().Before(next) {
-			time.Sleep(next.Sub(time.Now()))
-		} else {
-			next = time.Now()
-		}
-		lastHousekeeping = next
+		houseKeepingTimer.Reset(c.nextHousekeepingInterval())
 	}
 }
 
-func (c *containerData) housekeepingTick() {
+func (c *containerData) housekeepingTick(timer <-chan time.Time, longHousekeeping time.Duration) bool {
+	select {
+	case <-c.stop:
+		// Stop housekeeping when signaled.
+		return false
+	case finishedChan := <-c.onDemandChan:
+		// notify the calling function once housekeeping has completed
+		defer close(finishedChan)
+	case <-timer:
+	}
+	start := c.clock.Now()
 	err := c.updateStats()
 	if err != nil {
 		if c.allowErrorLogging() {
 			glog.Infof("Failed to update stats for container \"%s\": %s", c.info.Name, err)
 		}
 	}
+	// Log if housekeeping took too long.
+	duration := c.clock.Since(start)
+	if duration >= longHousekeeping {
+		glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
+	}
+	c.notifyOnDemand()
+	c.statsLastUpdatedTime = c.clock.Now()
+	return true
 }
 
 func (c *containerData) updateSpec() error {
@@ -550,7 +590,7 @@ func (c *containerData) updateStats() error {
 	var customStatsErr error
 	cm := c.collectorManager.(*collector.GenericCollectorManager)
 	if len(cm.Collectors) > 0 {
-		if cm.NextCollectionTime.Before(time.Now()) {
+		if cm.NextCollectionTime.Before(c.clock.Now()) {
 			customStats, err := c.updateCustomStats()
 			if customStats != nil {
 				stats.CustomMetrics = customStats
vendor/github.com/google/cadvisor/manager/manager.go (generated, vendored): 23 lines changed
@@ -30,6 +30,7 @@ import (
 	"github.com/google/cadvisor/cache/memory"
 	"github.com/google/cadvisor/collector"
 	"github.com/google/cadvisor/container"
+	"github.com/google/cadvisor/container/containerd"
 	"github.com/google/cadvisor/container/crio"
 	"github.com/google/cadvisor/container/docker"
 	"github.com/google/cadvisor/container/raw"
@@ -49,6 +50,7 @@ import (
 
 	"github.com/golang/glog"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
+	"k8s.io/utils/clock"
 )
 
 var globalHousekeepingInterval = flag.Duration("global_housekeeping_interval", 1*time.Minute, "Interval between global housekeepings")
@@ -279,6 +281,11 @@ func (self *manager) Start() error {
 		self.containerWatchers = append(self.containerWatchers, watcher)
 	}
 
+	err = containerd.Register(self, self.fsInfo, self.ignoreMetrics)
+	if err != nil {
+		glog.Warningf("Registration of the containerd container factory failed: %v", err)
+	}
+
 	err = crio.Register(self, self.fsInfo, self.ignoreMetrics)
 	if err != nil {
 		glog.Warningf("Registration of the crio container factory failed: %v", err)
@@ -707,6 +714,18 @@ func (self *manager) getRequestedContainers(containerName string, options v2.Req
 	default:
 		return containersMap, fmt.Errorf("invalid request type %q", options.IdType)
 	}
+	if options.MaxAge != nil {
+		// update stats for all containers in containersMap
+		var waitGroup sync.WaitGroup
+		waitGroup.Add(len(containersMap))
+		for _, container := range containersMap {
+			go func(cont *containerData) {
+				cont.OnDemandHousekeeping(*options.MaxAge)
+				waitGroup.Done()
+			}(container)
+		}
+		waitGroup.Wait()
+	}
 	return containersMap, nil
 }
 
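When MaxAge is set, the manager fans out one goroutine per container and waits for all of them, so a request pays roughly the latency of the slowest refresh rather than the sum of all refreshes. Note that the container is passed as a parameter to the goroutine, which avoids capturing the loop variable. The same fan-out and wait shape in isolation, with placeholder work standing in for OnDemandHousekeeping:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	containers := []string{"/", "/kubepods", "/system.slice"}

	var waitGroup sync.WaitGroup
	waitGroup.Add(len(containers))
	for _, name := range containers {
		go func(n string) {
			defer waitGroup.Done()
			// Placeholder for cont.OnDemandHousekeeping(*options.MaxAge).
			time.Sleep(10 * time.Millisecond)
			fmt.Println("refreshed", n)
		}(name) // pass the loop variable so each goroutine sees its own value
	}
	waitGroup.Wait() // all refreshes done; total wait is about the slowest one
}
```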
@@ -804,6 +823,8 @@ func (m *manager) Exists(containerName string) bool {
 func (m *manager) GetProcessList(containerName string, options v2.RequestOptions) ([]v2.ProcessInfo, error) {
 	// override recursive. Only support single container listing.
 	options.Recursive = false
+	// override MaxAge. ProcessList does not require updated stats.
+	options.MaxAge = nil
 	conts, err := m.getRequestedContainers(containerName, options)
 	if err != nil {
 		return nil, err
@@ -919,7 +940,7 @@ func (m *manager) createContainerLocked(containerName string, watchSource watche
 	}
 
 	logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer
-	cont, err := newContainerData(containerName, m.memoryCache, handler, logUsage, collectorManager, m.maxHousekeepingInterval, m.allowDynamicHousekeeping)
+	cont, err := newContainerData(containerName, m.memoryCache, handler, logUsage, collectorManager, m.maxHousekeepingInterval, m.allowDynamicHousekeeping, clock.RealClock{})
 	if err != nil {
 		return err
 	}
vendor/github.com/google/cadvisor/manager/watcher/raw/BUILD (generated, vendored): 2 lines changed
@@ -6,11 +6,11 @@ go_library(
     importpath = "github.com/google/cadvisor/manager/watcher/raw",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/fsnotify/fsnotify:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/container/common:go_default_library",
        "//vendor/github.com/google/cadvisor/container/libcontainer:go_default_library",
         "//vendor/github.com/google/cadvisor/manager/watcher:go_default_library",
+        "//vendor/golang.org/x/exp/inotify:go_default_library",
     ],
 )
vendor/github.com/google/cadvisor/manager/watcher/raw/raw.go (generated, vendored): 16 lines changed
@@ -27,8 +27,8 @@ import (
 	"github.com/google/cadvisor/container/libcontainer"
 	"github.com/google/cadvisor/manager/watcher"
 
-	"github.com/fsnotify/fsnotify"
 	"github.com/golang/glog"
+	"golang.org/x/exp/inotify"
 )
 
 type rawContainerWatcher struct {
@@ -121,7 +121,7 @@ func (self *rawContainerWatcher) watchDirectory(dir string, containerName string
 		if cleanup {
 			_, err := self.watcher.RemoveWatch(containerName, dir)
 			if err != nil {
-				glog.Warningf("Failed to remove fsnotify watch for %q: %v", dir, err)
+				glog.Warningf("Failed to remove inotify watch for %q: %v", dir, err)
 			}
 		}
 	}()
@@ -152,16 +152,18 @@ func (self *rawContainerWatcher) watchDirectory(dir string, containerName string
 	return alreadyWatching, nil
 }
 
-func (self *rawContainerWatcher) processEvent(event fsnotify.Event, events chan watcher.ContainerEvent) error {
-	// Convert the fsnotify event type to a container create or delete.
+func (self *rawContainerWatcher) processEvent(event *inotify.Event, events chan watcher.ContainerEvent) error {
+	// Convert the inotify event type to a container create or delete.
 	var eventType watcher.ContainerEventType
 	switch {
-	case event.Op == fsnotify.Create:
+	case (event.Mask & inotify.IN_CREATE) > 0:
 		eventType = watcher.ContainerAdd
-	case event.Op == fsnotify.Remove:
+	case (event.Mask & inotify.IN_DELETE) > 0:
 		eventType = watcher.ContainerDelete
-	case event.Op == fsnotify.Rename:
+	case (event.Mask & inotify.IN_MOVED_FROM) > 0:
 		eventType = watcher.ContainerDelete
+	case (event.Mask & inotify.IN_MOVED_TO) > 0:
+		eventType = watcher.ContainerAdd
 	default:
 		// Ignore other events.
 		return nil
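The watcher moves from fsnotify's event.Op back to raw inotify masks. A single inotify event can carry several bits at once, so each case tests a bit with & instead of comparing for equality, and moves are split into MOVED_FROM (treated as delete) and MOVED_TO (treated as add). A self-contained sketch of that bit-mask dispatch, using locally defined constants with the standard Linux inotify values rather than importing golang.org/x/exp/inotify:

```go
package main

import "fmt"

// Standard Linux inotify mask bits (values from <sys/inotify.h>).
const (
	inCreate    uint32 = 0x00000100 // IN_CREATE
	inDelete    uint32 = 0x00000200 // IN_DELETE
	inMovedFrom uint32 = 0x00000040 // IN_MOVED_FROM
	inMovedTo   uint32 = 0x00000080 // IN_MOVED_TO
)

type containerEventType string

const (
	containerAdd    containerEventType = "add"
	containerDelete containerEventType = "delete"
)

// classify mirrors processEvent above: test bits rather than equality,
// because a mask can carry extra flags alongside the action bit.
func classify(mask uint32) (containerEventType, bool) {
	switch {
	case mask&inCreate > 0:
		return containerAdd, true
	case mask&inDelete > 0:
		return containerDelete, true
	case mask&inMovedFrom > 0:
		return containerDelete, true
	case mask&inMovedTo > 0:
		return containerAdd, true
	default:
		// Ignore other events.
		return "", false
	}
}

func main() {
	const inIsDir uint32 = 0x40000000 // IN_ISDIR, set alongside the action bit for directories
	if ev, ok := classify(inCreate | inIsDir); ok {
		fmt.Println("directory created ->", ev) // prints "add"
	}
}
```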