updated cadvisor version

bindata-mockuser 2016-08-02 16:01:00 -07:00
parent fa5f3b816e
commit e00a35ccad
18 changed files with 211 additions and 161 deletions

Godeps/Godeps.json (generated)

@@ -1035,203 +1035,203 @@
},
{
"ImportPath": "github.com/google/cadvisor/api",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/cache/memory",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/collector",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/container",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/container/common",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/container/docker",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/container/libcontainer",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/container/raw",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/container/rkt",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/container/systemd",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/devicemapper",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/events",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/fs",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/healthz",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/http",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/http/mux",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/info/v1",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/info/v1/test",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/info/v2",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/machine",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/manager",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/manager/watcher",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/manager/watcher/raw",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/manager/watcher/rkt",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/metrics",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/pages",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/pages/static",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/storage",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/summary",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/cloudinfo",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/cpuload",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/docker",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/oomparser",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/sysfs",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/sysinfo",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/utils/tail",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/validate",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/cadvisor/version",
"Comment": "v0.23.2-63-ge47efa0",
"Rev": "e47efa0e8af65e9a2a2eb2ce955e156eac067852"
"Comment": "v0.23.2-79-gc6c06d4",
"Rev": "c6c06d440ab2fcaae9211dda6dcbbaa1e98a054b"
},
{
"ImportPath": "github.com/google/certificate-transparency/go",


@@ -78,7 +78,7 @@ func New(port uint, runtime string) (Interface, error) {
}
// Create and start the cAdvisor container manager.
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisorMetrics.MetricSet{cadvisorMetrics.NetworkTcpUsageMetrics: struct{}{}})
m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping, cadvisorMetrics.MetricSet{cadvisorMetrics.NetworkTcpUsageMetrics: struct{}{}}, http.DefaultClient)
if err != nil {
return nil, err
}
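The hunk above threads an *http.Client into cAdvisor's manager. A minimal sketch of the same wiring with a bounded timeout; the collectorClient variable and the 10-second value are illustrative assumptions, while the commit itself passes http.DefaultClient, which never times out:

collectorClient := &http.Client{Timeout: 10 * time.Second} // assumes "net/http" and "time" are imported
m, err := manager.New(
	memory.New(statsCacheDuration, nil),
	sysFs,
	maxHousekeepingInterval,
	allowDynamicHousekeeping,
	cadvisorMetrics.MetricSet{cadvisorMetrics.NetworkTcpUsageMetrics: struct{}{}},
	collectorClient, // new trailing argument introduced by this commit
)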


@@ -125,14 +125,14 @@ func (sb *summaryBuilder) build() (*stats.Summary, error) {
AvailableBytes: &sb.rootFsInfo.Available,
CapacityBytes: &sb.rootFsInfo.Capacity,
UsedBytes: &sb.rootFsInfo.Usage,
InodesFree: &sb.rootFsInfo.InodesFree},
InodesFree: sb.rootFsInfo.InodesFree},
StartTime: rootStats.StartTime,
Runtime: &stats.RuntimeStats{
ImageFs: &stats.FsStats{
AvailableBytes: &sb.imageFsInfo.Available,
CapacityBytes: &sb.imageFsInfo.Capacity,
UsedBytes: &sb.imageStats.TotalStorageBytes,
InodesFree: &sb.imageFsInfo.InodesFree,
InodesFree: sb.imageFsInfo.InodesFree,
},
},
}
@@ -164,14 +164,14 @@ func (sb *summaryBuilder) containerInfoV2FsStats(
cs.Logs = &stats.FsStats{
AvailableBytes: &sb.rootFsInfo.Available,
CapacityBytes: &sb.rootFsInfo.Capacity,
InodesFree: &sb.rootFsInfo.InodesFree,
InodesFree: sb.rootFsInfo.InodesFree,
}
// The container rootFs lives on the imageFs devices (which may not be the node root fs)
cs.Rootfs = &stats.FsStats{
AvailableBytes: &sb.imageFsInfo.Available,
CapacityBytes: &sb.imageFsInfo.Capacity,
InodesFree: &sb.imageFsInfo.InodesFree,
InodesFree: sb.imageFsInfo.InodesFree,
}
lcs, found := sb.latestContainerStats(info)
if !found {


@@ -118,15 +118,17 @@ func TestBuildSummary(t *testing.T) {
"/pod2-c0": summaryTestContainerInfo(seedPod2Container, pName2, namespace2, cName20),
}
freeRootfsInodes := rootfsInodesFree
rootfs := v2.FsInfo{
Capacity: rootfsCapacity,
Available: rootfsAvailable,
InodesFree: rootfsInodesFree,
InodesFree: &freeRootfsInodes,
}
freeImagefsInodes := imagefsInodesFree
imagefs := v2.FsInfo{
Capacity: imagefsCapacity,
Available: imagefsAvailable,
InodesFree: imagefsInodesFree,
InodesFree: &freeImagefsInodes,
}
// memory limit overrides for each container (used to test available bytes if a memory limit is known)
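The new freeRootfsInodes / freeImagefsInodes locals exist because InodesFree is now a *uint64, and Go only permits taking the address of an addressable value: a constant (which these seed values presumably are) must first be copied into a variable. A self-contained sketch of the rule, with a hypothetical value:

const rootfsInodesFree uint64 = 1000
// bad := &rootfsInodesFree  // does not compile: cannot take the address of a constant
free := rootfsInodesFree // copy into an addressable variable first
fsInfo := v2.FsInfo{InodesFree: &free}
_ = fsInfo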


@@ -37,6 +37,9 @@ type GenericCollector struct {
//holds information necessary to extract metrics
info *collectorInfo
// The Http client to use when connecting to metric endpoints
httpClient *http.Client
}
type collectorInfo struct {
@@ -52,7 +55,7 @@ type collectorInfo struct {
}
// Returns a new collector using the information extracted from the config file
func NewCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler) (*GenericCollector, error) {
func NewCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler, httpClient *http.Client) (*GenericCollector, error) {
var configInJSON Config
err := json.Unmarshal(configFile, &configInJSON)
if err != nil {
@@ -102,6 +105,7 @@ func NewCollector(collectorName string, configFile []byte, metricCountLimit int,
regexps: regexprs,
metricCountLimit: metricCountLimit,
},
httpClient: httpClient,
}, nil
}
@@ -134,7 +138,7 @@ func (collector *GenericCollector) Collect(metrics map[string][]v1.MetricVal) (t
nextCollectionTime := currentTime.Add(time.Duration(collector.info.minPollingFrequency))
uri := collector.configFile.Endpoint.URL
response, err := http.Get(uri)
response, err := collector.httpClient.Get(uri)
if err != nil {
return nextCollectionTime, nil, err
}
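Because the client is now injected, each caller controls the transport policy for metric scrapes. A hedged usage sketch; the collector name, config bytes, and count limit below are placeholders:

client := &http.Client{Timeout: 5 * time.Second} // bound each scrape so a stuck endpoint can't hang collection
c, err := collector.NewCollector("myapp", configFile, 100, containerHandler, client)
if err != nil {
	return err
}
_ = c // register c with the container's collector manager here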


@@ -44,10 +44,13 @@ type PrometheusCollector struct {
// Limit for the number of scraped metrics. If the count is higher,
// no metrics will be returned.
metricCountLimit int
// The Http client to use when connecting to metric endpoints
httpClient *http.Client
}
// Returns a new collector using the information extracted from the config file
func NewPrometheusCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler) (*PrometheusCollector, error) {
func NewPrometheusCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler, httpClient *http.Client) (*PrometheusCollector, error) {
var configInJSON Prometheus
err := json.Unmarshal(configFile, &configInJSON)
if err != nil {
@@ -88,6 +91,7 @@ func NewPrometheusCollector(collectorName string, configFile []byte, metricCount
configFile: configInJSON,
metricsSet: metricsSet,
metricCountLimit: metricCountLimit,
httpClient: httpClient,
}, nil
}
@@ -111,7 +115,8 @@ func getMetricData(line string) string {
func (collector *PrometheusCollector) GetSpec() []v1.MetricSpec {
specs := []v1.MetricSpec{}
response, err := http.Get(collector.configFile.Endpoint.URL)
response, err := collector.httpClient.Get(collector.configFile.Endpoint.URL)
if err != nil {
return specs
}
@@ -157,7 +162,7 @@ func (collector *PrometheusCollector) Collect(metrics map[string][]v1.MetricVal)
nextCollectionTime := currentTime.Add(time.Duration(collector.pollingFrequency))
uri := collector.configFile.Endpoint.URL
response, err := http.Get(uri)
response, err := collector.httpClient.Get(uri)
if err != nil {
return nextCollectionTime, nil, err
}
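The same injection makes the collector testable without a live endpoint. A sketch using net/http/httptest; the JSON shape of the config literal is an assumption about what Prometheus.Endpoint.URL expects:

srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, "some_metric 42") // serve one fake Prometheus sample
}))
defer srv.Close()
config := []byte(`{"endpoint": {"url": "` + srv.URL + `"}}`)
promCollector, err := collector.NewPrometheusCollector("prom", config, 100, containerHandler, srv.Client())
_ = promCollector // drive GetSpec/Collect against the fake server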


@@ -294,7 +294,10 @@ func (h *dockerFsHandler) Usage() (uint64, uint64) {
if h.thinPoolWatcher != nil {
thinPoolUsage, err := h.thinPoolWatcher.GetUsage(h.deviceID)
if err != nil {
glog.Errorf("unable to get fs usage from thin pool for device %v: %v", h.deviceID, err)
// TODO: ideally we should keep track of how many times we failed to get the usage for this
// device vs how many refreshes of the cache there have been, and display an error e.g. if we've
// had at least 1 refresh and we still can't find the device.
glog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err)
} else {
baseUsage = thinPoolUsage
usage += thinPoolUsage
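Downgrading from glog.Errorf to glog.V(5).Infof means the message is emitted only at high verbosity rather than on every cache miss. For reference, how glog's gating works:

glog.V(5).Infof("device %s: %v", deviceID, err) // logged only when the process runs with -v=5 or higher
if glog.V(5) { // equivalent explicit form; skips building the arguments entirely when disabled
	glog.Infof("device %s: %v", deviceID, err)
}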


@@ -165,6 +165,37 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
return spec, nil
}
func fsToFsStats(fs *fs.Fs) info.FsStats {
inodes := uint64(0)
inodesFree := uint64(0)
hasInodes := fs.InodesFree != nil
if hasInodes {
inodes = *fs.Inodes
inodesFree = *fs.InodesFree
}
return info.FsStats{
Device: fs.Device,
Type: fs.Type.String(),
Limit: fs.Capacity,
Usage: fs.Capacity - fs.Free,
HasInodes: hasInodes,
Inodes: inodes,
InodesFree: inodesFree,
Available: fs.Available,
ReadsCompleted: fs.DiskStats.ReadsCompleted,
ReadsMerged: fs.DiskStats.ReadsMerged,
SectorsRead: fs.DiskStats.SectorsRead,
ReadTime: fs.DiskStats.ReadTime,
WritesCompleted: fs.DiskStats.WritesCompleted,
WritesMerged: fs.DiskStats.WritesMerged,
SectorsWritten: fs.DiskStats.SectorsWritten,
WriteTime: fs.DiskStats.WriteTime,
IoInProgress: fs.DiskStats.IoInProgress,
IoTime: fs.DiskStats.IoTime,
WeightedIoTime: fs.DiskStats.WeightedIoTime,
}
}
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
// Get Filesystem information only for the root cgroup.
if isRootCgroup(self.name) {
@@ -172,27 +203,9 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
if err != nil {
return err
}
for _, fs := range filesystems {
stats.Filesystem = append(stats.Filesystem,
info.FsStats{
Device: fs.Device,
Type: fs.Type.String(),
Limit: fs.Capacity,
Usage: fs.Capacity - fs.Free,
Available: fs.Available,
InodesFree: fs.InodesFree,
ReadsCompleted: fs.DiskStats.ReadsCompleted,
ReadsMerged: fs.DiskStats.ReadsMerged,
SectorsRead: fs.DiskStats.SectorsRead,
ReadTime: fs.DiskStats.ReadTime,
WritesCompleted: fs.DiskStats.WritesCompleted,
WritesMerged: fs.DiskStats.WritesMerged,
SectorsWritten: fs.DiskStats.SectorsWritten,
WriteTime: fs.DiskStats.WriteTime,
IoInProgress: fs.DiskStats.IoInProgress,
IoTime: fs.DiskStats.IoTime,
WeightedIoTime: fs.DiskStats.WeightedIoTime,
})
for i := range filesystems {
fs := filesystems[i]
stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
}
} else if len(self.externalMounts) > 0 {
var mountSet map[string]struct{}
@@ -204,26 +217,9 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
if err != nil {
return err
}
for _, fs := range filesystems {
stats.Filesystem = append(stats.Filesystem,
info.FsStats{
Device: fs.Device,
Type: fs.Type.String(),
Limit: fs.Capacity,
Usage: fs.Capacity - fs.Free,
InodesFree: fs.InodesFree,
ReadsCompleted: fs.DiskStats.ReadsCompleted,
ReadsMerged: fs.DiskStats.ReadsMerged,
SectorsRead: fs.DiskStats.SectorsRead,
ReadTime: fs.DiskStats.ReadTime,
WritesCompleted: fs.DiskStats.WritesCompleted,
WritesMerged: fs.DiskStats.WritesMerged,
SectorsWritten: fs.DiskStats.SectorsWritten,
WriteTime: fs.DiskStats.WriteTime,
IoInProgress: fs.DiskStats.IoInProgress,
IoTime: fs.DiskStats.IoTime,
WeightedIoTime: fs.DiskStats.WeightedIoTime,
})
for i := range filesystems {
fs := filesystems[i]
stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
}
}
return nil
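Switching from "for _, fs := range filesystems" to indexing matters because fsToFsStats takes a pointer: before Go 1.22, the range variable is one reused copy, so its address is identical on every iteration. A self-contained sketch of the pitfall the indexed form avoids:

nums := []int{1, 2, 3}
var ptrs []*int
for _, n := range nums {
	ptrs = append(ptrs, &n) // pre-Go 1.22: every &n aliases the same reused variable
}
_ = ptrs // all three pointers end at the value 3; &nums[i] would yield three distinct addresses

Here the pointer is consumed immediately inside the loop, so the indexed form is mainly defensive, but it keeps the aliasing question from ever arising.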


@@ -74,7 +74,7 @@ func (w *ThinPoolWatcher) Start() {
// print latency for refresh
duration := time.Since(start)
glog.V(3).Infof("thin_ls(%d) took %s", start.Unix(), duration)
glog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
}
}
}
@@ -115,7 +115,7 @@ func (w *ThinPoolWatcher) Refresh() error {
}
if currentlyReserved {
glog.V(4).Infof("metadata for %v is currently reserved; releasing", w.poolName)
glog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
_, err = w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
if err != nil {
err = fmt.Errorf("error releasing metadata snapshot for %v: %v", w.poolName, err)
@@ -123,7 +123,7 @@
}
}
glog.Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
glog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
// NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup
// message'. It's not needed for thin pools.
if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {


@@ -322,7 +322,10 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
fs.Capacity, fs.Free, fs.Available, err = getZfstats(device)
fs.Type = ZFS
default:
fs.Capacity, fs.Free, fs.Available, fs.Inodes, fs.InodesFree, err = getVfsStats(partition.mountpoint)
var inodes, inodesFree uint64
fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint)
fs.Inodes = &inodes
fs.InodesFree = &inodesFree
fs.Type = VFS
}
if err != nil {


@@ -40,8 +40,8 @@ type Fs struct {
Capacity uint64
Free uint64
Available uint64
Inodes uint64
InodesFree uint64
Inodes *uint64
InodesFree *uint64
DiskStats DiskStats
}
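With Inodes and InodesFree as pointers, nil can mean "not reported" (the ZFS and device-mapper branches in the previous hunk never set them) as opposed to a genuine zero. A hypothetical consumer helper, not part of this commit:

func inodesOrZero(p *uint64) (value uint64, known bool) {
	if p == nil {
		return 0, false // this filesystem type did not report inode counts
	}
	return *p, true
}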


@@ -415,6 +415,12 @@ type FsStats struct {
// Number of bytes available for non-root user.
Available uint64 `json:"available"`
// HasInodes when true, indicates that Inodes info will be available.
HasInodes bool `json:"has_inodes"`
// Number of Inodes
Inodes uint64 `json:"inodes"`
// Number of available Inodes
InodesFree uint64 `json:"inodes_free"`


@@ -26,6 +26,9 @@ type FsInfo struct {
// Total number of inodes available on the filesystem.
Inodes uint64 `json:"inodes"`
// HasInodes when true, indicates that Inodes info will be available.
HasInodes bool `json:"has_inodes"`
}
type Node struct {


@@ -217,8 +217,11 @@ type FsInfo struct {
// Labels associated with this filesystem.
Labels []string `json:"labels"`
// Number of available Inodes.
InodesFree uint64 `json:"inodes_free"`
// Number of Inodes.
Inodes *uint64 `json:"inodes,omitempty"`
// Number of available Inodes (if known)
InodesFree *uint64 `json:"inodes_free,omitempty"`
}
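The v1 and v2 APIs encode the same optionality differently: v1 (the two hunks above) keeps value-typed fields plus an explicit HasInodes flag, presumably to avoid changing its existing wire format, while v2 uses pointers with omitempty so an unknown count simply disappears from the JSON. Side by side, as a sketch:

type v1Style struct {
	HasInodes  bool   `json:"has_inodes"` // presence flag; inodes_free still serializes as 0 when unknown
	InodesFree uint64 `json:"inodes_free"`
}

type v2Style struct {
	InodesFree *uint64 `json:"inodes_free,omitempty"` // nil drops the key from the output entirely
}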
type RequestOptions struct {


@@ -24,18 +24,18 @@ import (
func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
var result []MachineFsStats
for _, stat := range fsStats {
for i := range fsStats {
stat := fsStats[i]
readDuration := time.Millisecond * time.Duration(stat.ReadTime)
writeDuration := time.Millisecond * time.Duration(stat.WriteTime)
ioDuration := time.Millisecond * time.Duration(stat.IoTime)
weightedDuration := time.Millisecond * time.Duration(stat.WeightedIoTime)
result = append(result, MachineFsStats{
Device: stat.Device,
Type: stat.Type,
Capacity: &stat.Limit,
Usage: &stat.Usage,
Available: &stat.Available,
InodesFree: &stat.InodesFree,
machineFsStat := MachineFsStats{
Device: stat.Device,
Type: stat.Type,
Capacity: &stat.Limit,
Usage: &stat.Usage,
Available: &stat.Available,
DiskStats: DiskStats{
ReadsCompleted: &stat.ReadsCompleted,
ReadsMerged: &stat.ReadsMerged,
@@ -49,7 +49,11 @@ func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
IoDuration: &ioDuration,
WeightedIoDuration: &weightedDuration,
},
})
}
if stat.HasInodes {
machineFsStat.InodesFree = &stat.InodesFree
}
result = append(result, machineFsStat)
}
return result
}
@@ -57,7 +61,8 @@ func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats {
var stats []MachineStats
var last *v1.ContainerStats
for _, val := range cont.Stats {
for i := range cont.Stats {
val := cont.Stats[i]
stat := MachineStats{
Timestamp: val.Timestamp,
}


@@ -110,8 +110,13 @@ func Info(sysFs sysfs.SysFs, fsInfo fs.FsInfo, inHostNamespace bool) (*info.Mach
InstanceID: instanceID,
}
for _, fs := range filesystems {
machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: fs.Inodes})
for i := range filesystems {
fs := filesystems[i]
inodes := uint64(0)
if fs.Inodes != nil {
inodes = *fs.Inodes
}
machineInfo.Filesystems = append(machineInfo.Filesystems, info.FsInfo{Device: fs.Device, Type: fs.Type.String(), Capacity: fs.Capacity, Inodes: inodes, HasInodes: fs.Inodes != nil})
}
return machineInfo, nil


@@ -44,6 +44,8 @@ import (
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/version"
"net/http"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
@@ -125,7 +127,7 @@ type Manager interface {
}
// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, ignoreMetricsSet container.MetricSet) (Manager, error) {
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, ignoreMetricsSet container.MetricSet, collectorHttpClient *http.Client) (Manager, error) {
if memoryCache == nil {
return nil, fmt.Errorf("manager requires memory storage")
}
@@ -182,6 +184,7 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingIn
ignoreMetrics: ignoreMetricsSet,
containerWatchers: []watcher.ContainerWatcher{},
eventsChannel: eventsChannel,
collectorHttpClient: collectorHttpClient,
}
machineInfo, err := machine.Info(sysfs, fsInfo, inHostNamespace)
@@ -226,6 +229,7 @@ type manager struct {
ignoreMetrics container.MetricSet
containerWatchers []watcher.ContainerWatcher
eventsChannel chan watcher.ContainerEvent
collectorHttpClient *http.Client
}
// Start the container manager.
@@ -668,7 +672,8 @@ func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
}
}
fsInfo := []v2.FsInfo{}
for _, fs := range stats[0].Filesystem {
for i := range stats[0].Filesystem {
fs := stats[0].Filesystem[i]
if len(label) != 0 && fs.Device != dev {
continue
}
@@ -680,6 +685,7 @@
if err != nil {
return nil, err
}
fi := v2.FsInfo{
Device: fs.Device,
Mountpoint: mountpoint,
@@ -687,7 +693,10 @@
Usage: fs.Usage,
Available: fs.Available,
Labels: labels,
InodesFree: fs.InodesFree,
}
if fs.HasInodes {
fi.Inodes = &fs.Inodes
fi.InodesFree = &fs.InodesFree
}
fsInfo = append(fsInfo, fi)
}
@@ -752,7 +761,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
glog.V(3).Infof("Got config from %q: %q", v, configFile)
if strings.HasPrefix(k, "prometheus") || strings.HasPrefix(k, "Prometheus") {
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler)
newCollector, err := collector.NewPrometheusCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
if err != nil {
glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
return err
@@ -763,7 +772,7 @@
return err
}
} else {
newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler)
newCollector, err := collector.NewCollector(k, configFile, *applicationMetricsCountLimit, cont.handler, m.collectorHttpClient)
if err != nil {
glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
return err
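Both constructor calls now receive m.collectorHttpClient, so the client chosen in manager.New (the kubelet hunk at the top of this commit) governs every config-driven scrape. One way to exercise that path deterministically is a stub transport; stubRT is a hypothetical test helper, not part of this commit (assumes "io", "net/http", and "strings" are imported):

type stubRT struct{ body string }

func (s stubRT) RoundTrip(r *http.Request) (*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusOK,
		Header:     make(http.Header),
		Body:       io.NopCloser(strings.NewReader(s.body)),
		Request:    r,
	}, nil
}

// manager.New(..., &http.Client{Transport: stubRT{body: "some_metric 42\n"}})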


@@ -493,12 +493,18 @@ func (c *PrometheusCollector) Describe(ch chan<- *prometheus.Desc) {
// Collect fetches the stats from all containers and delivers them as
// Prometheus metrics. It implements prometheus.PrometheusCollector.
func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
c.errors.Set(0)
c.collectMachineInfo(ch)
c.collectVersionInfo(ch)
c.collectContainersInfo(ch)
c.errors.Collect(ch)
}
const (
containerLabelPrefix = "container_label_"
containerEnvPrefix = "container_env_"
)
func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) {
containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
if err != nil {
@@ -529,11 +535,11 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
}
for k, v := range container.Spec.Labels {
baseLabels = append(baseLabels, sanitizeLabelName(k))
baseLabels = append(baseLabels, sanitizeLabelName(containerLabelPrefix+k))
baseLabelValues = append(baseLabelValues, v)
}
for k, v := range container.Spec.Envs {
baseLabels = append(baseLabels, sanitizeLabelName(k))
baseLabels = append(baseLabels, sanitizeLabelName(containerEnvPrefix+k))
baseLabelValues = append(baseLabelValues, v)
}
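Prefixing user-supplied label and env names before sanitizing keeps them from colliding with cAdvisor's built-in metric labels (name, id, image, and so on) and makes their origin visible in the exported series. Assuming sanitizeLabelName replaces any character outside [a-zA-Z0-9_] with an underscore, which is an assumption about its implementation, a Kubernetes-style label key would come out like this:

re := regexp.MustCompile(`[^a-zA-Z0-9_]`)
sanitize := func(s string) string { return re.ReplaceAllString(s, "_") }
fmt.Println(sanitize("container_label_" + "io.kubernetes.pod.name"))
// Output: container_label_io_kubernetes_pod_name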