Mirror of https://github.com/k3s-io/kubernetes.git
Update cAdvisor dependency to latest.
Brings in new range-based requests. Fixes #5777.
This commit is contained in:
parent adac4328e8
commit 9a75f8d76f
Godeps/Godeps.json (64 changes; generated)
@@ -203,83 +203,83 @@
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/api",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/events",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/fs",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/healthz",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/http",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/info/v1",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/info/v2",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/manager",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/metrics",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/pages",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/storage",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/summary",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/validate",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/version",
-			"Comment": "0.10.1-62-ge78e515",
-			"Rev": "e78e515723d9eb387e5fd865a811f6263e946a06"
+			"Comment": "0.10.1-92-g41a0c30",
+			"Rev": "41a0c30fbf4df4d5d711b752785febb6ed5330a4"
 		},
 		{
 			"ImportPath": "github.com/google/gofuzz",
Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go (113 changes; generated, vendored)
@@ -36,6 +36,8 @@ const (
 	specApi       = "spec"
 	eventsApi     = "events"
 	storageApi    = "storage"
+	attributesApi = "attributes"
+	versionApi    = "version"
 )
 
 // Interface for a cAdvisor API version
@@ -305,42 +307,69 @@ func (self *version2_0) SupportedRequestTypes() []string {
 }
 
 func (self *version2_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
+	opt, err := getRequestOptions(r)
+	if err != nil {
+		return err
+	}
 	switch requestType {
-	case summaryApi:
-		containerName := getContainerName(request)
-		glog.V(2).Infof("Api - Summary(%v)", containerName)
-
-		stats, err := m.GetContainerDerivedStats(containerName)
+	case versionApi:
+		glog.V(2).Infof("Api - Version")
+		versionInfo, err := m.GetVersionInfo()
+		if err != nil {
+			return err
+		}
+		return writeResult(versionInfo.CadvisorVersion, w)
+	case attributesApi:
+		glog.V(2).Info("Api - Attributes")
+
+		machineInfo, err := m.GetMachineInfo()
+		if err != nil {
+			return err
+		}
+		versionInfo, err := m.GetVersionInfo()
+		if err != nil {
+			return err
+		}
+		info := v2.GetAttributes(machineInfo, versionInfo)
+		return writeResult(info, w)
+	case machineApi:
+		glog.V(2).Info("Api - Machine")
+
+		// TODO(rjnagal): Move machineInfo from v1.
+		machineInfo, err := m.GetMachineInfo()
+		if err != nil {
+			return err
+		}
+		return writeResult(machineInfo, w)
+	case summaryApi:
+		containerName := getContainerName(request)
+		glog.V(2).Infof("Api - Summary for container %q, options %+v", containerName, opt)
+
+		stats, err := m.GetDerivedStats(containerName, opt)
 		if err != nil {
 			return err
 		}
 		return writeResult(stats, w)
 	case statsApi:
 		name := getContainerName(request)
-		sr, err := getStatsRequest(name, r)
-		if err != nil {
-			return err
-		}
-		glog.V(2).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, sr)
-		query := info.ContainerInfoRequest{
-			NumStats: sr.Count,
-		}
-		cont, err := m.GetContainerInfo(name, &query)
-		if err != nil {
-			return fmt.Errorf("failed to get container %q: %v", name, err)
-		}
-		contStats := convertStats(cont)
+		glog.V(2).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt)
+		conts, err := m.GetRequestedContainersInfo(name, opt)
+		if err != nil {
+			return err
+		}
+		contStats := make(map[string][]v2.ContainerStats, 0)
+		for name, cont := range conts {
+			contStats[name] = convertStats(cont)
+		}
 		return writeResult(contStats, w)
 	case specApi:
 		containerName := getContainerName(request)
-		glog.V(2).Infof("Api - Spec(%v)", containerName)
-		spec, err := m.GetContainerSpec(containerName)
+		glog.V(2).Infof("Api - Spec for container %q, options %+v", containerName, opt)
+		specs, err := m.GetContainerSpec(containerName, opt)
 		if err != nil {
 			return err
 		}
-		specV2 := convertSpec(spec)
-		return writeResult(specV2, w)
+		return writeResult(specs, w)
 	case storageApi:
 		var err error
 		fi := []v2.FsInfo{}
@@ -364,26 +393,6 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
 	}
 }
 
-// Convert container spec from v1 to v2.
-func convertSpec(specV1 info.ContainerSpec) v2.ContainerSpec {
-	specV2 := v2.ContainerSpec{
-		CreationTime: specV1.CreationTime,
-		HasCpu:       specV1.HasCpu,
-		HasMemory:    specV1.HasMemory,
-	}
-	if specV1.HasCpu {
-		specV2.Cpu.Limit = specV1.Cpu.Limit
-		specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit
-		specV2.Cpu.Mask = specV1.Cpu.Mask
-	}
-	if specV1.HasMemory {
-		specV2.Memory.Limit = specV1.Memory.Limit
-		specV2.Memory.Reservation = specV1.Memory.Reservation
-		specV2.Memory.SwapLimit = specV1.Memory.SwapLimit
-	}
-	return specV2
-}
-
 func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
 	stats := []v2.ContainerStats{}
 	for _, val := range cont.Stats {
@@ -417,25 +426,35 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
 	return stats
 }
 
-func getStatsRequest(id string, r *http.Request) (v2.StatsRequest, error) {
+func getRequestOptions(r *http.Request) (v2.RequestOptions, error) {
+	supportedTypes := map[string]bool{
+		v2.TypeName:   true,
+		v2.TypeDocker: true,
+	}
 	// fill in the defaults.
-	sr := v2.StatsRequest{
-		IdType:    "name",
+	opt := v2.RequestOptions{
+		IdType:    v2.TypeName,
 		Count:     64,
 		Recursive: false,
 	}
 	idType := r.URL.Query().Get("type")
-	if len(idType) != 0 && idType != "name" {
-		return sr, fmt.Errorf("unknown 'type' %q for container name %q", idType, id)
+	if len(idType) != 0 {
+		if !supportedTypes[idType] {
+			return opt, fmt.Errorf("unknown 'type' %q", idType)
+		}
+		opt.IdType = idType
 	}
 	count := r.URL.Query().Get("count")
 	if len(count) != 0 {
 		n, err := strconv.ParseUint(count, 10, 32)
 		if err != nil {
-			return sr, fmt.Errorf("failed to parse 'count' option: %v", count)
+			return opt, fmt.Errorf("failed to parse 'count' option: %v", count)
 		}
-		sr.Count = int(n)
+		opt.Count = int(n)
 	}
-	// TODO(rjnagal): Add option to specify recursive.
-	return sr, nil
+	recursive := r.URL.Query().Get("recursive")
+	if recursive == "true" {
+		opt.Recursive = true
+	}
+	return opt, nil
 }
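The new request options ride on ordinary query parameters of the v2 endpoints. A minimal client sketch (the host, port, endpoint path, and parameter values are illustrative assumptions, not part of this commit):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// "type" picks the id namespace (name or docker), "count" caps the
	// number of samples returned, and "recursive" includes subcontainers.
	url := "http://localhost:8080/api/v2.0/stats/?type=name&count=10&recursive=true"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}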
Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go (37 changes; generated, vendored)
@@ -21,6 +21,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/cgroups/systemd"
@@ -43,23 +44,23 @@ var dockerRootDir = flag.String("docker_root", "/var/lib/docker", "Absolute path
 
 // Whether the system is using Systemd.
 var useSystemd bool
+var check = sync.Once{}
 
-func init() {
-	useSystemd = systemd.UseSystemd()
-	if !useSystemd {
-		// Second attempt at checking for systemd, check for a "name=systemd" cgroup.
-		mnt, err := cgroups.FindCgroupMountpoint("cpu")
-		if err == nil {
-			// systemd presence does not mean systemd controls cgroups.
-			// If system.slice cgroup exists, then systemd is taking control.
-			// This breaks if user creates system.slice manually :)
-			useSystemd = utils.FileExists(mnt + "/system.slice")
-		}
-	}
-}
+func UseSystemd() bool {
+	// init would run and initialize useSystemd before we can call this method.
+	check.Do(func() {
+		// run and initialize useSystemd
+		useSystemd = systemd.UseSystemd()
+		if !useSystemd {
+			// Second attempt at checking for systemd, check for a "name=systemd" cgroup.
+			mnt, err := cgroups.FindCgroupMountpoint("cpu")
+			if err == nil {
+				// systemd presence does not mean systemd controls cgroups.
+				// If system.slice cgroup exists, then systemd is taking control.
+				// This breaks if user creates system.slice manually :)
+				useSystemd = utils.FileExists(mnt + "/system.slice")
+			}
+		}
+	})
+	return useSystemd
+}
@@ -108,7 +109,7 @@ func ContainerNameToDockerId(name string) string {
 	id := path.Base(name)
 
 	// Turn systemd cgroup name into Docker ID.
-	if useSystemd {
+	if UseSystemd() {
 		id = strings.TrimPrefix(id, "docker-")
 		id = strings.TrimSuffix(id, ".scope")
 	}
@@ -119,7 +120,7 @@ func ContainerNameToDockerId(name string) string {
 // Returns a full container name for the specified Docker ID.
 func FullContainerName(dockerId string) string {
 	// Add the full container name.
-	if useSystemd {
+	if UseSystemd() {
 		return path.Join("/system.slice", fmt.Sprintf("docker-%s.scope", dockerId))
 	} else {
 		return path.Join("/docker", dockerId)
@@ -207,7 +208,7 @@ func Register(factory info.MachineInfoFactory, fsInfo fs.FsInfo) error {
 	}
 }
 
-	if useSystemd {
+	if UseSystemd() {
 		glog.Infof("System is using systemd")
 	}
 
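The factory change above swaps an eager init() probe for lazy, race-safe detection on first use. A standalone sketch of the same sync.Once pattern (names and the probed value are illustrative):

package main

import (
	"fmt"
	"sync"
)

var (
	expensive string
	once      sync.Once
)

// value computes the result at most once, no matter how many callers
// race to read it; later calls reuse the cached result.
func value() string {
	once.Do(func() {
		expensive = "detected" // stands in for the systemd probe
	})
	return expensive
}

func main() {
	fmt.Println(value())
	fmt.Println(value()) // second call skips the probe entirely
}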
Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go (37 changes; generated, vendored)
@@ -234,25 +234,28 @@ type LoadStats struct {
 	NrIoWait uint64 `json:"nr_io_wait"`
 }
 
+// CPU usage time statistics.
+type CpuUsage struct {
+	// Total CPU usage.
+	// Units: nanoseconds
+	Total uint64 `json:"total"`
+
+	// Per CPU/core usage of the container.
+	// Unit: nanoseconds.
+	PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
+
+	// Time spent in user space.
+	// Unit: nanoseconds
+	User uint64 `json:"user"`
+
+	// Time spent in kernel space.
+	// Unit: nanoseconds
+	System uint64 `json:"system"`
+}
+
 // All CPU usage metrics are cumulative from the creation of the container
 type CpuStats struct {
-	Usage struct {
-		// Total CPU usage.
-		// Units: nanoseconds
-		Total uint64 `json:"total"`
-
-		// Per CPU/core usage of the container.
-		// Unit: nanoseconds.
-		PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
-
-		// Time spent in user space.
-		// Unit: nanoseconds
-		User uint64 `json:"user"`
-
-		// Time spent in kernel space.
-		// Unit: nanoseconds
-		System uint64 `json:"system"`
-	} `json:"usage"`
+	Usage CpuUsage `json:"usage"`
 	// Smoothed average of number of runnable threads x 1000.
 	// We multiply by thousand to avoid using floats, but preserving precision.
 	// Load is smoothed over the last 10 seconds. Instantaneous value can be read
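Extracting the anonymous Usage struct into a named CpuUsage type leaves the JSON wire format unchanged, because the field tags are identical. A trimmed sketch demonstrating that (only three of the fields shown; values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

// Named type extracted from the former anonymous struct;
// the tags are the same, so the JSON shape does not change.
type CpuUsage struct {
	Total  uint64 `json:"total"`
	User   uint64 `json:"user"`
	System uint64 `json:"system"`
}

type CpuStats struct {
	Usage CpuUsage `json:"usage"`
}

func main() {
	b, _ := json.Marshal(CpuStats{Usage: CpuUsage{Total: 100, User: 60, System: 40}})
	fmt.Println(string(b)) // {"usage":{"total":100,"user":60,"system":40}}
}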
Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go (3 changes; generated, vendored)
@@ -128,6 +128,9 @@ type MachineInfo struct {
 	// The system uuid
 	SystemUUID string `json:"system_uuid"`
 
+	// The boot id
+	BootID string `json:"boot_id"`
+
 	// Filesystems on this machine.
 	Filesystems []FsInfo `json:"filesystems"`
 
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go (15 changes; generated, vendored)
@@ -22,6 +22,11 @@ import (
 	"github.com/google/cadvisor/info/v1"
 )
 
+const (
+	TypeName   = "name"
+	TypeDocker = "docker"
+)
+
 type CpuSpec struct {
 	// Requested cpu shares. Default is 1024.
 	Limit uint64 `json:"limit"`
@@ -51,6 +56,14 @@ type ContainerSpec struct {
 	// Time at which the container was created.
 	CreationTime time.Time `json:"creation_time,omitempty"`
 
+	// Other names by which the container is known within a certain namespace.
+	// This is unique within that namespace.
+	Aliases []string `json:"aliases,omitempty"`
+
+	// Namespace under which the aliases of a container are unique.
+	// An example of a namespace is "docker" for Docker containers.
+	Namespace string `json:"namespace,omitempty"`
+
 	HasCpu bool    `json:"has_cpu"`
 	Cpu    CpuSpec `json:"cpu,omitempty"`
 
@@ -142,7 +155,7 @@ type FsInfo struct {
 	Labels []string `json:"labels"`
 }
 
-type StatsRequest struct {
+type RequestOptions struct {
 	// Type of container identifier specified - "name", "dockerid", dockeralias"
 	IdType string `json:"type"`
 	// Number of stats to return
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go (80 changes; generated, vendored; new file)
@@ -0,0 +1,80 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2
+
+import (
+	// TODO(rjnagal): Move structs from v1.
+	"github.com/google/cadvisor/info/v1"
+)
+
+type Attributes struct {
+	// Kernel version.
+	KernelVersion string `json:"kernel_version"`
+
+	// OS image being used for cadvisor container, or host image if running on host directly.
+	ContainerOsVersion string `json:"container_os_version"`
+
+	// Docker version.
+	DockerVersion string `json:"docker_version"`
+
+	// cAdvisor version.
+	CadvisorVersion string `json:"cadvisor_version"`
+
+	// The number of cores in this machine.
+	NumCores int `json:"num_cores"`
+
+	// Maximum clock speed for the cores, in KHz.
+	CpuFrequency uint64 `json:"cpu_frequency_khz"`
+
+	// The amount of memory (in bytes) in this machine
+	MemoryCapacity int64 `json:"memory_capacity"`
+
+	// The machine id
+	MachineID string `json:"machine_id"`
+
+	// The system uuid
+	SystemUUID string `json:"system_uuid"`
+
+	// Filesystems on this machine.
+	Filesystems []v1.FsInfo `json:"filesystems"`
+
+	// Disk map
+	DiskMap map[string]v1.DiskInfo `json:"disk_map"`
+
+	// Network devices
+	NetworkDevices []v1.NetInfo `json:"network_devices"`
+
+	// Machine Topology
+	// Describes cpu/memory layout and hierarchy.
+	Topology []v1.Node `json:"topology"`
+}
+
+func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes {
+	return Attributes{
+		KernelVersion:      vi.KernelVersion,
+		ContainerOsVersion: vi.ContainerOsVersion,
+		DockerVersion:      vi.DockerVersion,
+		CadvisorVersion:    vi.CadvisorVersion,
+		NumCores:           mi.NumCores,
+		CpuFrequency:       mi.CpuFrequency,
+		MemoryCapacity:     mi.MemoryCapacity,
+		MachineID:          mi.MachineID,
+		SystemUUID:         mi.SystemUUID,
+		Filesystems:        mi.Filesystems,
+		DiskMap:            mi.DiskMap,
+		NetworkDevices:     mi.NetworkDevices,
+		Topology:           mi.Topology,
+	}
+}
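For context, GetAttributes simply merges the two v1 structs into the flat v2 view served by the new attributes endpoint. A hypothetical caller (field values are made up; in cAdvisor these come from the manager):

package main

import (
	"fmt"

	"github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/info/v2"
)

func main() {
	mi := &v1.MachineInfo{NumCores: 4, MemoryCapacity: 8 << 30}
	vi := &v1.VersionInfo{KernelVersion: "3.13.0", CadvisorVersion: "0.11.0"}
	// Flatten machine and version info into one attributes struct.
	attrs := v2.GetAttributes(mi, vi)
	fmt.Printf("%+v\n", attrs)
}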
Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go (2 changes; generated, vendored)
@@ -182,7 +182,7 @@ func (c *containerData) housekeeping() {
 	}
 
 	// Housekeep every second.
-	glog.Infof("Start housekeeping for container %q\n", c.info.Name)
+	glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
 	lastHousekeeping := time.Now()
 	for {
 		select {
Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go (12 changes; generated, vendored)
@@ -42,6 +42,7 @@ var CpuClockSpeedMHz = regexp.MustCompile("cpu MHz\\t*: +([0-9]+.[0-9]+)")
 var memoryCapacityRegexp = regexp.MustCompile("MemTotal: *([0-9]+) kB")
 
 var machineIdFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.")
+var bootIdFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. Use the first one that exists.")
 
 func getClockSpeed(procInfo []byte) (uint64, error) {
 	// First look through sys to find a max supported cpu frequency.
@@ -209,17 +210,17 @@ func getTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
 	return nodes, numCores, nil
 }
 
-func getMachineID() string {
-	if len(*machineIdFilePath) == 0 {
+func getInfoFromFiles(filePaths string) string {
+	if len(filePaths) == 0 {
 		return ""
 	}
-	for _, file := range strings.Split(*machineIdFilePath, ",") {
+	for _, file := range strings.Split(filePaths, ",") {
 		id, err := ioutil.ReadFile(file)
 		if err == nil {
 			return strings.TrimSpace(string(id))
 		}
 	}
-	glog.Infof("Couldn't collect machine-id from any of the files in %q", *machineIdFilePath)
+	glog.Infof("Couldn't collect info from any of the files in %q", filePaths)
 	return ""
 }
 
@@ -273,8 +274,9 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, err
 		DiskMap:        diskMap,
 		NetworkDevices: netDevices,
 		Topology:       topology,
-		MachineID:      getMachineID(),
+		MachineID:      getInfoFromFiles(*machineIdFilePath),
 		SystemUUID:     systemUUID,
+		BootID:         getInfoFromFiles(*bootIdFilePath),
 	}
 
 	for _, fs := range filesystems {
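getInfoFromFiles generalizes the old getMachineID into "return the first readable file from a comma-separated list", which is what lets the same helper serve both machine-id and the new boot-id. An equivalent standalone sketch:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// firstFileContents mirrors the getInfoFromFiles pattern above:
// try each comma-separated path and return the first readable one.
func firstFileContents(filePaths string) string {
	if len(filePaths) == 0 {
		return ""
	}
	for _, file := range strings.Split(filePaths, ",") {
		data, err := ioutil.ReadFile(file)
		if err == nil {
			return strings.TrimSpace(string(data))
		}
	}
	return ""
}

func main() {
	fmt.Println(firstFileContents("/etc/machine-id,/var/lib/dbus/machine-id"))
}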
Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go (239 changes; generated, vendored)
@@ -63,11 +63,14 @@ type Manager interface {
 	// Gets information about a specific Docker container. The specified name is within the Docker namespace.
 	DockerContainer(dockerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error)
 
-	// Gets spec for a container.
-	GetContainerSpec(containerName string) (info.ContainerSpec, error)
+	// Gets spec for all containers based on request options.
+	GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error)
 
-	// Get derived stats for a container.
-	GetContainerDerivedStats(containerName string) (v2.DerivedStats, error)
+	// Gets summary stats for all containers based on request options.
+	GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error)
+
+	// Get info for all requested containers based on the request options.
+	GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error)
 
 	// Get information about the machine.
 	GetMachineInfo() (*info.MachineInfo, error)
@@ -297,16 +300,60 @@ func (self *manager) getContainerData(containerName string) (*containerData, err
 	return cont, nil
 }
 
-func (self *manager) GetContainerSpec(containerName string) (info.ContainerSpec, error) {
-	cont, err := self.getContainerData(containerName)
-	if err != nil {
-		return info.ContainerSpec{}, err
-	}
-	cinfo, err := cont.GetInfo()
-	if err != nil {
-		return info.ContainerSpec{}, err
-	}
-	return self.getAdjustedSpec(cinfo), nil
+func (self *manager) GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error) {
+	conts, err := self.getRequestedContainers(containerName, options)
+	if err != nil {
+		return nil, err
+	}
+	stats := make(map[string]v2.DerivedStats)
+	for name, cont := range conts {
+		d, err := cont.DerivedStats()
+		if err != nil {
+			return nil, err
+		}
+		stats[name] = d
+	}
+	return stats, nil
+}
+
+func (self *manager) GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error) {
+	conts, err := self.getRequestedContainers(containerName, options)
+	if err != nil {
+		return nil, err
+	}
+	specs := make(map[string]v2.ContainerSpec)
+	for name, cont := range conts {
+		cinfo, err := cont.GetInfo()
+		if err != nil {
+			return nil, err
+		}
+		spec := self.getV2Spec(cinfo)
+		specs[name] = spec
+	}
+	return specs, nil
+}
+
+// Get V2 container spec from v1 container info.
+func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
+	specV1 := self.getAdjustedSpec(cinfo)
+	specV2 := v2.ContainerSpec{
+		CreationTime: specV1.CreationTime,
+		HasCpu:       specV1.HasCpu,
+		HasMemory:    specV1.HasMemory,
+	}
+	if specV1.HasCpu {
+		specV2.Cpu.Limit = specV1.Cpu.Limit
+		specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit
+		specV2.Cpu.Mask = specV1.Cpu.Mask
+	}
+	if specV1.HasMemory {
+		specV2.Memory.Limit = specV1.Memory.Limit
+		specV2.Memory.Reservation = specV1.Memory.Reservation
+		specV2.Memory.SwapLimit = specV1.Memory.SwapLimit
+	}
+	specV2.Aliases = cinfo.Aliases
+	specV2.Namespace = cinfo.Namespace
+	return specV2
 }
 
 func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
@@ -353,40 +400,58 @@ func (self *manager) containerDataToContainerInfo(cont *containerData, query *in
 	return ret, nil
 }
 
-func (self *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
-	var containers []*containerData
-	func() {
-		self.containersLock.RLock()
-		defer self.containersLock.RUnlock()
-		containers = make([]*containerData, 0, len(self.containers))
+func (self *manager) getContainer(containerName string) (*containerData, error) {
+	self.containersLock.RLock()
+	defer self.containersLock.RUnlock()
+	cont, ok := self.containers[namespacedContainerName{Name: containerName}]
+	if !ok {
+		return nil, fmt.Errorf("unknown container %q", containerName)
+	}
+	return cont, nil
+}
 
-		// Get all the subcontainers of the specified container
-		matchedName := path.Join(containerName, "/")
-		for i := range self.containers {
-			name := self.containers[i].info.Name
-			if name == containerName || strings.HasPrefix(name, matchedName) {
-				containers = append(containers, self.containers[i])
-			}
-		}
-	}()
+func (self *manager) getSubcontainers(containerName string) map[string]*containerData {
+	self.containersLock.RLock()
+	defer self.containersLock.RUnlock()
+	containersMap := make(map[string]*containerData, len(self.containers))
+
+	// Get all the unique subcontainers of the specified container
+	matchedName := path.Join(containerName, "/")
+	for i := range self.containers {
+		name := self.containers[i].info.Name
+		if name == containerName || strings.HasPrefix(name, matchedName) {
+			containersMap[self.containers[i].info.Name] = self.containers[i]
+		}
+	}
+	return containersMap
+}
+
+func (self *manager) SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
+	containersMap := self.getSubcontainers(containerName)
+
+	containers := make([]*containerData, 0, len(containersMap))
+	for _, cont := range containersMap {
+		containers = append(containers, cont)
+	}
 	return self.containerDataSliceToContainerInfoSlice(containers, query)
 }
 
-func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
-	var containers map[string]*containerData
-	func() {
-		self.containersLock.RLock()
-		defer self.containersLock.RUnlock()
-		containers = make(map[string]*containerData, len(self.containers))
+func (self *manager) getAllDockerContainers() map[string]*containerData {
+	self.containersLock.RLock()
+	defer self.containersLock.RUnlock()
+	containers := make(map[string]*containerData, len(self.containers))
 
-		// Get containers in the Docker namespace.
-		for name, cont := range self.containers {
-			if name.Namespace == docker.DockerNamespace {
-				containers[cont.info.Name] = cont
-			}
-		}
-	}()
+	// Get containers in the Docker namespace.
+	for name, cont := range self.containers {
+		if name.Namespace == docker.DockerNamespace {
+			containers[cont.info.Name] = cont
+		}
+	}
+	return containers
+}
+
+func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[string]info.ContainerInfo, error) {
+	containers := self.getAllDockerContainers()
 
 	output := make(map[string]info.ContainerInfo, len(containers))
 	for name, cont := range containers {
@@ -399,23 +464,25 @@ func (self *manager) AllDockerContainers(query *info.ContainerInfoRequest) (map[
 	return output, nil
 }
 
-func (self *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
-	var container *containerData = nil
-	func() {
-		self.containersLock.RLock()
-		defer self.containersLock.RUnlock()
+func (self *manager) getDockerContainer(containerName string) (*containerData, error) {
+	self.containersLock.RLock()
+	defer self.containersLock.RUnlock()
 
-		// Check for the container in the Docker container namespace.
-		cont, ok := self.containers[namespacedContainerName{
-			Namespace: docker.DockerNamespace,
-			Name:      containerName,
-		}]
-		if ok {
-			container = cont
-		}
-	}()
-	if container == nil {
-		return info.ContainerInfo{}, fmt.Errorf("unable to find Docker container %q", containerName)
+	// Check for the container in the Docker container namespace.
+	cont, ok := self.containers[namespacedContainerName{
+		Namespace: docker.DockerNamespace,
+		Name:      containerName,
+	}]
+	if !ok {
+		return nil, fmt.Errorf("unable to find Docker container %q", containerName)
+	}
+	return cont, nil
+}
+
+func (self *manager) DockerContainer(containerName string, query *info.ContainerInfoRequest) (info.ContainerInfo, error) {
+	container, err := self.getDockerContainer(containerName)
+	if err != nil {
+		return info.ContainerInfo{}, err
 	}
 
 	inf, err := self.containerDataToContainerInfo(container, query)
@@ -444,18 +511,60 @@ func (self *manager) containerDataSliceToContainerInfoSlice(containers []*contai
 	return output, nil
 }
 
-func (self *manager) GetContainerDerivedStats(containerName string) (v2.DerivedStats, error) {
-	var ok bool
-	var cont *containerData
-	func() {
-		self.containersLock.RLock()
-		defer self.containersLock.RUnlock()
-		cont, ok = self.containers[namespacedContainerName{Name: containerName}]
-	}()
-	if !ok {
-		return v2.DerivedStats{}, fmt.Errorf("unknown container %q", containerName)
-	}
-	return cont.DerivedStats()
+func (self *manager) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
+	containers, err := self.getRequestedContainers(containerName, options)
+	if err != nil {
+		return nil, err
+	}
+	containersMap := make(map[string]*info.ContainerInfo)
+	query := info.ContainerInfoRequest{
+		NumStats: options.Count,
+	}
+	for name, data := range containers {
+		info, err := self.containerDataToContainerInfo(data, &query)
+		if err != nil {
+			// Skip containers with errors, we try to degrade gracefully.
+			continue
+		}
+		containersMap[name] = info
+	}
+	return containersMap, nil
+}
+
+func (self *manager) getRequestedContainers(containerName string, options v2.RequestOptions) (map[string]*containerData, error) {
+	containersMap := make(map[string]*containerData)
+	switch options.IdType {
+	case v2.TypeName:
+		if options.Recursive == false {
+			cont, err := self.getContainer(containerName)
+			if err != nil {
+				return containersMap, err
+			}
+			containersMap[cont.info.Name] = cont
+		} else {
+			containersMap = self.getSubcontainers(containerName)
+			if len(containersMap) == 0 {
+				return containersMap, fmt.Errorf("unknown container: %q", containerName)
+			}
+		}
+	case v2.TypeDocker:
+		if options.Recursive == false {
+			containerName = strings.TrimPrefix(containerName, "/")
+			cont, err := self.getDockerContainer(containerName)
+			if err != nil {
+				return containersMap, err
+			}
+			containersMap[cont.info.Name] = cont
+		} else {
+			if containerName != "/" {
+				return containersMap, fmt.Errorf("invalid request for docker container %q with subcontainers", containerName)
+			}
+			containersMap = self.getAllDockerContainers()
+		}
+	default:
+		return containersMap, fmt.Errorf("invalid request type %q", options.IdType)
+	}
+	return containersMap, nil
 }
 
 func (self *manager) GetFsInfo(label string) ([]v2.FsInfo, error) {
@@ -547,7 +656,7 @@ func (m *manager) createContainer(containerName string) error {
 	if alreadyExists {
 		return nil
 	}
-	glog.Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
+	glog.V(2).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
 
 	contSpecs, err := cont.handler.GetSpec()
 	if err != nil {
@@ -605,7 +714,7 @@ func (m *manager) destroyContainer(containerName string) error {
 			Name:      alias,
 		})
 	}
-	glog.Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
+	glog.V(2).Infof("Destroyed container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
 
 	contRef, err := cont.handler.ContainerReference()
 	if err != nil {
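The getRequestedContainers switch gives RequestOptions four meaningful combinations. A sketch enumerating them against the vendored v2 package (the comments summarize the semantics above; values are illustrative):

package main

import (
	"fmt"

	"github.com/google/cadvisor/info/v2"
)

func main() {
	cases := []v2.RequestOptions{
		{IdType: v2.TypeName, Count: 64},                    // a single container by raw cgroup name
		{IdType: v2.TypeName, Count: 64, Recursive: true},   // that container plus all its subcontainers
		{IdType: v2.TypeDocker, Count: 64},                  // a single Docker container by id/alias
		{IdType: v2.TypeDocker, Count: 64, Recursive: true}, // all Docker containers (name must be "/")
	}
	for _, c := range cases {
		fmt.Printf("%+v\n", c)
	}
}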
Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_mock.go (17 changes; generated, vendored)
@@ -55,14 +55,19 @@ func (c *ManagerMock) DockerContainer(name string, query *info.ContainerInfoRequ
 	return args.Get(0).(info.ContainerInfo), args.Error(1)
 }
 
-func (c *ManagerMock) GetContainerSpec(containerName string) (info.ContainerSpec, error) {
-	args := c.Called(containerName)
-	return args.Get(0).(info.ContainerSpec), args.Error(1)
+func (c *ManagerMock) GetContainerSpec(containerName string, options v2.RequestOptions) (map[string]v2.ContainerSpec, error) {
+	args := c.Called(containerName, options)
+	return args.Get(0).(map[string]v2.ContainerSpec), args.Error(1)
 }
 
-func (c *ManagerMock) GetContainerDerivedStats(containerName string) (v2.DerivedStats, error) {
-	args := c.Called(containerName)
-	return args.Get(0).(v2.DerivedStats), args.Error(1)
+func (c *ManagerMock) GetDerivedStats(containerName string, options v2.RequestOptions) (map[string]v2.DerivedStats, error) {
+	args := c.Called(containerName, options)
+	return args.Get(0).(map[string]v2.DerivedStats), args.Error(1)
+}
+
+func (c *ManagerMock) GetRequestedContainersInfo(containerName string, options v2.RequestOptions) (map[string]*info.ContainerInfo, error) {
+	args := c.Called(containerName, options)
+	return args.Get(0).(map[string]*info.ContainerInfo), args.Error(1)
 }
 
 func (c *ManagerMock) WatchForEvents(queryuest *events.Request, passedChannel chan *events.Event) error {
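Since ManagerMock is a testify-style mock, callers set expectations against the new two-argument signatures. A hypothetical test sketch (the test name and expectation values are assumptions, not from this commit):

package manager_test

import (
	"testing"

	"github.com/google/cadvisor/info/v2"
	"github.com/google/cadvisor/manager"
)

func TestMockMatchesNewSignature(t *testing.T) {
	m := &manager.ManagerMock{}
	opts := v2.RequestOptions{IdType: v2.TypeName, Count: 64}
	// Expect the options struct as a second argument now.
	m.On("GetDerivedStats", "/", opts).Return(map[string]v2.DerivedStats{}, nil)

	stats, err := m.GetDerivedStats("/", opts)
	if err != nil || len(stats) != 0 {
		t.Fatalf("unexpected result: %v, %v", stats, err)
	}
	m.AssertExpectations(t)
}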
Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go (640 changes; generated, vendored)
@ -20,253 +20,359 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/manager"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type prometheusMetric struct {
|
||||
valueType prometheus.ValueType
|
||||
value float64
|
||||
labels []string
|
||||
// This will usually be manager.Manager, but can be swapped out for testing.
|
||||
type subcontainersInfoProvider interface {
|
||||
// Get information about all subcontainers of the specified container (includes self).
|
||||
SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error)
|
||||
}
|
||||
|
||||
// metricValue describes a single metric value for a given set of label values
|
||||
// within a parent containerMetric.
|
||||
type metricValue struct {
|
||||
value float64
|
||||
labels []string
|
||||
}
|
||||
|
||||
type metricValues []metricValue
|
||||
|
||||
// fsValues is a helper method for assembling per-filesystem stats.
|
||||
func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
|
||||
values := make(metricValues, 0, len(fsStats))
|
||||
for _, stat := range fsStats {
|
||||
values = append(values, metricValue{
|
||||
value: valueFn(&stat),
|
||||
labels: []string{stat.Device},
|
||||
})
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// A containerMetric describes a multi-dimensional metric used for exposing
|
||||
// a certain type of container statistic.
|
||||
type containerMetric struct {
|
||||
name string
|
||||
help string
|
||||
valueType prometheus.ValueType
|
||||
extraLabels []string
|
||||
getValues func(s *info.ContainerStats) metricValues
|
||||
}
|
||||
|
||||
func (cm *containerMetric) desc() *prometheus.Desc {
|
||||
return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id"}, cm.extraLabels...), nil)
|
||||
}
|
||||
|
||||
// PrometheusCollector implements prometheus.Collector.
|
||||
type PrometheusCollector struct {
|
||||
manager manager.Manager
|
||||
|
||||
errors prometheus.Gauge
|
||||
lastSeen *prometheus.Desc
|
||||
|
||||
cpuUsageUserSeconds *prometheus.Desc
|
||||
cpuUsageSystemSeconds *prometheus.Desc
|
||||
cpuUsageSecondsPerCPU *prometheus.Desc
|
||||
|
||||
memoryUsageBytes *prometheus.Desc
|
||||
memoryWorkingSet *prometheus.Desc
|
||||
memoryFailures *prometheus.Desc
|
||||
|
||||
fsLimit *prometheus.Desc
|
||||
fsUsage *prometheus.Desc
|
||||
fsReads *prometheus.Desc
|
||||
fsReadsSectors *prometheus.Desc
|
||||
fsReadsMerged *prometheus.Desc
|
||||
fsReadTime *prometheus.Desc
|
||||
|
||||
fsWrites *prometheus.Desc
|
||||
fsWritesSectors *prometheus.Desc
|
||||
fsWritesMerged *prometheus.Desc
|
||||
fsWriteTime *prometheus.Desc
|
||||
|
||||
fsIoInProgress *prometheus.Desc
|
||||
fsIoTime *prometheus.Desc
|
||||
|
||||
fsWeightedIoTime *prometheus.Desc
|
||||
|
||||
networkRxBytes *prometheus.Desc
|
||||
networkRxPackets *prometheus.Desc
|
||||
networkRxErrors *prometheus.Desc
|
||||
networkRxDropped *prometheus.Desc
|
||||
networkTxBytes *prometheus.Desc
|
||||
networkTxPackets *prometheus.Desc
|
||||
networkTxErrors *prometheus.Desc
|
||||
networkTxDropped *prometheus.Desc
|
||||
|
||||
tasks *prometheus.Desc
|
||||
|
||||
descs []*prometheus.Desc
|
||||
infoProvider subcontainersInfoProvider
|
||||
errors prometheus.Gauge
|
||||
containerMetrics []containerMetric
|
||||
}
|
||||
|
||||
// NewPrometheusCollector returns a new PrometheusCollector.
|
||||
func NewPrometheusCollector(manager manager.Manager) *PrometheusCollector {
|
||||
func NewPrometheusCollector(infoProvider subcontainersInfoProvider) *PrometheusCollector {
|
||||
c := &PrometheusCollector{
|
||||
manager: manager,
|
||||
infoProvider: infoProvider,
|
||||
errors: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "container",
|
||||
Name: "scrape_error",
|
||||
Help: "1 if there was an error while getting container metrics, 0 otherwise",
|
||||
}),
|
||||
lastSeen: prometheus.NewDesc(
|
||||
"container_last_seen",
|
||||
"Last time a container was seen by the exporter",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
cpuUsageUserSeconds: prometheus.NewDesc(
|
||||
"container_cpu_user_seconds_total",
|
||||
"Cumulative user cpu time consumed in seconds.",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
cpuUsageSystemSeconds: prometheus.NewDesc(
|
||||
"container_cpu_system_seconds_total",
|
||||
"Cumulative system cpu time consumed in seconds.",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
cpuUsageSecondsPerCPU: prometheus.NewDesc(
|
||||
"container_cpu_usage_seconds_total",
|
||||
"Cumulative cpu time consumed per cpu in seconds.",
|
||||
[]string{"name", "id", "cpu"},
|
||||
nil),
|
||||
memoryUsageBytes: prometheus.NewDesc(
|
||||
"container_memory_usage_bytes",
|
||||
"Current memory usage in bytes.",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
memoryWorkingSet: prometheus.NewDesc(
|
||||
"container_memory_working_set_bytes",
|
||||
"Current working set in bytes.",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
memoryFailures: prometheus.NewDesc(
|
||||
"container_memory_failures_total",
|
||||
"Cumulative count of memory allocation failures.",
|
||||
[]string{"type", "scope", "name", "id"},
|
||||
nil),
|
||||
|
||||
fsLimit: prometheus.NewDesc(
|
||||
"container_fs_limit_bytes",
|
||||
"Number of bytes that can be consumed by the container on this filesystem.",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsUsage: prometheus.NewDesc(
|
||||
"container_fs_usage_bytes",
|
||||
"Number of bytes that are consumed by the container on this filesystem.",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsReads: prometheus.NewDesc(
|
||||
"container_fs_reads_total",
|
||||
"Cumulative count of reads completed",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsReadsSectors: prometheus.NewDesc(
|
||||
"container_fs_sector_reads_total",
|
||||
"Cumulative count of sector reads completed",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsReadsMerged: prometheus.NewDesc(
|
||||
"container_fs_reads_merged_total",
|
||||
"Cumulative count of reads merged",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsReadTime: prometheus.NewDesc(
|
||||
"container_fs_read_seconds_total",
|
||||
"Cumulative count of seconds spent reading",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsWrites: prometheus.NewDesc(
|
||||
"container_fs_writes_total",
|
||||
"Cumulative count of writes completed",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsWritesSectors: prometheus.NewDesc(
|
||||
"container_fs_sector_writes_total",
|
||||
"Cumulative count of sector writes completed",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsWritesMerged: prometheus.NewDesc(
|
||||
"container_fs_writes_merged_total",
|
||||
"Cumulative count of writes merged",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsWriteTime: prometheus.NewDesc(
|
||||
"container_fs_write_seconds_total",
|
||||
"Cumulative count of seconds spent writing",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsIoInProgress: prometheus.NewDesc(
|
||||
"container_fs_io_current",
|
||||
"Number of I/Os currently in progress",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsIoTime: prometheus.NewDesc(
|
||||
"container_fs_io_time_seconds_total",
|
||||
"Cumulative count of seconds spent doing I/Os",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
fsWeightedIoTime: prometheus.NewDesc(
|
||||
"container_fs_io_time_weighted_seconds_total",
|
||||
"Cumulative weighted I/O time in seconds",
|
||||
[]string{"name", "id", "device"},
|
||||
nil),
|
||||
networkRxBytes: prometheus.NewDesc(
|
||||
"container_network_receive_bytes_total",
|
||||
"Cumulative count of bytes received",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
networkRxPackets: prometheus.NewDesc(
|
||||
"container_network_receive_packets_total",
|
||||
"Cumulative count of packets received",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
networkRxDropped: prometheus.NewDesc(
|
||||
"container_network_receive_packets_dropped_total",
|
||||
"Cumulative count of packets dropped while receiving",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
networkRxErrors: prometheus.NewDesc(
|
||||
"container_network_receive_errors_total",
|
||||
"Cumulative count of errors encountered while receiving",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
networkTxBytes: prometheus.NewDesc(
|
||||
"container_network_transmit_bytes_total",
|
||||
"Cumulative count of bytes transmitted",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
networkTxPackets: prometheus.NewDesc(
|
||||
"container_network_transmit_packets_total",
|
||||
"Cumulative count of packets transmitted",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
networkTxDropped: prometheus.NewDesc(
|
||||
"container_network_transmit_packets_dropped_total",
|
||||
"Cumulative count of packets dropped while transmitting",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
networkTxErrors: prometheus.NewDesc(
|
||||
"container_network_transmit_errors_total",
|
||||
"Cumulative count of errors encountered while transmitting",
|
||||
[]string{"name", "id"},
|
||||
nil),
|
||||
|
||||
tasks: prometheus.NewDesc(
|
||||
"container_tasks_state",
|
||||
"Number of tasks in given state",
|
||||
[]string{"state", "name", "id"},
|
||||
nil),
|
||||
}
|
||||
c.descs = []*prometheus.Desc{
|
||||
c.lastSeen,
|
||||
|
||||
c.cpuUsageUserSeconds,
|
||||
c.cpuUsageSystemSeconds,
|
||||
|
||||
c.memoryUsageBytes,
|
||||
c.memoryWorkingSet,
|
||||
c.memoryFailures,
|
||||
|
||||
c.fsLimit,
|
||||
c.fsUsage,
|
||||
c.fsReads,
|
||||
c.fsReadsSectors,
|
||||
c.fsReadsMerged,
|
||||
c.fsReadTime,
|
||||
c.fsWrites,
|
||||
c.fsWritesSectors,
|
||||
c.fsWritesMerged,
|
||||
c.fsWriteTime,
|
||||
c.fsIoInProgress,
|
||||
c.fsIoTime,
|
||||
c.fsWeightedIoTime,
|
||||
|
||||
c.networkRxBytes,
|
||||
c.networkRxPackets,
|
||||
c.networkRxErrors,
|
||||
c.networkRxDropped,
|
||||
c.networkTxBytes,
|
||||
c.networkTxPackets,
|
||||
c.networkTxErrors,
|
||||
c.networkTxDropped,
|
||||
|
||||
c.tasks,
|
||||
containerMetrics: []containerMetric{
|
||||
{
|
||||
name: "container_last_seen",
|
||||
help: "Last time a container was seen by the exporter",
|
||||
valueType: prometheus.GaugeValue,
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{{value: float64(time.Now().Unix())}}
|
||||
},
|
||||
}, {
|
||||
name: "container_cpu_user_seconds_total",
|
||||
help: "Cumulative user cpu time consumed in seconds.",
|
||||
valueType: prometheus.CounterValue,
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{{value: float64(s.Cpu.Usage.User) / float64(time.Second)}}
|
||||
},
|
||||
}, {
|
||||
name: "container_cpu_system_seconds_total",
|
||||
help: "Cumulative system cpu time consumed in seconds.",
|
||||
valueType: prometheus.CounterValue,
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{{value: float64(s.Cpu.Usage.System) / float64(time.Second)}}
|
||||
},
|
||||
}, {
|
||||
name: "container_cpu_usage_seconds_total",
|
||||
help: "Cumulative cpu time consumed per cpu in seconds.",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"cpu"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
values := make(metricValues, 0, len(s.Cpu.Usage.PerCpu))
|
||||
for i, value := range s.Cpu.Usage.PerCpu {
|
||||
values = append(values, metricValue{
|
||||
value: float64(value) / float64(time.Second),
|
||||
labels: []string{fmt.Sprintf("cpu%02d", i)},
|
||||
})
|
||||
}
|
||||
return values
|
||||
},
|
||||
}, {
|
||||
name: "container_memory_usage_bytes",
|
||||
help: "Current memory usage in bytes.",
|
||||
valueType: prometheus.GaugeValue,
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{{value: float64(s.Memory.Usage)}}
|
||||
},
|
||||
}, {
|
||||
name: "container_memory_working_set_bytes",
|
||||
help: "Current working set in bytes.",
|
||||
valueType: prometheus.GaugeValue,
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{{value: float64(s.Memory.WorkingSet)}}
|
||||
},
|
||||
}, {
|
||||
name: "container_memory_failures_total",
|
||||
help: "Cumulative count of memory allocation failures.",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"type", "scope"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{
|
||||
{
|
||||
value: float64(s.Memory.ContainerData.Pgfault),
|
||||
labels: []string{"pgfault", "container"},
|
||||
},
|
||||
{
|
||||
value: float64(s.Memory.ContainerData.Pgmajfault),
|
||||
labels: []string{"pgmajfault", "container"},
|
||||
},
|
||||
{
|
||||
value: float64(s.Memory.HierarchicalData.Pgfault),
|
||||
labels: []string{"pgfault", "hierarchy"},
|
||||
},
|
||||
{
|
||||
value: float64(s.Memory.HierarchicalData.Pgmajfault),
|
||||
labels: []string{"pgmajfault", "hierarchy"},
|
||||
},
|
||||
}
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_limit_bytes",
|
||||
help: "Number of bytes that can be consumed by the container on this filesystem.",
|
||||
valueType: prometheus.GaugeValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.Limit)
|
||||
})
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_usage_bytes",
|
||||
help: "Number of bytes that are consumed by the container on this filesystem.",
|
||||
valueType: prometheus.GaugeValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.Usage)
|
||||
})
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_reads_total",
|
||||
help: "Cumulative count of reads completed",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadsCompleted)
|
||||
})
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_sector_reads_total",
|
||||
help: "Cumulative count of sector reads completed",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.SectorsRead)
|
||||
})
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_reads_merged_total",
|
||||
help: "Cumulative count of reads merged",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
||||
return float64(fs.ReadsMerged)
|
||||
})
|
||||
},
|
||||
}, {
|
||||
name: "container_fs_read_seconds_total",
|
||||
help: "Cumulative count of seconds spent reading",
|
||||
valueType: prometheus.CounterValue,
|
||||
extraLabels: []string{"device"},
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
|
					return float64(fs.ReadTime) / float64(time.Second)
				})
			},
		}, {
			name:        "container_fs_writes_total",
			help:        "Cumulative count of writes completed",
			valueType:   prometheus.CounterValue,
			extraLabels: []string{"device"},
			getValues: func(s *info.ContainerStats) metricValues {
				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
					return float64(fs.WritesCompleted)
				})
			},
		}, {
			name:        "container_fs_sector_writes_total",
			help:        "Cumulative count of sector writes completed",
			valueType:   prometheus.CounterValue,
			extraLabels: []string{"device"},
			getValues: func(s *info.ContainerStats) metricValues {
				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
					return float64(fs.SectorsWritten)
				})
			},
		}, {
			name:        "container_fs_writes_merged_total",
			help:        "Cumulative count of writes merged",
			valueType:   prometheus.CounterValue,
			extraLabels: []string{"device"},
			getValues: func(s *info.ContainerStats) metricValues {
				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
					return float64(fs.WritesMerged)
				})
			},
		}, {
			name:        "container_fs_write_seconds_total",
			help:        "Cumulative count of seconds spent writing",
			valueType:   prometheus.CounterValue,
			extraLabels: []string{"device"},
			getValues: func(s *info.ContainerStats) metricValues {
				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
					return float64(fs.WriteTime) / float64(time.Second)
				})
			},
		}, {
			name:        "container_fs_io_current",
			help:        "Number of I/Os currently in progress",
			valueType:   prometheus.GaugeValue,
			extraLabels: []string{"device"},
			getValues: func(s *info.ContainerStats) metricValues {
				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
					return float64(fs.IoInProgress)
				})
			},
		}, {
			name:        "container_fs_io_time_seconds_total",
			help:        "Cumulative count of seconds spent doing I/Os",
			valueType:   prometheus.CounterValue,
			extraLabels: []string{"device"},
			getValues: func(s *info.ContainerStats) metricValues {
				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
					return float64(fs.IoTime) / float64(time.Second)
				})
			},
		}, {
			name:        "container_fs_io_time_weighted_seconds_total",
			help:        "Cumulative weighted I/O time in seconds",
			valueType:   prometheus.CounterValue,
			extraLabels: []string{"device"},
			getValues: func(s *info.ContainerStats) metricValues {
				return fsValues(s.Filesystem, func(fs *info.FsStats) float64 {
					return float64(fs.WeightedIoTime) / float64(time.Second)
				})
			},
		}, {
			name:      "container_network_receive_bytes_total",
			help:      "Cumulative count of bytes received",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.RxBytes)}}
			},
		}, {
			name:      "container_network_receive_packets_total",
			help:      "Cumulative count of packets received",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.RxPackets)}}
			},
		}, {
			name:      "container_network_receive_packets_dropped_total",
			help:      "Cumulative count of packets dropped while receiving",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.RxDropped)}}
			},
		}, {
			name:      "container_network_receive_errors_total",
			help:      "Cumulative count of errors encountered while receiving",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.RxErrors)}}
			},
		}, {
			name:      "container_network_transmit_bytes_total",
			help:      "Cumulative count of bytes transmitted",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.TxBytes)}}
			},
		}, {
			name:      "container_network_transmit_packets_total",
			help:      "Cumulative count of packets transmitted",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.TxPackets)}}
			},
		}, {
			name:      "container_network_transmit_packets_dropped_total",
			help:      "Cumulative count of packets dropped while transmitting",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.TxDropped)}}
			},
		}, {
			name:      "container_network_transmit_errors_total",
			help:      "Cumulative count of errors encountered while transmitting",
			valueType: prometheus.CounterValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{{value: float64(s.Network.TxErrors)}}
			},
		}, {
			name:        "container_tasks_state",
			help:        "Number of tasks in given state",
			extraLabels: []string{"state"},
			valueType:   prometheus.GaugeValue,
			getValues: func(s *info.ContainerStats) metricValues {
				return metricValues{
					{
						value:  float64(s.TaskStats.NrSleeping),
						labels: []string{"sleeping"},
					},
					{
						value:  float64(s.TaskStats.NrRunning),
						labels: []string{"running"},
					},
					{
						value:  float64(s.TaskStats.NrStopped),
						labels: []string{"stopped"},
					},
					{
						value:  float64(s.TaskStats.NrUninterruptible),
						labels: []string{"uninterruptible"},
					},
					{
						value:  float64(s.TaskStats.NrIoWait),
						labels: []string{"iowaiting"},
					},
				}
			},
		},
		},
	}
	return c
}
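A note for readers of this hunk: fsValues is a small helper defined earlier in prometheus.go and not shown in the diff. Judging from the call sites above and the device label in the testdata below, its shape is presumably along these lines (a sketch; the names metricValue/metricValues are assumed, not quoted from the commit):

	// fsValues (sketch): turn a container's per-filesystem stats into one
	// labeled sample per device, with valueFn picking the exported field.
	func fsValues(fsStats []info.FsStats, valueFn func(*info.FsStats) float64) metricValues {
		values := make(metricValues, 0, len(fsStats))
		for i := range fsStats {
			values = append(values, metricValue{
				value:  valueFn(&fsStats[i]),
				labels: []string{fsStats[i].Device},
			})
		}
		return values
	}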
@@ -275,15 +381,15 @@ func NewPrometheusCollector(manager manager.Manager) *PrometheusCollector {

// implements prometheus.PrometheusCollector.
func (c *PrometheusCollector) Describe(ch chan<- *prometheus.Desc) {
	c.errors.Describe(ch)
	for _, d := range c.descs {
		ch <- d
	for _, cm := range c.containerMetrics {
		ch <- cm.desc()
	}
}

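Describe relies on containerMetric.desc(); the type and method are defined earlier in the file (not shown in this diff). From the fields used in the table above and the label sets in the testdata below, their shape is presumably as follows (a sketch, assuming the base labels are "name" and "id"):

	type containerMetric struct {
		name        string
		help        string
		valueType   prometheus.ValueType
		extraLabels []string
		getValues   func(s *info.ContainerStats) metricValues
	}

	// desc (sketch): every metric carries the base labels "name" and "id",
	// followed by metric-specific extraLabels such as "device", "cpu" or "state".
	func (cm *containerMetric) desc() *prometheus.Desc {
		return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id"}, cm.extraLabels...), nil)
	}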
// Collect fetches the stats from all containers and delivers them as
// Prometheus metrics. It implements prometheus.PrometheusCollector.
func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
	containers, err := c.manager.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
	containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
	if err != nil {
		c.errors.Set(1)
		glog.Warningf("Couldn't get containers: %s", err)
@@ -297,68 +403,10 @@ func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
	}
	stats := container.Stats[0]

	for desc, metrics := range map[*prometheus.Desc][]prometheusMetric{
		c.cpuUsageUserSeconds:   {{valueType: prometheus.CounterValue, value: float64(stats.Cpu.Usage.User) / float64(time.Second)}},
		c.cpuUsageSystemSeconds: {{valueType: prometheus.CounterValue, value: float64(stats.Cpu.Usage.System) / float64(time.Second)}},

		c.memoryFailures: {
			{valueType: prometheus.CounterValue, labels: []string{"pgfault", "container"}, value: float64(stats.Memory.ContainerData.Pgfault)},
			{valueType: prometheus.CounterValue, labels: []string{"pgmajfault", "container"}, value: float64(stats.Memory.ContainerData.Pgmajfault)},
			{valueType: prometheus.CounterValue, labels: []string{"pgfault", "hierarchy"}, value: float64(stats.Memory.HierarchicalData.Pgfault)},
			{valueType: prometheus.CounterValue, labels: []string{"pgmajfault", "hierarchy"}, value: float64(stats.Memory.HierarchicalData.Pgmajfault)},
		},
		c.tasks: {
			{valueType: prometheus.GaugeValue, labels: []string{"sleeping"}, value: float64(stats.TaskStats.NrSleeping)},
			{valueType: prometheus.GaugeValue, labels: []string{"running"}, value: float64(stats.TaskStats.NrRunning)},
			{valueType: prometheus.GaugeValue, labels: []string{"stopped"}, value: float64(stats.TaskStats.NrStopped)},
			{valueType: prometheus.GaugeValue, labels: []string{"uninterruptible"}, value: float64(stats.TaskStats.NrUninterruptible)},
			{valueType: prometheus.GaugeValue, labels: []string{"iowaiting"}, value: float64(stats.TaskStats.NrIoWait)},
		},

		c.lastSeen: {{valueType: prometheus.GaugeValue, value: float64(time.Now().Unix())}},

		c.memoryUsageBytes: {{valueType: prometheus.GaugeValue, value: float64(stats.Memory.Usage)}},
		c.memoryWorkingSet: {{valueType: prometheus.GaugeValue, value: float64(stats.Memory.WorkingSet)}},

		c.networkRxBytes:   {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxBytes)}},
		c.networkRxPackets: {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxPackets)}},
		c.networkRxErrors:  {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxErrors)}},
		c.networkRxDropped: {{valueType: prometheus.CounterValue, value: float64(stats.Network.RxDropped)}},
		c.networkTxBytes:   {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxBytes)}},
		c.networkTxPackets: {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxPackets)}},
		c.networkTxErrors:  {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxErrors)}},
		c.networkTxDropped: {{valueType: prometheus.CounterValue, value: float64(stats.Network.TxDropped)}},
	} {
		for _, m := range metrics {
			ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, float64(m.value), append(m.labels, name, id)...)
		}
	}

	// Metrics with dynamic labels
	for i, value := range stats.Cpu.Usage.PerCpu {
		ch <- prometheus.MustNewConstMetric(c.cpuUsageSecondsPerCPU, prometheus.CounterValue, float64(value)/float64(time.Second), name, id, fmt.Sprintf("cpu%02d", i))
	}

	for _, stat := range stats.Filesystem {
		for desc, m := range map[*prometheus.Desc]prometheusMetric{
			c.fsReads:        {valueType: prometheus.CounterValue, value: float64(stat.ReadsCompleted)},
			c.fsReadsSectors: {valueType: prometheus.CounterValue, value: float64(stat.SectorsRead)},
			c.fsReadsMerged:  {valueType: prometheus.CounterValue, value: float64(stat.ReadsMerged)},
			c.fsReadTime:     {valueType: prometheus.CounterValue, value: float64(stat.ReadTime) / float64(time.Second)},

			c.fsWrites:        {valueType: prometheus.CounterValue, value: float64(stat.WritesCompleted)},
			c.fsWritesSectors: {valueType: prometheus.CounterValue, value: float64(stat.SectorsWritten)},
			c.fsWritesMerged:  {valueType: prometheus.CounterValue, value: float64(stat.WritesMerged)},
			c.fsWriteTime:     {valueType: prometheus.CounterValue, value: float64(stat.WriteTime) / float64(time.Second)},

			c.fsIoTime:         {valueType: prometheus.CounterValue, value: float64(stat.IoTime) / float64(time.Second)},
			c.fsWeightedIoTime: {valueType: prometheus.CounterValue, value: float64(stat.WeightedIoTime) / float64(time.Second)},

			c.fsIoInProgress: {valueType: prometheus.GaugeValue, value: float64(stat.IoInProgress)},
			c.fsLimit:        {valueType: prometheus.GaugeValue, value: float64(stat.Limit)},
			c.fsUsage:        {valueType: prometheus.GaugeValue, value: float64(stat.Usage)},
		} {
			ch <- prometheus.MustNewConstMetric(desc, m.valueType, m.value, name, id, stat.Device)
	for _, cm := range c.containerMetrics {
		desc := cm.desc()
		for _, metricValue := range cm.getValues(stats) {
			ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append([]string{name, id}, metricValue.labels...)...)
		}
	}
}

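The switch from c.manager to c.infoProvider above is what makes the collector unit-testable: the constructor no longer needs a full manager.Manager, only something that can answer SubcontainersInfo. From the call in Collect and the fake in the test below, the interface is presumably just:

	// infoProvider (sketch): satisfied by manager.Manager in production and by
	// testSubcontainersInfoProvider in the unit test below.
	type infoProvider interface {
		SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error)
	}

Note also the label ordering in the new emission loop: the base labels name and id come first, then the metric-specific labels, which must match the variable-label order used when the Desc was built.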
145 Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus_test.go generated vendored Normal file
@@ -0,0 +1,145 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metrics

import (
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"regexp"
	"strings"
	"testing"

	info "github.com/google/cadvisor/info/v1"
	"github.com/prometheus/client_golang/prometheus"
)

type testSubcontainersInfoProvider struct{}

func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.ContainerInfoRequest) ([]*info.ContainerInfo, error) {
	return []*info.ContainerInfo{
		{
			ContainerReference: info.ContainerReference{
				Name: "testcontainer",
			},
			Stats: []*info.ContainerStats{
				{
					Cpu: info.CpuStats{
						Usage: info.CpuUsage{
							Total:  1,
							PerCpu: []uint64{2, 3, 4, 5},
							User:   6,
							System: 7,
						},
					},
					Memory: info.MemoryStats{
						Usage:      8,
						WorkingSet: 9,
						ContainerData: info.MemoryStatsMemoryData{
							Pgfault:    10,
							Pgmajfault: 11,
						},
						HierarchicalData: info.MemoryStatsMemoryData{
							Pgfault:    12,
							Pgmajfault: 13,
						},
					},
					Network: info.NetworkStats{
						RxBytes:   14,
						RxPackets: 15,
						RxErrors:  16,
						RxDropped: 17,
						TxBytes:   18,
						TxPackets: 19,
						TxErrors:  20,
						TxDropped: 21,
					},
					Filesystem: []info.FsStats{
						{
							Device:          "sda1",
							Limit:           22,
							Usage:           23,
							ReadsCompleted:  24,
							ReadsMerged:     25,
							SectorsRead:     26,
							ReadTime:        27,
							WritesCompleted: 28,
							WritesMerged:    39,
							SectorsWritten:  40,
							WriteTime:       41,
							IoInProgress:    42,
							IoTime:          43,
							WeightedIoTime:  44,
						},
						{
							Device:          "sda2",
							Limit:           37,
							Usage:           38,
							ReadsCompleted:  39,
							ReadsMerged:     40,
							SectorsRead:     41,
							ReadTime:        42,
							WritesCompleted: 43,
							WritesMerged:    44,
							SectorsWritten:  45,
							WriteTime:       46,
							IoInProgress:    47,
							IoTime:          48,
							WeightedIoTime:  49,
						},
					},
					TaskStats: info.LoadStats{
						NrSleeping:        50,
						NrRunning:         51,
						NrStopped:         52,
						NrUninterruptible: 53,
						NrIoWait:          54,
					},
				},
			},
		},
	}, nil
}

func TestPrometheusCollector(t *testing.T) {
	prometheus.MustRegister(NewPrometheusCollector(testSubcontainersInfoProvider{}))

	rw := httptest.NewRecorder()
	prometheus.Handler().ServeHTTP(rw, &http.Request{})

	metricsFile := "testdata/prometheus_metrics"
	wantMetrics, err := ioutil.ReadFile(metricsFile)
	if err != nil {
		t.Fatalf("unable to read input test file %s", metricsFile)
	}

	wantLines := strings.Split(string(wantMetrics), "\n")
	gotLines := strings.Split(rw.Body.String(), "\n")

	// Until the Prometheus Go client library offers better testability
	// (https://github.com/prometheus/client_golang/issues/58), we simply compare
	// verbatim text-format metrics outputs, but ignore certain metric lines
	// whose value depends on the current time or local circumstances.
	includeRe := regexp.MustCompile("^(# HELP |# TYPE |)container_")
	ignoreRe := regexp.MustCompile("^container_last_seen{")
	for i, want := range wantLines {
		if !includeRe.MatchString(want) || ignoreRe.MatchString(want) {
			continue
		}
		if want != gotLines[i] {
			t.Fatalf("want %s, got %s", want, gotLines[i])
		}
	}
}
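The test is a golden-file comparison: it scrapes the registered collector through the stock Prometheus handler and diffs the text exposition line by line against testdata/prometheus_metrics, restricted to container_* lines and skipping container_last_seen, whose value is the scrape time. If the metric set changes, the fixture has to be regenerated; a throwaway helper along these lines (hypothetical, not part of the commit) would reuse the same fake provider:

	// dumpTestdata (hypothetical): rewrite the golden file from the fake stats.
	func dumpTestdata() error {
		prometheus.MustRegister(NewPrometheusCollector(testSubcontainersInfoProvider{}))
		rw := httptest.NewRecorder()
		prometheus.Handler().ServeHTTP(rw, &http.Request{})
		return ioutil.WriteFile("testdata/prometheus_metrics", rw.Body.Bytes(), 0644)
	}

Lines outside the container_* families (the http_* and process_* output below) would differ between runs, but the test never compares them.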
155 Godeps/_workspace/src/github.com/google/cadvisor/metrics/testdata/prometheus_metrics generated vendored Normal file
@@ -0,0 +1,155 @@
# HELP container_cpu_system_seconds_total Cumulative system cpu time consumed in seconds.
# TYPE container_cpu_system_seconds_total counter
container_cpu_system_seconds_total{id="testcontainer",name="testcontainer"} 7e-09
# HELP container_cpu_usage_seconds_total Cumulative cpu time consumed per cpu in seconds.
# TYPE container_cpu_usage_seconds_total counter
container_cpu_usage_seconds_total{cpu="cpu00",id="testcontainer",name="testcontainer"} 2e-09
container_cpu_usage_seconds_total{cpu="cpu01",id="testcontainer",name="testcontainer"} 3e-09
container_cpu_usage_seconds_total{cpu="cpu02",id="testcontainer",name="testcontainer"} 4e-09
container_cpu_usage_seconds_total{cpu="cpu03",id="testcontainer",name="testcontainer"} 5e-09
# HELP container_cpu_user_seconds_total Cumulative user cpu time consumed in seconds.
# TYPE container_cpu_user_seconds_total counter
container_cpu_user_seconds_total{id="testcontainer",name="testcontainer"} 6e-09
# HELP container_fs_io_current Number of I/Os currently in progress
# TYPE container_fs_io_current gauge
container_fs_io_current{device="sda1",id="testcontainer",name="testcontainer"} 42
container_fs_io_current{device="sda2",id="testcontainer",name="testcontainer"} 47
# HELP container_fs_io_time_seconds_total Cumulative count of seconds spent doing I/Os
# TYPE container_fs_io_time_seconds_total counter
container_fs_io_time_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.3e-08
container_fs_io_time_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.8e-08
# HELP container_fs_io_time_weighted_seconds_total Cumulative weighted I/O time in seconds
# TYPE container_fs_io_time_weighted_seconds_total counter
container_fs_io_time_weighted_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.4e-08
container_fs_io_time_weighted_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.9e-08
# HELP container_fs_limit_bytes Number of bytes that can be consumed by the container on this filesystem.
# TYPE container_fs_limit_bytes gauge
container_fs_limit_bytes{device="sda1",id="testcontainer",name="testcontainer"} 22
container_fs_limit_bytes{device="sda2",id="testcontainer",name="testcontainer"} 37
# HELP container_fs_read_seconds_total Cumulative count of seconds spent reading
# TYPE container_fs_read_seconds_total counter
container_fs_read_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 2.7e-08
container_fs_read_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.2e-08
# HELP container_fs_reads_merged_total Cumulative count of reads merged
# TYPE container_fs_reads_merged_total counter
container_fs_reads_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 25
container_fs_reads_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 40
# HELP container_fs_reads_total Cumulative count of reads completed
# TYPE container_fs_reads_total counter
container_fs_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 24
container_fs_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 39
# HELP container_fs_sector_reads_total Cumulative count of sector reads completed
# TYPE container_fs_sector_reads_total counter
container_fs_sector_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 26
container_fs_sector_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 41
# HELP container_fs_sector_writes_total Cumulative count of sector writes completed
# TYPE container_fs_sector_writes_total counter
container_fs_sector_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 40
container_fs_sector_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 45
# HELP container_fs_usage_bytes Number of bytes that are consumed by the container on this filesystem.
# TYPE container_fs_usage_bytes gauge
container_fs_usage_bytes{device="sda1",id="testcontainer",name="testcontainer"} 23
container_fs_usage_bytes{device="sda2",id="testcontainer",name="testcontainer"} 38
# HELP container_fs_write_seconds_total Cumulative count of seconds spent writing
# TYPE container_fs_write_seconds_total counter
container_fs_write_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.1e-08
container_fs_write_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.6e-08
# HELP container_fs_writes_merged_total Cumulative count of writes merged
# TYPE container_fs_writes_merged_total counter
container_fs_writes_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 39
container_fs_writes_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 44
# HELP container_fs_writes_total Cumulative count of writes completed
# TYPE container_fs_writes_total counter
container_fs_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 28
container_fs_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 43
# HELP container_last_seen Last time a container was seen by the exporter
# TYPE container_last_seen gauge
container_last_seen{id="testcontainer",name="testcontainer"} 1.426203694e+09
# HELP container_memory_failures_total Cumulative count of memory allocation failures.
# TYPE container_memory_failures_total counter
container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgfault"} 10
container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgmajfault"} 11
container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgfault"} 12
container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgmajfault"} 13
# HELP container_memory_usage_bytes Current memory usage in bytes.
# TYPE container_memory_usage_bytes gauge
container_memory_usage_bytes{id="testcontainer",name="testcontainer"} 8
# HELP container_memory_working_set_bytes Current working set in bytes.
# TYPE container_memory_working_set_bytes gauge
container_memory_working_set_bytes{id="testcontainer",name="testcontainer"} 9
# HELP container_network_receive_bytes_total Cumulative count of bytes received
# TYPE container_network_receive_bytes_total counter
container_network_receive_bytes_total{id="testcontainer",name="testcontainer"} 14
# HELP container_network_receive_errors_total Cumulative count of errors encountered while receiving
# TYPE container_network_receive_errors_total counter
container_network_receive_errors_total{id="testcontainer",name="testcontainer"} 16
# HELP container_network_receive_packets_dropped_total Cumulative count of packets dropped while receiving
# TYPE container_network_receive_packets_dropped_total counter
container_network_receive_packets_dropped_total{id="testcontainer",name="testcontainer"} 17
# HELP container_network_receive_packets_total Cumulative count of packets received
# TYPE container_network_receive_packets_total counter
container_network_receive_packets_total{id="testcontainer",name="testcontainer"} 15
# HELP container_network_transmit_bytes_total Cumulative count of bytes transmitted
# TYPE container_network_transmit_bytes_total counter
container_network_transmit_bytes_total{id="testcontainer",name="testcontainer"} 18
# HELP container_network_transmit_errors_total Cumulative count of errors encountered while transmitting
# TYPE container_network_transmit_errors_total counter
container_network_transmit_errors_total{id="testcontainer",name="testcontainer"} 20
# HELP container_network_transmit_packets_dropped_total Cumulative count of packets dropped while transmitting
# TYPE container_network_transmit_packets_dropped_total counter
container_network_transmit_packets_dropped_total{id="testcontainer",name="testcontainer"} 21
# HELP container_network_transmit_packets_total Cumulative count of packets transmitted
# TYPE container_network_transmit_packets_total counter
container_network_transmit_packets_total{id="testcontainer",name="testcontainer"} 19
# HELP container_scrape_error 1 if there was an error while getting container metrics, 0 otherwise
# TYPE container_scrape_error gauge
container_scrape_error 0
# HELP container_tasks_state Number of tasks in given state
# TYPE container_tasks_state gauge
container_tasks_state{id="testcontainer",name="testcontainer",state="iowaiting"} 54
container_tasks_state{id="testcontainer",name="testcontainer",state="running"} 51
container_tasks_state{id="testcontainer",name="testcontainer",state="sleeping"} 50
container_tasks_state{id="testcontainer",name="testcontainer",state="stopped"} 52
container_tasks_state{id="testcontainer",name="testcontainer",state="uninterruptible"} 53
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
# TYPE http_request_duration_microseconds summary
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 0
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 0
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="prometheus"} 0
http_request_duration_microseconds_count{handler="prometheus"} 0
# HELP http_request_size_bytes The HTTP request sizes in bytes.
# TYPE http_request_size_bytes summary
http_request_size_bytes{handler="prometheus",quantile="0.5"} 0
http_request_size_bytes{handler="prometheus",quantile="0.9"} 0
http_request_size_bytes{handler="prometheus",quantile="0.99"} 0
http_request_size_bytes_sum{handler="prometheus"} 0
http_request_size_bytes_count{handler="prometheus"} 0
# HELP http_response_size_bytes The HTTP response sizes in bytes.
# TYPE http_response_size_bytes summary
http_response_size_bytes{handler="prometheus",quantile="0.5"} 0
http_response_size_bytes{handler="prometheus",quantile="0.9"} 0
http_response_size_bytes{handler="prometheus",quantile="0.99"} 0
http_response_size_bytes_sum{handler="prometheus"} 0
http_response_size_bytes_count{handler="prometheus"} 0
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0
# HELP process_goroutines Number of goroutines that currently exist.
# TYPE process_goroutines gauge
process_goroutines 16
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1024
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 4
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 7.74144e+06
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.42620369439e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 1.16420608e+08
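Two things worth noting about this fixture: the tiny *_seconds_total values are the fake stats divided by float64(time.Second), i.e. by 1e9 nanoseconds, so ReadTime: 27 for sda1 surfaces as 2.7e-08 above; and the http_* and process_* families at the bottom come from the Prometheus default registry rather than from cAdvisor, which is exactly why the test's include regexp limits the comparison to container_* lines.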