mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-28 05:57:25 +00:00
Merge pull request #51553 from wongma7/pvc-prometheus
Automatic merge from submit-queue

Expose PVC metrics via kubelet prometheus

This depends on https://github.com/kubernetes/kubernetes/pull/51448, but is being opened early. The second commit is mine and is mostly a copy/paste job. It implements the metrics listed in https://github.com/kubernetes/community/pull/855, following the method described in https://github.com/kubernetes/community/pull/930#issuecomment-325509736.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: https://github.com/kubernetes/features/issues/363

**Special notes for your reviewer**:

**Release note**:

```release-note
PersistentVolumeClaim metrics like "volume_stats_inodes" and "volume_stats_capacity_bytes" are now reported via kubelet prometheus
```
This commit is contained in:
commit 578195873a
@@ -26,18 +26,24 @@ import (
 )
 
 const (
 	KubeletSubsystem             = "kubelet"
 	PodWorkerLatencyKey          = "pod_worker_latency_microseconds"
 	PodStartLatencyKey           = "pod_start_latency_microseconds"
 	CgroupManagerOperationsKey   = "cgroup_manager_latency_microseconds"
 	DockerOperationsLatencyKey   = "docker_operations_latency_microseconds"
 	DockerOperationsKey          = "docker_operations"
 	DockerOperationsErrorsKey    = "docker_operations_errors"
 	DockerOperationsTimeoutKey   = "docker_operations_timeout"
 	PodWorkerStartLatencyKey     = "pod_worker_start_latency_microseconds"
 	PLEGRelistLatencyKey         = "pleg_relist_latency_microseconds"
 	PLEGRelistIntervalKey        = "pleg_relist_interval_microseconds"
 	EvictionStatsAgeKey          = "eviction_stats_age_microseconds"
+	VolumeStatsCapacityBytesKey  = "volume_stats_capacity_bytes"
+	VolumeStatsAvailableBytesKey = "volume_stats_available_bytes"
+	VolumeStatsUsedBytesKey      = "volume_stats_used_bytes"
+	VolumeStatsInodesKey         = "volume_stats_inodes"
+	VolumeStatsInodesFreeKey     = "volume_stats_inodes_free"
+	VolumeStatsInodesUsedKey     = "volume_stats_inodes_used"
 	// Metrics keys of remote runtime operations
 	RuntimeOperationsKey        = "runtime_operations"
 	RuntimeOperationsLatencyKey = "runtime_operations_latency_microseconds"
@@ -162,6 +168,54 @@ var (
 		},
 		[]string{"eviction_signal"},
 	)
+	VolumeStatsCapacityBytes = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Subsystem: KubeletSubsystem,
+			Name:      VolumeStatsCapacityBytesKey,
+			Help:      "Capacity in bytes of the volume",
+		},
+		[]string{"namespace", "persistentvolumeclaim"},
+	)
+	VolumeStatsAvailableBytes = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Subsystem: KubeletSubsystem,
+			Name:      VolumeStatsAvailableBytesKey,
+			Help:      "Number of available bytes in the volume",
+		},
+		[]string{"namespace", "persistentvolumeclaim"},
+	)
+	VolumeStatsUsedBytes = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Subsystem: KubeletSubsystem,
+			Name:      VolumeStatsUsedBytesKey,
+			Help:      "Number of used bytes in the volume",
+		},
+		[]string{"namespace", "persistentvolumeclaim"},
+	)
+	VolumeStatsInodes = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Subsystem: KubeletSubsystem,
+			Name:      VolumeStatsInodesKey,
+			Help:      "Maximum number of inodes in the volume",
+		},
+		[]string{"namespace", "persistentvolumeclaim"},
+	)
+	VolumeStatsInodesFree = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Subsystem: KubeletSubsystem,
+			Name:      VolumeStatsInodesFreeKey,
+			Help:      "Number of free inodes in the volume",
+		},
+		[]string{"namespace", "persistentvolumeclaim"},
+	)
+	VolumeStatsInodesUsed = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Subsystem: KubeletSubsystem,
+			Name:      VolumeStatsInodesUsedKey,
+			Help:      "Number of used inodes in the volume",
+		},
+		[]string{"namespace", "persistentvolumeclaim"},
+	)
 )
 
 var registerMetrics sync.Once
@@ -186,6 +240,12 @@ func Register(containerCache kubecontainer.RuntimeCache) {
 		prometheus.MustRegister(RuntimeOperationsLatency)
 		prometheus.MustRegister(RuntimeOperationsErrors)
 		prometheus.MustRegister(EvictionStatsAge)
+		prometheus.MustRegister(VolumeStatsCapacityBytes)
+		prometheus.MustRegister(VolumeStatsAvailableBytes)
+		prometheus.MustRegister(VolumeStatsUsedBytes)
+		prometheus.MustRegister(VolumeStatsInodes)
+		prometheus.MustRegister(VolumeStatsInodesFree)
+		prometheus.MustRegister(VolumeStatsInodesUsed)
 	})
 }
 
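All six new collectors are GaugeVecs under the `kubelet` subsystem with the label set `{namespace, persistentvolumeclaim}`, so each PVC gets its own time series (for example `kubelet_volume_stats_capacity_bytes{namespace="default",persistentvolumeclaim="data"}`). Below is a minimal, self-contained sketch of that register-and-set pattern using only the Prometheus client library; the variable name `capacityBytes` and the sample PVC `default/data` are illustrative and not part of the kubelet code.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// GaugeVec mirroring the shape of the kubelet's new PVC metrics:
	// subsystem "kubelet", name "volume_stats_capacity_bytes", and the
	// labels "namespace" and "persistentvolumeclaim".
	capacityBytes := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: "kubelet",
			Name:      "volume_stats_capacity_bytes",
			Help:      "Capacity in bytes of the volume",
		},
		[]string{"namespace", "persistentvolumeclaim"},
	)
	prometheus.MustRegister(capacityBytes)

	// One Set call per PVC per stats pass, as setPVCMetrics does below.
	capacityBytes.WithLabelValues("default", "data").Set(1 << 30)

	// Read the sample back to show the labelled series that a scrape of
	// the kubelet's /metrics endpoint would expose.
	var m dto.Metric
	if err := capacityBytes.WithLabelValues("default", "data").Write(&m); err != nil {
		panic(err)
	}
	fmt.Printf("kubelet_volume_stats_capacity_bytes{namespace=%q,persistentvolumeclaim=%q} %v\n",
		m.Label[0].GetValue(), m.Label[1].GetValue(), m.GetGauge().GetValue())
}
```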
@@ -15,6 +15,7 @@ go_library(
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
         "//pkg/kubelet/cm:go_default_library",
         "//pkg/kubelet/container:go_default_library",
+        "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/volume:go_default_library",
         "//vendor/github.com/emicklei/go-restful:go_default_library",
@@ -24,6 +24,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
+	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/volume"
 
@@ -85,6 +86,7 @@ func (s *volumeStatCalculator) GetLatest() (PodVolumeStats, bool) {
 }
 
 // calcAndStoreStats calculates PodVolumeStats for a given pod and writes the result to the s.latest cache.
+// If the pod references PVCs, the prometheus metrics for those are updated with the result.
 func (s *volumeStatCalculator) calcAndStoreStats() {
 	// Find all Volumes for the Pod
 	volumes, found := s.statsProvider.ListVolumesForPod(s.pod.UID)
@@ -117,6 +119,8 @@ func (s *volumeStatCalculator) calcAndStoreStats() {
 				Namespace: s.pod.GetNamespace(),
 			}
 			fsStats = append(fsStats, s.parsePodVolumeStats(name, &pvcRef, metric))
+			// Set the PVC's prometheus metrics
+			s.setPVCMetrics(&pvcRef, metric)
 		} else {
 			fsStats = append(fsStats, s.parsePodVolumeStats(name, nil, metric))
 		}
@@ -141,3 +145,13 @@ func (s *volumeStatCalculator) parsePodVolumeStats(podName string, pvcRef *stats
 			UsedBytes: &used, Inodes: &inodes, InodesFree: &inodesFree, InodesUsed: &inodesUsed},
 	}
 }
+
+// setPVCMetrics sets the given PVC's prometheus metrics to match the given volume.Metrics
+func (s *volumeStatCalculator) setPVCMetrics(pvcRef *stats.PVCReference, metric *volume.Metrics) {
+	metrics.VolumeStatsAvailableBytes.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.Available.Value()))
+	metrics.VolumeStatsCapacityBytes.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.Capacity.Value()))
+	metrics.VolumeStatsUsedBytes.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.Used.Value()))
+	metrics.VolumeStatsInodes.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.Inodes.Value()))
+	metrics.VolumeStatsInodesFree.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.InodesFree.Value()))
+	metrics.VolumeStatsInodesUsed.WithLabelValues(pvcRef.Namespace, pvcRef.Name).Set(float64(metric.InodesUsed.Value()))
+}
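`setPVCMetrics` feeds each gauge by calling `Value()` on the corresponding `resource.Quantity` in `volume.Metrics` and widening the result to `float64`. A small sketch of just that conversion step, assuming the `k8s.io/apimachinery` resource package; the sample quantities are made up for illustration:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Quantities in the form the volume stats are reported.
	capacity := resource.MustParse("1Gi")
	used := resource.MustParse("200Mi")

	// Value() returns the quantity rounded to an int64 number of base units
	// (bytes here); it is then widened to float64 before being handed to the gauge.
	capacityBytes := float64(capacity.Value())
	usedBytes := float64(used.Value())

	fmt.Println(capacityBytes, usedBytes) // 1.073741824e+09 2.097152e+08
}
```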