Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-09 12:07:47 +00:00
kubelet: fix metric container_start_time_seconds's timestamp
Adapting the tests and reverting https://github.com/kubernetes/kubernetes/pull/103429

Carry-over from https://github.com/kubernetes/kubernetes/pull/117881

Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
This commit is contained in:
parent f8ca582a06
commit 5e0931336b
@@ -211,8 +211,7 @@ func (rc *resourceMetricsCollector) collectContainerStartTime(ch chan<- metrics.
 		return
 	}
 
-	ch <- metrics.NewLazyMetricWithTimestamp(s.StartTime.Time,
-		metrics.NewLazyConstMetric(containerStartTimeDesc, metrics.GaugeValue, float64(s.StartTime.UnixNano())/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
+	ch <- metrics.NewLazyConstMetric(containerStartTimeDesc, metrics.GaugeValue, float64(s.StartTime.UnixNano())/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace)
 }
 
 func (rc *resourceMetricsCollector) collectContainerCPUMetrics(ch chan<- metrics.Metric, pod summary.PodStats, s summary.ContainerStats) {
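The behavioral difference is easiest to see with the underlying Prometheus client primitives. Below is a minimal, standalone sketch: it uses github.com/prometheus/client_golang directly rather than the k8s.io/component-base/metrics wrappers that appear in the diff, and the label values and start time are borrowed from the test fixtures, so treat it as an illustration rather than the kubelet's actual collector. The old path attaches an explicit per-sample timestamp equal to the container start time; the new path leaves the timestamp unset so the scraper assigns the scrape time.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Hypothetical container start time, chosen to match the test fixtures
	// (1.6243962483020916e+09 seconds since the epoch).
	start := time.Date(2021, 6, 22, 21, 10, 48, 302091600, time.UTC)

	desc := prometheus.NewDesc(
		"container_start_time_seconds",
		"Start time of the container since unix epoch in seconds",
		[]string{"container", "pod", "namespace"}, nil,
	)

	// Same value computation as the collector: seconds since the epoch.
	value := float64(start.UnixNano()) / float64(time.Second)

	// Old path: the sample carries an explicit timestamp equal to the start time.
	withTimestamp := prometheus.NewMetricWithTimestamp(start,
		prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, value,
			"container_a", "pod_a", "namespace_a"))

	// New path: no explicit timestamp; the scraper stamps the sample at scrape time.
	withoutTimestamp := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, value,
		"container_a", "pod_a", "namespace_a")

	for _, m := range []prometheus.Metric{withTimestamp, withoutTimestamp} {
		var out dto.Metric
		if err := m.Write(&out); err != nil {
			panic(err)
		}
		// The first metric reports timestamp_ms = 1624396248302; the second
		// leaves it at zero, meaning "no explicit timestamp".
		fmt.Printf("value=%g timestamp_ms=%d\n", out.GetGauge().GetValue(), out.GetTimestampMs())
	}
}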
@@ -213,9 +213,9 @@ func TestCollectResourceMetrics(t *testing.T) {
 container_memory_working_set_bytes{container="container_b",namespace="namespace_a",pod="pod_a"} 1000 1624396278302
 # HELP container_start_time_seconds [STABLE] Start time of the container since unix epoch in seconds
 # TYPE container_start_time_seconds gauge
-container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09 1624396248302
-container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09 1624395678302
-container_start_time_seconds{container="container_b",namespace="namespace_a",pod="pod_a"} 1.6243961583020916e+09 1624396158302
+container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09
+container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09
+container_start_time_seconds{container="container_b",namespace="namespace_a",pod="pod_a"} 1.6243961583020916e+09
 # HELP container_swap_usage_bytes [ALPHA] Current amount of the container swap usage in bytes. Reported only on non-windows systems
 # TYPE container_swap_usage_bytes gauge
 container_swap_usage_bytes{container="container_a",namespace="namespace_a",pod="pod_a"} 1000 1624396278302
@@ -319,8 +319,8 @@ func TestCollectResourceMetrics(t *testing.T) {
 container_memory_working_set_bytes{container="container_a",namespace="namespace_b",pod="pod_b"} 1000 1624396278302
 # HELP container_start_time_seconds [STABLE] Start time of the container since unix epoch in seconds
 # TYPE container_start_time_seconds gauge
-container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09 1624396248302
-container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09 1624395678302
+container_start_time_seconds{container="container_a",namespace="namespace_a",pod="pod_a"} 1.6243962483020916e+09
+container_start_time_seconds{container="container_a",namespace="namespace_b",pod="pod_b"} 1.6243956783020916e+09
 # HELP scrape_error [ALPHA] 1 if there was an error while getting container metrics, 0 otherwise
 # TYPE scrape_error gauge
 scrape_error 0
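In both fixture hunks above, the only change is that the expected container_start_time_seconds samples lose their trailing integer. In the Prometheus text exposition format that trailing integer is an optional per-sample timestamp in milliseconds since the epoch, and here it encoded the same instant as the sample value itself. A small sketch of that relationship, using the first fixture value above:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Sample value from the fixture: container start time in seconds since the epoch.
	startSeconds := 1.6243962483020916e+09
	start := time.Unix(0, int64(startSeconds*float64(time.Second)))

	// The trailing integer the old fixtures carried is the same instant expressed
	// as the optional per-sample timestamp, in milliseconds since the epoch.
	fmt.Println(start.UnixMilli()) // 1624396248302
}

Dropping the explicit timestamp does not lose the start time, which remains the sample value; it only lets the sample be stamped with the scrape time, which appears to be the fix the commit title describes.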
@@ -163,6 +163,10 @@ func boundedSample(lower, upper interface{}) types.GomegaMatcher {
 		"Metric": gstruct.Ignore(),
 		"Value":  gomega.And(gomega.BeNumerically(">=", lower), gomega.BeNumerically("<=", upper)),
 		"Timestamp": gomega.WithTransform(func(t model.Time) time.Time {
+			if t.Unix() <= 0 {
+				return time.Now()
+			}
+
 			// model.Time is in Milliseconds since epoch
 			return time.Unix(0, int64(t)*int64(time.Millisecond))
 		},
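The extra branch in boundedSample appears to accommodate exactly this change: once the sample no longer carries an explicit timestamp, the decoded model.Time can be zero or negative, and the matcher falls back to the current time instead of rejecting the sample. A self-contained sketch of the same transform, using github.com/prometheus/common/model as the test does:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// toTime mirrors the transform in boundedSample: model.Time counts milliseconds
// since the epoch, and a non-positive value is treated as "no explicit
// timestamp", so the current time is used instead.
func toTime(t model.Time) time.Time {
	if t.Unix() <= 0 {
		return time.Now()
	}
	return time.Unix(0, int64(t)*int64(time.Millisecond))
}

func main() {
	// A sample stamped at the container start time, as in the old fixtures.
	fmt.Println(toTime(model.Time(1624396248302)))
	// A zero timestamp (e.g. a scraped sample without an explicit one) maps to "now".
	fmt.Println(toTime(model.Time(0)))
}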