mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 19:01:49 +00:00)
e2e: move funs of framework/metrics to e2e_node
This commit is contained in:
parent ded2ff39c3
commit 8d3a8d5a6c
@@ -19,7 +19,6 @@ go_library(
         "metrics_grabber.go",
         "pod.go",
-        "scheduler_metrics.go",
         "scheduling.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
     deps = [
@@ -17,10 +17,7 @@ limitations under the License.
 package metrics
 
 import (
-	"math"
 	"time"
-
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 const (
@@ -46,20 +43,3 @@ type LatencySlice []PodLatencyData
 func (a LatencySlice) Len() int { return len(a) }
 func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }
-
-// ExtractLatencyMetrics returns latency metrics for each percentile(50th, 90th and 99th).
-func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
-	length := len(latencies)
-	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
-	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
-	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
-	perc100 := latencies[length-1].Latency
-	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
-}
-
-// PrintLatencies outputs latencies to log with readable format.
-func PrintLatencies(latencies []PodLatencyData, header string) {
-	metrics := ExtractLatencyMetrics(latencies)
-	e2elog.Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
-	e2elog.Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
-}
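The two exported helpers removed above reappear, unexported, in the e2e_node density test later in this diff. Their percentile selection is a ceil-based index into an already sorted slice, which is why LatencySlice implements sort.Interface. A minimal, self-contained sketch of that index math; the sample durations are illustrative only, not taken from the commit:

package main

import (
	"fmt"
	"math"
	"sort"
	"time"
)

// percentile returns the value at the p-th percentile of an already sorted
// slice, using the same formula as the removed helper: index = ceil(len*p/100) - 1.
func percentile(sorted []time.Duration, p int) time.Duration {
	idx := int(math.Ceil(float64(len(sorted)*p)/100)) - 1
	return sorted[idx]
}

func main() {
	// Illustrative latencies; in the tests these come from pod startup timing.
	latencies := []time.Duration{
		120 * time.Millisecond, 80 * time.Millisecond, 95 * time.Millisecond,
		200 * time.Millisecond, 150 * time.Millisecond, 110 * time.Millisecond,
		90 * time.Millisecond, 300 * time.Millisecond, 130 * time.Millisecond,
		100 * time.Millisecond,
	}
	sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })

	// For len=10 this picks indices 4, 8 and 9; the 100th percentile is the last element.
	fmt.Println("perc50:", percentile(latencies, 50))
	fmt.Println("perc90:", percentile(latencies, 90))
	fmt.Println("perc99:", percentile(latencies, 99))
	fmt.Println("perc100:", latencies[len(latencies)-1])
}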
@@ -1,44 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package metrics
-
-// SchedulingMetrics is a struct for managing scheduling metrics.
-type SchedulingMetrics struct {
-	PredicateEvaluationLatency  LatencyMetric `json:"predicateEvaluationLatency"`
-	PriorityEvaluationLatency   LatencyMetric `json:"priorityEvaluationLatency"`
-	PreemptionEvaluationLatency LatencyMetric `json:"preemptionEvaluationLatency"`
-	BindingLatency              LatencyMetric `json:"bindingLatency"`
-	ThroughputAverage           float64       `json:"throughputAverage"`
-	ThroughputPerc50            float64       `json:"throughputPerc50"`
-	ThroughputPerc90            float64       `json:"throughputPerc90"`
-	ThroughputPerc99            float64       `json:"throughputPerc99"`
-}
-
-// SummaryKind returns the summary of scheduling metrics.
-func (l *SchedulingMetrics) SummaryKind() string {
-	return "SchedulingMetrics"
-}
-
-// PrintHumanReadable returns scheduling metrics with JSON format.
-func (l *SchedulingMetrics) PrintHumanReadable() string {
-	return PrettyPrintJSON(l)
-}
-
-// PrintJSON returns scheduling metrics with JSON format.
-func (l *SchedulingMetrics) PrintJSON() string {
-	return PrettyPrintJSON(l)
-}
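The deleted file followed the framework's summary-type pattern: a JSON-tagged struct whose PrintHumanReadable and PrintJSON both dump it through PrettyPrintJSON. A rough, self-contained approximation of that pattern; prettyPrintJSON and the trimmed field set here are assumptions for illustration, not the framework's actual implementation (note that time.Duration fields marshal as nanoseconds):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// latencyMetric is an illustrative stand-in for the framework's LatencyMetric:
// one duration per tracked percentile.
type latencyMetric struct {
	Perc50  time.Duration `json:"Perc50"`
	Perc90  time.Duration `json:"Perc90"`
	Perc99  time.Duration `json:"Perc99"`
	Perc100 time.Duration `json:"Perc100"`
}

// schedulingMetrics keeps only a few of the fields from the deleted struct.
type schedulingMetrics struct {
	PredicateEvaluationLatency latencyMetric `json:"predicateEvaluationLatency"`
	BindingLatency             latencyMetric `json:"bindingLatency"`
	ThroughputAverage          float64       `json:"throughputAverage"`
}

// prettyPrintJSON approximates what PrettyPrintJSON presumably does:
// marshal the summary with indentation so it can be pasted into test logs.
func prettyPrintJSON(v interface{}) string {
	b, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return fmt.Sprintf("error marshaling summary: %v", err)
	}
	return string(b)
}

func main() {
	m := schedulingMetrics{
		BindingLatency:    latencyMetric{Perc50: 5 * time.Millisecond, Perc99: 40 * time.Millisecond},
		ThroughputAverage: 98.5,
	}
	fmt.Println(prettyPrintJSON(m))
}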
@@ -218,6 +218,7 @@ go_test(
        "//staging/src/k8s.io/component-base/cli/flag:go_default_library",
        "//test/e2e/framework/config:go_default_library",
        "//test/e2e/framework/kubelet:go_default_library",
        "//test/e2e/framework/log:go_default_library",
        "//test/e2e/framework/perf:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/generated:go_default_library",
@@ -235,6 +236,7 @@ go_test(
        "//staging/src/k8s.io/component-base/cli/flag:go_default_library",
        "//test/e2e/framework/config:go_default_library",
        "//test/e2e/framework/kubelet:go_default_library",
        "//test/e2e/framework/log:go_default_library",
        "//test/e2e/framework/perf:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/generated:go_default_library",
@@ -20,6 +20,7 @@ package e2enode

import (
	"fmt"
	"math"
	"sort"
	"strconv"
	"sync"
@@ -36,6 +37,7 @@ import (
	kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -548,16 +550,33 @@ func verifyLatencyWithinThreshold(threshold, actual e2emetrics.LatencyMetric, me
 	return nil
 }
 
+// extractLatencyMetrics returns latency metrics for each percentile(50th, 90th and 99th).
+func extractLatencyMetrics(latencies []e2emetrics.PodLatencyData) e2emetrics.LatencyMetric {
+	length := len(latencies)
+	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
+	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
+	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
+	perc100 := latencies[length-1].Latency
+	return e2emetrics.LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
+}
+
+// printLatencies outputs latencies to log with readable format.
+func printLatencies(latencies []e2emetrics.PodLatencyData, header string) {
+	metrics := extractLatencyMetrics(latencies)
+	e2elog.Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
+	e2elog.Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
+}
+
 // logAndVerifyLatency verifies that whether pod creation latency satisfies the limit.
 func logAndVerifyLatency(batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podStartupLimits e2emetrics.LatencyMetric,
 	podBatchStartupLimit time.Duration, testInfo map[string]string, isVerify bool) {
-	e2emetrics.PrintLatencies(e2eLags, "worst client e2e total latencies")
+	printLatencies(e2eLags, "worst client e2e total latencies")
 
 	// TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
 	latencyMetrics, _ := getPodStartLatency(kubeletAddr)
 	framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))
 
-	podStartupLatency := e2emetrics.ExtractLatencyMetrics(e2eLags)
+	podStartupLatency := extractLatencyMetrics(e2eLags)
 
 	// log latency perf data
 	logPerfData(getLatencyPerfData(podStartupLatency, testInfo), "latency")
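Taken together, the density test now owns the whole latency pipeline: sort the per-pod latencies, extract the percentiles, log them, and compare them against the configured limits (the verifyLatencyWithinThreshold named in the hunk header above). A condensed, hypothetical sketch of that flow using only the standard library; the types and the threshold check are simplified stand-ins, not the actual e2e framework API:

package main

import (
	"fmt"
	"math"
	"sort"
	"time"
)

// podLatencyData and latencyMetric are simplified stand-ins for
// e2emetrics.PodLatencyData and e2emetrics.LatencyMetric.
type podLatencyData struct {
	Name    string
	Latency time.Duration
}

type latencyMetric struct {
	Perc50, Perc90, Perc99, Perc100 time.Duration
}

// extractLatencyMetrics mirrors the moved helper: ceil-based percentile
// indices into a sorted slice.
func extractLatencyMetrics(latencies []podLatencyData) latencyMetric {
	n := len(latencies)
	idx := func(p int) int { return int(math.Ceil(float64(n*p)/100)) - 1 }
	return latencyMetric{
		Perc50:  latencies[idx(50)].Latency,
		Perc90:  latencies[idx(90)].Latency,
		Perc99:  latencies[idx(99)].Latency,
		Perc100: latencies[n-1].Latency,
	}
}

// verifyLatencyWithinThreshold is an assumed reading of the helper named in
// the hunk header: fail if any tracked percentile exceeds its threshold.
func verifyLatencyWithinThreshold(threshold, actual latencyMetric, metricName string) error {
	if actual.Perc50 > threshold.Perc50 {
		return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
	}
	if actual.Perc90 > threshold.Perc90 {
		return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
	}
	if actual.Perc99 > threshold.Perc99 {
		return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
	}
	return nil
}

func main() {
	lags := []podLatencyData{
		{"pod-a", 2 * time.Second}, {"pod-b", 5 * time.Second},
		{"pod-c", 3 * time.Second}, {"pod-d", 9 * time.Second},
	}
	// The helpers assume a sorted slice, which is why LatencySlice implements sort.Interface.
	sort.Slice(lags, func(i, j int) bool { return lags[i].Latency < lags[j].Latency })

	m := extractLatencyMetrics(lags)
	fmt.Printf("perc50: %v, perc90: %v, perc99: %v\n", m.Perc50, m.Perc90, m.Perc99)

	limits := latencyMetric{Perc50: 5 * time.Second, Perc90: 10 * time.Second, Perc99: 15 * time.Second}
	if err := verifyLatencyWithinThreshold(limits, m, "pod startup"); err != nil {
		fmt.Println("FAIL:", err)
	}
}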