diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go
index 66a6d30ee47..f21379552e4 100644
--- a/test/e2e/framework/metrics_util.go
+++ b/test/e2e/framework/metrics_util.go
@@ -134,9 +134,10 @@ var InterestingKubeletMetrics = []string{
 
 // Dashboard metrics
 type LatencyMetric struct {
-	Perc50 time.Duration `json:"Perc50"`
-	Perc90 time.Duration `json:"Perc90"`
-	Perc99 time.Duration `json:"Perc99"`
+	Perc50  time.Duration `json:"Perc50"`
+	Perc90  time.Duration `json:"Perc90"`
+	Perc99  time.Duration `json:"Perc99"`
+	Perc100 time.Duration `json:"Perc100"`
 }
 
 type PodStartupLatency struct {
@@ -450,7 +451,8 @@ func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
 	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
 	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
 	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
-	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99}
+	perc100 := latencies[length-1].Latency
+	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
 }
 
 // LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go
index eb404ec0ecc..919e79e94ee 100644
--- a/test/e2e_node/benchmark_util.go
+++ b/test/e2e_node/benchmark_util.go
@@ -27,7 +27,7 @@ import (
 )
 
 const (
-	// TODO(coufon): be consistent with perf_util.go version (not exposed)
+	// TODO(coufon): be consistent with perf_util.go version
 	currentTimeSeriesVersion = "v1"
 	TimeSeriesTag            = "[Result:TimeSeries]"
 	TimeSeriesEnd            = "[Finish:TimeSeries]"
@@ -35,8 +35,8 @@ const (
 
 type NodeTimeSeries struct {
 	// value in OperationData is an array of timestamps
-	OperationData map[string][]int64         `json:"op_data,omitempty"`
-	ResourceData  map[string]*ResourceSeries `json:"resource_data,omitempty"`
+	OperationData map[string][]int64         `json:"op_series,omitempty"`
+	ResourceData  map[string]*ResourceSeries `json:"resource_series,omitempty"`
 	Labels        map[string]string          `json:"labels"`
 	Version       string                     `json:"version"`
 }
@@ -85,14 +85,15 @@ func getLatencyPerfData(latency framework.LatencyMetric, testName string) *perft
 		DataItems: []perftype.DataItem{
 			{
 				Data: map[string]float64{
-					"Perc50": float64(latency.Perc50) / 1000000,
-					"Perc90": float64(latency.Perc90) / 1000000,
-					"Perc99": float64(latency.Perc99) / 1000000,
+					"Perc50":  float64(latency.Perc50) / 1000000,
+					"Perc90":  float64(latency.Perc90) / 1000000,
+					"Perc99":  float64(latency.Perc99) / 1000000,
+					"Perc100": float64(latency.Perc100) / 1000000,
 				},
 				Unit: "ms",
 				Labels: map[string]string{
 					"datatype":    "latency",
-					"latencytype": "test-e2e",
+					"latencytype": "create-pod",
 				},
 			},
 		},
diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go
index 83f44012a60..b9472eddd72 100644
--- a/test/e2e_node/density_test.go
+++ b/test/e2e_node/density_test.go
@@ -372,7 +372,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 	rc.Start()
 	defer rc.Stop()
 
-	// create pods sequentially (back-to-back)
+	// Create pods sequentially (back-to-back). e2eLags have been sorted.
 	batchlag, e2eLags := createBatchPodSequential(f, testPods)
 
 	// Log throughput data.
diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go
index 9d3a396646a..79285831742 100644
--- a/test/e2e_node/resource_collector.go
+++ b/test/e2e_node/resource_collector.go
@@ -206,7 +206,7 @@ func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
 func (r resourceUsageByCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }
 
 // The percentiles to report.
-var percentiles = [...]float64{0.05, 0.20, 0.50, 0.70, 0.90, 0.95, 0.99}
+var percentiles = [...]float64{0.50, 0.90, 0.95, 0.99, 1.00}
 
 // GetBasicCPUStats returns the percentiles the cpu usage in cores for
 // containerName. This method examines all data currently in the buffer.