add peak (100%) latency and CPU usage in perf data

Zhou Fang 2016-08-19 14:21:04 -07:00
parent 95eb9efb11
commit 30eb6882f4
4 changed files with 16 additions and 13 deletions


@@ -134,9 +134,10 @@ var InterestingKubeletMetrics = []string{
 // Dashboard metrics
 type LatencyMetric struct {
-	Perc50 time.Duration `json:"Perc50"`
-	Perc90 time.Duration `json:"Perc90"`
-	Perc99 time.Duration `json:"Perc99"`
+	Perc50  time.Duration `json:"Perc50"`
+	Perc90  time.Duration `json:"Perc90"`
+	Perc99  time.Duration `json:"Perc99"`
+	Perc100 time.Duration `json:"Perc100"`
 }
 type PodStartupLatency struct {
@@ -450,7 +451,8 @@ func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
 	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
 	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
 	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
-	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99}
+	perc100 := latencies[length-1].Latency
+	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
 }
 // LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times
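For reference, a minimal, self-contained sketch (not part of this commit) of how the index arithmetic above behaves, assuming the input slice is already sorted ascending; the stand-in types below only mimic the framework ones:

package main

import (
	"fmt"
	"math"
	"time"
)

// Minimal stand-ins for the framework types used by ExtractLatencyMetrics.
type PodLatencyData struct{ Latency time.Duration }

type LatencyMetric struct {
	Perc50, Perc90, Perc99, Perc100 time.Duration
}

// extract mirrors the patched index arithmetic: the slice is assumed to be
// sorted ascending, and Perc100 is simply the last (slowest) sample.
func extract(latencies []PodLatencyData) LatencyMetric {
	length := len(latencies)
	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
	perc100 := latencies[length-1].Latency
	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
}

func main() {
	// Four sorted samples: the chosen indices are 1, 3, 3 and 3,
	// so Perc100 equals the maximum observed latency (4s).
	data := []PodLatencyData{{1 * time.Second}, {2 * time.Second}, {3 * time.Second}, {4 * time.Second}}
	fmt.Printf("%+v\n", extract(data))
}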


@@ -27,7 +27,7 @@ import (
 )
 const (
-	// TODO(coufon): be consistent with perf_util.go version (not exposed)
+	// TODO(coufon): be consistent with perf_util.go version
 	currentTimeSeriesVersion = "v1"
 	TimeSeriesTag = "[Result:TimeSeries]"
 	TimeSeriesEnd = "[Finish:TimeSeries]"
@@ -35,8 +35,8 @@ const (
 type NodeTimeSeries struct {
 	// value in OperationData is an array of timestamps
-	OperationData map[string][]int64 `json:"op_data,omitempty"`
-	ResourceData map[string]*ResourceSeries `json:"resource_data,omitempty"`
+	OperationData map[string][]int64 `json:"op_series,omitempty"`
+	ResourceData map[string]*ResourceSeries `json:"resource_series,omitempty"`
 	Labels map[string]string `json:"labels"`
 	Version string `json:"version"`
 }
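A quick, hypothetical round-trip (not from this commit) showing what the renamed JSON tags emit; ResourceSeries is not defined in this hunk, so an empty placeholder stands in for it, and the map contents are made up for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// ResourceSeries is not shown in this diff; an empty placeholder is enough
// to exercise the renamed tags.
type ResourceSeries struct{}

type NodeTimeSeries struct {
	OperationData map[string][]int64         `json:"op_series,omitempty"`
	ResourceData  map[string]*ResourceSeries `json:"resource_series,omitempty"`
	Labels        map[string]string          `json:"labels"`
	Version       string                     `json:"version"`
}

func main() {
	s := NodeTimeSeries{
		OperationData: map[string][]int64{"create_pod": {1471640464000, 1471640465000}},
		Labels:        map[string]string{"node": "n1"},
		Version:       "v1",
	}
	b, _ := json.Marshal(s)
	// Any consumer still parsing the old "op_data"/"resource_data" keys would
	// need updating to the names emitted here.
	fmt.Println(string(b))
}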
@@ -85,14 +85,15 @@ func getLatencyPerfData(latency framework.LatencyMetric, testName string) *perft
 		DataItems: []perftype.DataItem{
 			{
 				Data: map[string]float64{
-					"Perc50": float64(latency.Perc50) / 1000000,
-					"Perc90": float64(latency.Perc90) / 1000000,
-					"Perc99": float64(latency.Perc99) / 1000000,
+					"Perc50":  float64(latency.Perc50) / 1000000,
+					"Perc90":  float64(latency.Perc90) / 1000000,
+					"Perc99":  float64(latency.Perc99) / 1000000,
+					"Perc100": float64(latency.Perc100) / 1000000,
 				},
 				Unit: "ms",
 				Labels: map[string]string{
 					"datatype": "latency",
-					"latencytype": "test-e2e",
+					"latencytype": "create-pod",
 				},
 			},
 		},
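The division by 1000000 above works because time.Duration counts nanoseconds; a tiny sketch (illustration only, not from this commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Duration is an int64 number of nanoseconds, so dividing by 1e6
	// yields the millisecond values reported under Unit: "ms".
	perc100 := 2350 * time.Millisecond
	fmt.Println(float64(perc100) / 1000000) // prints 2350
}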


@@ -372,7 +372,7 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 	rc.Start()
 	defer rc.Stop()
-	// create pods sequentially (back-to-back)
+	// Create pods sequentially (back-to-back). e2eLags have been sorted.
 	batchlag, e2eLags := createBatchPodSequential(f, testPods)
 	// Log throughput data.
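createBatchPodSequential itself is not shown in this diff; the updated comment only records that the returned e2eLags are already ordered, which the Perc100 lookup in ExtractLatencyMetrics relies on. A hypothetical sketch of sorting such latencies by duration:

package main

import (
	"fmt"
	"sort"
	"time"
)

type PodLatencyData struct {
	Name    string
	Latency time.Duration
}

func main() {
	// Made-up samples; after sorting, the last element is the Perc100 sample.
	e2eLags := []PodLatencyData{
		{"pod-b", 900 * time.Millisecond},
		{"pod-a", 300 * time.Millisecond},
		{"pod-c", 1200 * time.Millisecond},
	}
	sort.Slice(e2eLags, func(i, j int) bool { return e2eLags[i].Latency < e2eLags[j].Latency })
	fmt.Println(e2eLags[len(e2eLags)-1]) // slowest pod
}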


@@ -206,7 +206,7 @@ func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
 func (r resourceUsageByCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }
 // The percentiles to report.
-var percentiles = [...]float64{0.05, 0.20, 0.50, 0.70, 0.90, 0.95, 0.99}
+var percentiles = [...]float64{0.50, 0.90, 0.95, 0.99, 1.00}
 // GetBasicCPUStats returns the percentiles the cpu usage in cores for
 // containerName. This method examines all data currently in the buffer.
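The new list drops the low percentiles and adds 1.00, so the report now includes peak CPU usage. The body of GetBasicCPUStats is not visible in this hunk, so the following is only an illustrative sketch of nearest-rank percentile selection over sorted usage samples:

package main

import (
	"fmt"
	"math"
	"sort"
)

// percentileOf picks the q-th percentile (0 < q <= 1) of usages by the
// nearest-rank rule; the exact index arithmetic used by GetBasicCPUStats may
// differ.
func percentileOf(usages []float64, q float64) float64 {
	sorted := append([]float64(nil), usages...)
	sort.Float64s(sorted)
	idx := int(math.Ceil(float64(len(sorted))*q)) - 1
	if idx < 0 {
		idx = 0
	}
	return sorted[idx]
}

func main() {
	usages := []float64{0.12, 0.35, 0.80, 0.41, 0.25} // made-up CPU samples in cores
	for _, q := range []float64{0.50, 0.90, 0.95, 0.99, 1.00} {
		// q == 1.00 always lands on the last (largest) sample, i.e. peak usage.
		fmt.Printf("p%.0f = %.2f cores\n", q*100, percentileOf(usages, q))
	}
}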