Merge pull request #24542 from Random-Liu/versioned-perf-data

Automatic merge from submit-queue

Use mCPU as CPU usage unit, add version in PerfData, and fix memory usage bug.

Partially addresses #24436.

This PR:
1) Changes the CPU usage unit to "mCPU".
2) Adds a version field in PerfData; perfdash will only support the newest version now.
3) Fixes a mistake in calculating the memory usage average.

/cc @vishh
This commit is contained in:
k8s-merge-robot 2016-04-23 01:17:59 -07:00
commit 71f0d058b6
3 changed files with 30 additions and 8 deletions

View File

@ -669,7 +669,7 @@ func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode)
nodeAvgUsage[c] = &ContainerResourceUsage{Name: usage.Name} nodeAvgUsage[c] = &ContainerResourceUsage{Name: usage.Name}
} }
nodeAvgUsage[c].CPUUsageInCores += usage.CPUUsageInCores nodeAvgUsage[c].CPUUsageInCores += usage.CPUUsageInCores
nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes nodeAvgUsage[c].MemoryUsageInBytes += usage.MemoryUsageInBytes
nodeAvgUsage[c].MemoryWorkingSetInBytes += usage.MemoryWorkingSetInBytes nodeAvgUsage[c].MemoryWorkingSetInBytes += usage.MemoryWorkingSetInBytes
nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes
} }

View File

@ -25,9 +25,13 @@ import (
// TODO(random-liu): Change the tests to actually use PerfData from the beginning instead of // TODO(random-liu): Change the tests to actually use PerfData from the beginning instead of
// translating one to the other here. // translating one to the other here.
// currentApiCallMetricsVersion is the current apicall performance metrics version. We should
// bump up the version each time we make incompatible change to the metrics.
const currentApiCallMetricsVersion = "v1"
// ApiCallToPerfData transforms APIResponsiveness to PerfData. // ApiCallToPerfData transforms APIResponsiveness to PerfData.
func ApiCallToPerfData(apicalls APIResponsiveness) *perftype.PerfData { func ApiCallToPerfData(apicalls APIResponsiveness) *perftype.PerfData {
perfData := &perftype.PerfData{} perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
for _, apicall := range apicalls.APICalls { for _, apicall := range apicalls.APICalls {
item := perftype.DataItem{ item := perftype.DataItem{
Data: map[string]float64{ Data: map[string]float64{
@ -46,6 +50,10 @@ func ApiCallToPerfData(apicalls APIResponsiveness) *perftype.PerfData {
return perfData return perfData
} }
// currentKubeletPerfMetricsVersion is the current kubelet performance metrics version. We should
// bump up the version each time we make incompatible change to the metrics.
const currentKubeletPerfMetricsVersion = "v1"
// ResourceUsageToPerfData transforms ResourceUsagePerNode to PerfData. Notice that this function // ResourceUsageToPerfData transforms ResourceUsagePerNode to PerfData. Notice that this function
// only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary. // only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData { func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData {
@ -54,7 +62,7 @@ func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfDa
for c, usage := range usages { for c, usage := range usages {
item := perftype.DataItem{ item := perftype.DataItem{
Data: map[string]float64{ Data: map[string]float64{
"memory": float64(usage.MemoryRSSInBytes) / (1024 * 1024), "memory": float64(usage.MemoryUsageInBytes) / (1024 * 1024),
"workingset": float64(usage.MemoryWorkingSetInBytes) / (1024 * 1024), "workingset": float64(usage.MemoryWorkingSetInBytes) / (1024 * 1024),
"rss": float64(usage.MemoryRSSInBytes) / (1024 * 1024), "rss": float64(usage.MemoryRSSInBytes) / (1024 * 1024),
}, },
@ -68,7 +76,10 @@ func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfDa
items = append(items, item) items = append(items, item)
} }
} }
return &perftype.PerfData{DataItems: items} return &perftype.PerfData{
Version: currentKubeletPerfMetricsVersion,
DataItems: items,
}
} }
// CPUUsageToPerfData transforms NodesCPUSummary to PerfData. // CPUUsageToPerfData transforms NodesCPUSummary to PerfData.
@ -78,11 +89,11 @@ func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
for c, usage := range usages { for c, usage := range usages {
data := map[string]float64{} data := map[string]float64{}
for perc, value := range usage { for perc, value := range usage {
data[fmt.Sprintf("Perc%02.0f", perc*100)] = value * 100 data[fmt.Sprintf("Perc%02.0f", perc*100)] = value * 1000
} }
item := perftype.DataItem{ item := perftype.DataItem{
Data: data, Data: data,
Unit: "%", Unit: "mCPU",
Labels: map[string]string{ Labels: map[string]string{
"node": node, "node": node,
"container": c, "container": c,
@ -92,13 +103,17 @@ func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
items = append(items, item) items = append(items, item)
} }
} }
return &perftype.PerfData{DataItems: items} return &perftype.PerfData{
Version: currentKubeletPerfMetricsVersion,
DataItems: items,
}
} }
// PrintPerfData prints the perfdata in json format with PerfResultTag prefix. // PrintPerfData prints the perfdata in json format with PerfResultTag prefix.
// If an error occurs, nothing will be printed. // If an error occurs, nothing will be printed.
func PrintPerfData(p *perftype.PerfData) { func PrintPerfData(p *perftype.PerfData) {
// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
if str := PrettyPrintJSON(p); str != "" { if str := PrettyPrintJSON(p); str != "" {
Logf("%s", perftype.PerfResultTag+" "+str) Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
} }
} }

View File

@ -36,9 +36,16 @@ type DataItem struct {
// PerfData contains all data items generated in current test. // PerfData contains all data items generated in current test.
type PerfData struct { type PerfData struct {
// Version is the version of the metrics. The metrics consumer could use the version
// to detect metrics version change and decide what version to support.
Version string `json:"version"`
DataItems []DataItem `json:"dataItems"` DataItems []DataItem `json:"dataItems"`
} }
// PerfResultTag is the prefix of generated perfdata. Analyzing tools can find the perf result // PerfResultTag is the prefix of generated perfdata. Analyzing tools can find the perf result
// with this tag. // with this tag.
const PerfResultTag = "[Result:Performance]" const PerfResultTag = "[Result:Performance]"
// PerfResultEnd is the end of generated perfdata. Analyzing tools can find the end of the perf
// result with this tag.
const PerfResultEnd = "[Finish:Performance]"