Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00

Merge pull request #81849 from oomichi/move-PrintPerfData

Move PrintPerfData to the test

Commit: 39724859b5
@@ -7,8 +7,6 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//test/e2e/framework/kubelet:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
-        "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/perftype:go_default_library",
     ],
 )

@@ -20,8 +20,6 @@ import (
 	"fmt"
 
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	"k8s.io/kubernetes/test/e2e/perftype"
 )
 

@@ -41,15 +39,6 @@ func CPUUsageToPerfData(usagePerNode e2ekubelet.NodesCPUSummary) *perftype.PerfData {
 	return CPUUsageToPerfDataWithLabels(usagePerNode, nil)
 }
 
-// PrintPerfData prints the perfdata in json format with PerfResultTag prefix.
-// If an error occurs, nothing will be printed.
-func PrintPerfData(p *perftype.PerfData) {
-	// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
-	if str := e2emetrics.PrettyPrintJSON(p); str != "" {
-		e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
-	}
-}
-
 // ResourceUsageToPerfDataWithLabels transforms ResourceUsagePerNode to PerfData with additional labels.
 // Notice that this function only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
 func ResourceUsageToPerfDataWithLabels(usagePerNode e2ekubelet.ResourceUsagePerNode, labels map[string]string) *perftype.PerfData {

@@ -39,11 +39,13 @@ go_library(
         "//test/e2e/framework/job:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/perf:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
+        "//test/e2e/perftype:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",

@@ -28,7 +28,9 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 	e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
+	"k8s.io/kubernetes/test/e2e/perftype"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 

@@ -109,13 +111,13 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	// TODO(random-liu): Remove the original log when we migrate to new perfdash
 	e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
 	// Log perf result
-	e2eperf.PrintPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
+	printPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
 	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)
 
 	cpuSummary := rm.GetCPUSummary()
 	e2elog.Logf("%s", rm.FormatCPUSummary(cpuSummary))
 	// Log perf result
-	e2eperf.PrintPerfData(e2eperf.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
+	printPerfData(e2eperf.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
 	verifyCPULimits(expectedCPU, cpuSummary)
 
 	ginkgo.By("Deleting the RC")

@@ -279,3 +281,12 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 	}
 	})
 })
+
+// printPerfData prints the perfdata in json format with PerfResultTag prefix.
+// If an error occurs, nothing will be printed.
+func printPerfData(p *perftype.PerfData) {
+	// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
+	if str := e2emetrics.PrettyPrintJSON(p); str != "" {
+		e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
+	}
+}

@@ -59,7 +59,7 @@ func dumpDataToFile(data interface{}, labels map[string]string, prefix string) {
 // as "cpu" and "memory". If an error occurs, no perf data will be logged.
 func logPerfData(p *perftype.PerfData, perfType string) {
 	if framework.TestContext.ReportDir == "" {
-		e2eperf.PrintPerfData(p)
+		printPerfData(p)
 		return
 	}
 	dumpDataToFile(p, p.Labels, "performance-"+perfType)

@@ -190,3 +190,12 @@ func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[string]string {
 		"desc": testDesc,
 	}
 }
+
+// printPerfData prints the perfdata in json format with PerfResultTag prefix.
+// If an error occurs, nothing will be printed.
+func printPerfData(p *perftype.PerfData) {
+	// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
+	if str := e2emetrics.PrettyPrintJSON(p); str != "" {
+		e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
+	}
+}