Refactor and clean up e2e framework utils, this patch handles test/e2e/framework/perf_util.go

WanLinghao
2019-07-23 10:57:56 +08:00
parent 08f9f2b58a
commit 4c394ddf3b
16 changed files with 222 additions and 154 deletions

View File

@@ -37,8 +37,10 @@ go_library(
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/job:go_default_library",
+        "//test/e2e/framework/kubelet:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
+        "//test/e2e/framework/perf:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/volume:go_default_library",

View File

@@ -26,7 +26,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -46,8 +48,8 @@ const (
 type resourceTest struct {
 	podsPerNode int
-	cpuLimits   framework.ContainersCPUSummary
-	memLimits   framework.ResourceUsagePerContainer
+	cpuLimits   e2ekubelet.ContainersCPUSummary
+	memLimits   e2ekubelet.ResourceUsagePerContainer
 }
 
 func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
@@ -62,7 +64,7 @@ func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
 }
 
 func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
-	expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
+	expectedCPU map[string]map[float64]float64, expectedMemory e2ekubelet.ResourceUsagePerContainer) {
 	numNodes := nodeNames.Len()
 	totalPods := podsPerNode * numNodes
 	ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
@@ -107,20 +109,20 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	// TODO(random-liu): Remove the original log when we migrate to new perfdash
 	e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
 	// Log perf result
-	framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
+	e2eperf.PrintPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
 	verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)
 
 	cpuSummary := rm.GetCPUSummary()
 	e2elog.Logf("%s", rm.FormatCPUSummary(cpuSummary))
 	// Log perf result
-	framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
+	e2eperf.PrintPerfData(e2eperf.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
 	verifyCPULimits(expectedCPU, cpuSummary)
 
 	ginkgo.By("Deleting the RC")
 	framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 }
 
-func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
+func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) {
 	if expected == nil {
 		return
 	}
@@ -156,7 +158,7 @@ func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsageP
 	}
 }
 
-func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
+func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet.NodesCPUSummary) {
 	if expected == nil {
 		return
 	}
@@ -233,25 +235,25 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 		rTests := []resourceTest{
 			{
 				podsPerNode: 0,
-				cpuLimits: framework.ContainersCPUSummary{
+				cpuLimits: e2ekubelet.ContainersCPUSummary{
 					kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.10, 0.95: 0.20},
 					kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.20},
 				},
-				memLimits: framework.ResourceUsagePerContainer{
-					kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
+				memLimits: e2ekubelet.ResourceUsagePerContainer{
+					kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
 					// The detail can be found at https://github.com/kubernetes/kubernetes/issues/28384#issuecomment-244158892
-					kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 125 * 1024 * 1024},
+					kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 125 * 1024 * 1024},
 				},
 			},
 			{
-				cpuLimits: framework.ContainersCPUSummary{
+				cpuLimits: e2ekubelet.ContainersCPUSummary{
 					kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.35, 0.95: 0.50},
 					kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.50},
 				},
 				podsPerNode: 100,
-				memLimits: framework.ResourceUsagePerContainer{
-					kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
-					kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 350 * 1024 * 1024},
+				memLimits: e2ekubelet.ResourceUsagePerContainer{
+					kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
+					kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 350 * 1024 * 1024},
 				},
 			},
 		}
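The hunks shown above only re-route call sites from the monolithic framework package to the new e2ekubelet and e2eperf sub-packages; the helper calls themselves are unchanged. A minimal sketch of the post-refactor usage, assuming the aliases and helper names that appear in the diff (the package name and the logSummaries wrapper are hypothetical and not part of this commit):

package node

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
)

// logSummaries is a hypothetical wrapper mirroring the call sites in
// runResourceTrackingTest above: the summary types now come from e2ekubelet,
// and the perf-data conversion/printing helpers are reached through e2eperf.
func logSummaries(rm *framework.ResourceMonitor, usageSummary e2ekubelet.ResourceUsagePerNode, cpuSummary e2ekubelet.NodesCPUSummary) {
	e2eperf.PrintPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
	e2eperf.PrintPerfData(e2eperf.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
}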