Merge pull request #77532 from WanLinghao/perf_refactor

Refactor and clean up the e2e framework utils. This patch handles the test/e2e/framework/perf_util.go file.
Kubernetes Prow Robot 2019-07-25 13:02:45 -07:00 committed by GitHub
commit 63a43402a3
16 changed files with 222 additions and 154 deletions
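At a glance, the change splits the monolithic framework package: the kubelet resource-usage types move to test/e2e/framework/kubelet and the perf helpers to test/e2e/framework/perf, and call sites switch to the e2ekubelet and e2eperf aliases seen throughout the diffs below. A minimal sketch of the post-refactor call shape (logCPUSummary is an illustrative helper, not part of the commit):

package example

import (
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
)

// logCPUSummary uses the relocated APIs: the summary type now lives in
// e2ekubelet and the conversion/printing helpers in e2eperf, where
// framework.NodesCPUSummary and framework.CPUUsageToPerfData were used before.
func logCPUSummary(summary e2ekubelet.NodesCPUSummary) {
	e2eperf.PrintPerfData(e2eperf.CPUUsageToPerfData(summary))
}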

View File

@@ -16,7 +16,6 @@ go_library(
"log_size_monitoring.go",
"networking_utils.go",
"nodes_util.go",
"perf_util.go",
"pods.go",
"profile_gatherer.go",
"provider.go",
@@ -95,6 +94,7 @@ go_library(
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
@@ -102,7 +102,6 @@ go_library(
"//test/e2e/framework/resource:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -137,10 +136,12 @@ filegroup(
"//test/e2e/framework/gpu:all-srcs",
"//test/e2e/framework/ingress:all-srcs",
"//test/e2e/framework/job:all-srcs",
"//test/e2e/framework/kubelet:all-srcs",
"//test/e2e/framework/lifecycle:all-srcs",
"//test/e2e/framework/log:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/node:all-srcs",
"//test/e2e/framework/perf:all-srcs",
"//test/e2e/framework/pod:all-srcs",
"//test/e2e/framework/podlogs:all-srcs",
"//test/e2e/framework/providers/aws:all-srcs",

View File

@@ -0,0 +1,22 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["stats.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/kubelet",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,47 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"time"
)
// ContainerResourceUsage is a structure for gathering container resource usage.
type ContainerResourceUsage struct {
Name string
Timestamp time.Time
CPUUsageInCores float64
MemoryUsageInBytes uint64
MemoryWorkingSetInBytes uint64
MemoryRSSInBytes uint64
// The interval used to calculate CPUUsageInCores.
CPUInterval time.Duration
}
// ResourceUsagePerContainer is a map of ContainerResourceUsage.
type ResourceUsagePerContainer map[string]*ContainerResourceUsage
// ResourceUsagePerNode is a map of ResourceUsagePerContainer.
type ResourceUsagePerNode map[string]ResourceUsagePerContainer
// ContainersCPUSummary is indexed by the container name with each entry a
// (percentile, value) map.
type ContainersCPUSummary map[string]map[float64]float64
// NodesCPUSummary is indexed by the node name with each entry a
// ContainersCPUSummary map.
type NodesCPUSummary map[string]ContainersCPUSummary
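These map types nest node name → container name → usage sample. A minimal construction for orientation (illustrative values, not part of the commit):

// exampleUsage shows how the types above compose; the literal values
// are placeholders.
func exampleUsage() ResourceUsagePerNode {
	return ResourceUsagePerNode{
		"node-1": ResourceUsagePerContainer{
			"/kubelet": &ContainerResourceUsage{
				Name:                    "/kubelet",
				Timestamp:               time.Now(),
				CPUUsageInCores:         0.25,
				MemoryUsageInBytes:      96 * 1024 * 1024,
				MemoryWorkingSetInBytes: 64 * 1024 * 1024,
				CPUInterval:             30 * time.Second,
			},
		},
	}
}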

View File

@@ -38,6 +38,7 @@ import (
dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/master/ports"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -345,7 +346,7 @@ func getOneTimeResourceUsageOnNode(
nodeName string,
cpuInterval time.Duration,
containerNames func() []string,
) (ResourceUsagePerContainer, error) {
) (e2ekubelet.ResourceUsagePerContainer, error) {
const (
// cadvisor records stats about every second.
cadvisorStatsPollingIntervalInSeconds float64 = 1.0
@@ -363,11 +364,11 @@
return nil, err
}
f := func(name string, newStats *kubeletstatsv1alpha1.ContainerStats) *ContainerResourceUsage {
f := func(name string, newStats *kubeletstatsv1alpha1.ContainerStats) *e2ekubelet.ContainerResourceUsage {
if newStats == nil || newStats.CPU == nil || newStats.Memory == nil {
return nil
}
return &ContainerResourceUsage{
return &e2ekubelet.ContainerResourceUsage{
Name: name,
Timestamp: newStats.StartTime.Time,
CPUUsageInCores: float64(removeUint64Ptr(newStats.CPU.UsageNanoCores)) / 1000000000,
@@ -379,7 +380,7 @@
}
// Process container infos that are relevant to us.
containers := containerNames()
usageMap := make(ResourceUsagePerContainer, len(containers))
usageMap := make(e2ekubelet.ResourceUsagePerContainer, len(containers))
observedContainers := []string{}
for _, pod := range summary.Pods {
for _, container := range pod.Containers {
@@ -452,29 +453,7 @@ func TargetContainers() []string {
}
}
// ContainerResourceUsage is a structure for gathering container resource usage.
type ContainerResourceUsage struct {
Name string
Timestamp time.Time
CPUUsageInCores float64
MemoryUsageInBytes uint64
MemoryWorkingSetInBytes uint64
MemoryRSSInBytes uint64
// The interval used to calculate CPUUsageInCores.
CPUInterval time.Duration
}
func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsage) bool {
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
}
// ResourceUsagePerContainer is a map of ContainerResourceUsage.
type ResourceUsagePerContainer map[string]*ContainerResourceUsage
// ResourceUsagePerNode is a map of ResourceUsagePerContainer.
type ResourceUsagePerNode map[string]ResourceUsagePerContainer
func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
func formatResourceUsageStats(nodeName string, containerStats e2ekubelet.ResourceUsagePerContainer) string {
// Example output:
//
// Resource usage for node "e2e-test-foo-node-abcde":
@@ -538,8 +517,8 @@ func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
}
}
func computeContainerResourceUsage(name string, oldStats, newStats *kubeletstatsv1alpha1.ContainerStats) *ContainerResourceUsage {
return &ContainerResourceUsage{
func computeContainerResourceUsage(name string, oldStats, newStats *kubeletstatsv1alpha1.ContainerStats) *e2ekubelet.ContainerResourceUsage {
return &e2ekubelet.ContainerResourceUsage{
Name: name,
Timestamp: newStats.CPU.Time.Time,
CPUUsageInCores: float64(*newStats.CPU.UsageCoreNanoSeconds-*oldStats.CPU.UsageCoreNanoSeconds) / float64(newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time).Nanoseconds()),
@@ -558,13 +537,13 @@ type resourceCollector struct {
node string
containers []string
client clientset.Interface
buffers map[string][]*ContainerResourceUsage
buffers map[string][]*e2ekubelet.ContainerResourceUsage
pollingInterval time.Duration
stopCh chan struct{}
}
func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
buffers := make(map[string][]*ContainerResourceUsage)
buffers := make(map[string][]*e2ekubelet.ContainerResourceUsage)
return &resourceCollector{
node: nodeName,
containers: containerNames,
@@ -620,10 +599,10 @@ func (r *resourceCollector) collectStats(oldStatsMap map[string]*kubeletstatsv1a
}
}
func (r *resourceCollector) GetLatest() (ResourceUsagePerContainer, error) {
func (r *resourceCollector) GetLatest() (e2ekubelet.ResourceUsagePerContainer, error) {
r.lock.RLock()
defer r.lock.RUnlock()
kubeletstatsv1alpha1 := make(ResourceUsagePerContainer)
kubeletstatsv1alpha1 := make(e2ekubelet.ResourceUsagePerContainer)
for _, name := range r.containers {
contStats, ok := r.buffers[name]
if !ok || len(contStats) == 0 {
@@ -639,11 +618,11 @@ func (r *resourceCollector) Reset() {
r.lock.Lock()
defer r.lock.Unlock()
for _, name := range r.containers {
r.buffers[name] = []*ContainerResourceUsage{}
r.buffers[name] = []*e2ekubelet.ContainerResourceUsage{}
}
}
type resourceUsageByCPU []*ContainerResourceUsage
type resourceUsageByCPU []*e2ekubelet.ContainerResourceUsage
func (r resourceUsageByCPU) Len() int { return len(r) }
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
@@ -729,7 +708,7 @@ func (r *ResourceMonitor) LogLatest() {
// FormatResourceUsage returns the formatted string for LogLatest().
// TODO(oomichi): This can be made a local function after making test/e2e/node/kubelet_perf.go use LogLatest directly instead.
func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
func (r *ResourceMonitor) FormatResourceUsage(s e2ekubelet.ResourceUsagePerNode) string {
summary := []string{}
for node, usage := range s {
summary = append(summary, formatResourceUsageStats(node, usage))
@@ -738,8 +717,8 @@ func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
}
// GetLatest returns the latest resource usage.
func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
result := make(ResourceUsagePerNode)
func (r *ResourceMonitor) GetLatest() (e2ekubelet.ResourceUsagePerNode, error) {
result := make(e2ekubelet.ResourceUsagePerNode)
errs := []error{}
for key, collector := range r.collectors {
s, err := collector.GetLatest()
@@ -753,10 +732,10 @@ func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
}
// GetMasterNodeLatest returns the latest resource usage of master and node.
func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode) ResourceUsagePerNode {
result := make(ResourceUsagePerNode)
var masterUsage ResourceUsagePerContainer
var nodesUsage []ResourceUsagePerContainer
func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode e2ekubelet.ResourceUsagePerNode) e2ekubelet.ResourceUsagePerNode {
result := make(e2ekubelet.ResourceUsagePerNode)
var masterUsage e2ekubelet.ResourceUsagePerContainer
var nodesUsage []e2ekubelet.ResourceUsagePerContainer
for node, usage := range usagePerNode {
if strings.HasSuffix(node, "master") {
masterUsage = usage
@@ -764,11 +743,11 @@ func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode)
nodesUsage = append(nodesUsage, usage)
}
}
nodeAvgUsage := make(ResourceUsagePerContainer)
nodeAvgUsage := make(e2ekubelet.ResourceUsagePerContainer)
for _, nodeUsage := range nodesUsage {
for c, usage := range nodeUsage {
if _, found := nodeAvgUsage[c]; !found {
nodeAvgUsage[c] = &ContainerResourceUsage{Name: usage.Name}
nodeAvgUsage[c] = &e2ekubelet.ContainerResourceUsage{Name: usage.Name}
}
nodeAvgUsage[c].CPUUsageInCores += usage.CPUUsageInCores
nodeAvgUsage[c].MemoryUsageInBytes += usage.MemoryUsageInBytes
@@ -787,16 +766,8 @@ func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode)
return result
}
// ContainersCPUSummary is indexed by the container name with each entry a
// (percentile, value) map.
type ContainersCPUSummary map[string]map[float64]float64
// NodesCPUSummary is indexed by the node name with each entry a
// ContainersCPUSummary map.
type NodesCPUSummary map[string]ContainersCPUSummary
// FormatCPUSummary returns the string of human-readable CPU summary from the specified summary data.
func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
func (r *ResourceMonitor) FormatCPUSummary(summary e2ekubelet.NodesCPUSummary) string {
// Example output for a node (the percentiles may differ):
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
// container 5th% 50th% 90th% 95th%
@@ -840,10 +811,10 @@ func (r *ResourceMonitor) LogCPUSummary() {
}
// GetCPUSummary returns summary of CPU.
func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
result := make(NodesCPUSummary)
func (r *ResourceMonitor) GetCPUSummary() e2ekubelet.NodesCPUSummary {
result := make(e2ekubelet.NodesCPUSummary)
for nodeName, collector := range r.collectors {
result[nodeName] = make(ContainersCPUSummary)
result[nodeName] = make(e2ekubelet.ContainersCPUSummary)
for _, containerName := range TargetContainers() {
data := collector.GetBasicCPUStats(containerName)
result[nodeName][containerName] = data
@@ -853,10 +824,10 @@ func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
}
// GetMasterNodeCPUSummary returns summary of master node CPUs.
func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary) NodesCPUSummary {
result := make(NodesCPUSummary)
var masterSummary ContainersCPUSummary
var nodesSummaries []ContainersCPUSummary
func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode e2ekubelet.NodesCPUSummary) e2ekubelet.NodesCPUSummary {
result := make(e2ekubelet.NodesCPUSummary)
var masterSummary e2ekubelet.ContainersCPUSummary
var nodesSummaries []e2ekubelet.ContainersCPUSummary
for node, summary := range summaryPerNode {
if strings.HasSuffix(node, "master") {
masterSummary = summary
@@ -865,7 +836,7 @@ func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary
}
}
nodeAvgSummary := make(ContainersCPUSummary)
nodeAvgSummary := make(e2ekubelet.ContainersCPUSummary)
for _, nodeSummary := range nodesSummaries {
for c, summary := range nodeSummary {
if _, found := nodeAvgSummary[c]; !found {
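For reference, computeContainerResourceUsage in this file turns two cumulative samples into a rate: the delta of the core-nanosecond counter divided by elapsed wall-clock nanoseconds yields average cores. The same arithmetic in a standalone sketch (cpuCores is illustrative, not a name from the commit):

package main

import (
	"fmt"
	"time"
)

// cpuCores mirrors the rate computation: cumulative core-nanoseconds
// consumed over elapsed nanoseconds gives average cores for the interval.
func cpuCores(oldCoreNs, newCoreNs uint64, oldT, newT time.Time) float64 {
	return float64(newCoreNs-oldCoreNs) / float64(newT.Sub(oldT).Nanoseconds())
}

func main() {
	t0 := time.Now()
	t1 := t0.Add(10 * time.Second)
	fmt.Println(cpuCores(0, 5000000000, t0, t1)) // 5e9 core-ns over 10s = 0.5 cores
}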

View File

@@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["perf.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/perf",
visibility = ["//visibility:public"],
deps = [
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/perftype:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -19,29 +19,12 @@ package framework
import (
"fmt"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/perftype"
)
// TODO(random-liu): Change the tests to actually use PerfData from the beginning instead of
// translating one to the other here.
func latencyToPerfData(l e2emetrics.LatencyMetric, name string) perftype.DataItem {
return perftype.DataItem{
Data: map[string]float64{
"Perc50": float64(l.Perc50) / 1000000, // us -> ms
"Perc90": float64(l.Perc90) / 1000000,
"Perc99": float64(l.Perc99) / 1000000,
"Perc100": float64(l.Perc100) / 1000000,
},
Unit: "ms",
Labels: map[string]string{
"Metric": name,
},
}
}
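Since e2emetrics.LatencyMetric percentiles are time.Duration values, which count nanoseconds, dividing by 1,000,000 converts them to milliseconds; a quick standalone check:

package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Duration is an integer nanosecond count, so /1e6 yields milliseconds.
	p50 := 1500 * time.Millisecond
	fmt.Println(float64(p50) / 1000000) // 1500
}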
// CurrentKubeletPerfMetricsVersion is the current kubelet performance metrics
// version. This is used by multiple perf-related data structures. We should
// bump up the version each time we make an incompatible change to the metrics.
@@ -49,12 +32,12 @@ const CurrentKubeletPerfMetricsVersion = "v2"
// ResourceUsageToPerfData transforms ResourceUsagePerNode to PerfData. Notice that this function
// only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData {
func ResourceUsageToPerfData(usagePerNode e2ekubelet.ResourceUsagePerNode) *perftype.PerfData {
return ResourceUsageToPerfDataWithLabels(usagePerNode, nil)
}
// CPUUsageToPerfData transforms NodesCPUSummary to PerfData.
func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
func CPUUsageToPerfData(usagePerNode e2ekubelet.NodesCPUSummary) *perftype.PerfData {
return CPUUsageToPerfDataWithLabels(usagePerNode, nil)
}
@@ -69,7 +52,7 @@ func PrintPerfData(p *perftype.PerfData) {
// ResourceUsageToPerfDataWithLabels transforms ResourceUsagePerNode to PerfData with additional labels.
// Notice that this function only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
func ResourceUsageToPerfDataWithLabels(usagePerNode ResourceUsagePerNode, labels map[string]string) *perftype.PerfData {
func ResourceUsageToPerfDataWithLabels(usagePerNode e2ekubelet.ResourceUsagePerNode, labels map[string]string) *perftype.PerfData {
items := []perftype.DataItem{}
for node, usages := range usagePerNode {
for c, usage := range usages {
@@ -98,7 +81,7 @@ }
}
// CPUUsageToPerfDataWithLabels transforms NodesCPUSummary to PerfData with additional labels.
func CPUUsageToPerfDataWithLabels(usagePerNode NodesCPUSummary, labels map[string]string) *perftype.PerfData {
func CPUUsageToPerfDataWithLabels(usagePerNode e2ekubelet.NodesCPUSummary, labels map[string]string) *perftype.PerfData {
items := []perftype.DataItem{}
for node, usages := range usagePerNode {
for c, usage := range usages {
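The loop bodies are truncated in this view, but each (node, container) pair becomes a perftype.DataItem shaped like the latency items earlier in the file. A hedged sketch of a memory item (the exact Data and Labels keys are assumptions, not read from this diff):

package perf

import "k8s.io/kubernetes/test/e2e/perftype"

// memoryDataItem approximates what ResourceUsageToPerfDataWithLabels emits
// per container; the "memory", "Node", and "Container" keys are assumed.
func memoryDataItem(node, container string, workingSetBytes uint64) perftype.DataItem {
	return perftype.DataItem{
		Data: map[string]float64{
			"memory": float64(workingSetBytes) / (1024 * 1024), // bytes -> MB
		},
		Unit: "MB",
		Labels: map[string]string{
			"Node":      node,
			"Container": container,
		},
	}
}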

View File

@@ -32,6 +32,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/util/system"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)
@@ -81,9 +82,9 @@ func (s *ResourceUsageSummary) SummaryKind() string {
return "ResourceUsageSummary"
}
func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCompute []int) map[int]ResourceUsagePerContainer {
func computePercentiles(timeSeries []e2ekubelet.ResourceUsagePerContainer, percentilesToCompute []int) map[int]e2ekubelet.ResourceUsagePerContainer {
if len(timeSeries) == 0 {
return make(map[int]ResourceUsagePerContainer)
return make(map[int]e2ekubelet.ResourceUsagePerContainer)
}
dataMap := make(map[string]*usageDataPerContainer)
for i := range timeSeries {
@@ -106,12 +107,12 @@ func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCom
sort.Sort(uint64arr(v.memWorkSetData))
}
result := make(map[int]ResourceUsagePerContainer)
result := make(map[int]e2ekubelet.ResourceUsagePerContainer)
for _, perc := range percentilesToCompute {
data := make(ResourceUsagePerContainer)
data := make(e2ekubelet.ResourceUsagePerContainer)
for k, v := range dataMap {
percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1
data[k] = &ContainerResourceUsage{
data[k] = &e2ekubelet.ContainerResourceUsage{
Name: k,
CPUUsageInCores: v.cpuData[percentileIndex],
MemoryUsageInBytes: v.memUseData[percentileIndex],
@@ -123,8 +124,8 @@ func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCom
return result
}
func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]ResourceUsagePerContainer {
result := make(map[int]ResourceUsagePerContainer)
func leftMergeData(left, right map[int]e2ekubelet.ResourceUsagePerContainer) map[int]e2ekubelet.ResourceUsagePerContainer {
result := make(map[int]e2ekubelet.ResourceUsagePerContainer)
for percentile, data := range left {
result[percentile] = data
if _, ok := right[percentile]; !ok {
@@ -143,7 +144,7 @@ type resourceGatherWorker struct {
wg *sync.WaitGroup
containerIDs []string
stopCh chan struct{}
dataSeries []ResourceUsagePerContainer
dataSeries []e2ekubelet.ResourceUsagePerContainer
finished bool
inKubemark bool
resourceDataGatheringPeriod time.Duration
@@ -152,14 +153,14 @@ }
}
func (w *resourceGatherWorker) singleProbe() {
data := make(ResourceUsagePerContainer)
data := make(e2ekubelet.ResourceUsagePerContainer)
if w.inKubemark {
kubemarkData := GetKubemarkMasterComponentsResourceUsage()
if kubemarkData == nil {
return
}
for k, v := range kubemarkData {
data[k] = &ContainerResourceUsage{
data[k] = &e2ekubelet.ContainerResourceUsage{
Name: v.Name,
MemoryWorkingSetInBytes: v.MemoryWorkingSetInBytes,
CPUUsageInCores: v.CPUUsageInCores,
@@ -355,7 +356,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
e2elog.Logf("Warning! Empty percentile list for stopAndPrintData.")
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
}
data := make(map[int]ResourceUsagePerContainer)
data := make(map[int]e2ekubelet.ResourceUsagePerContainer)
for i := range g.workers {
if g.workers[i].finished {
stats := computePercentiles(g.workers[i].dataSeries, percentiles)
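computePercentiles uses nearest-rank selection: for a sorted sample of length n, percentile p is the element at index ceil(n*p/100)-1, exactly the percentileIndex expression above. A standalone sketch of the index math:

package main

import (
	"fmt"
	"math"
	"sort"
)

// nearestRank reproduces the percentileIndex arithmetic from computePercentiles.
func nearestRank(sorted []float64, perc int) float64 {
	idx := int(math.Ceil(float64(len(sorted)*perc)/100)) - 1
	return sorted[idx]
}

func main() {
	data := []float64{0.12, 0.30, 0.25, 0.08, 0.40}
	sort.Float64s(data)
	fmt.Println(nearestRank(data, 50)) // 0.25: ceil(5*50/100)-1 = index 2
}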

View File

@@ -37,8 +37,10 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/perf:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/volume:go_default_library",

View File

@@ -26,7 +26,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -46,8 +48,8 @@ const (
type resourceTest struct {
podsPerNode int
cpuLimits framework.ContainersCPUSummary
memLimits framework.ResourceUsagePerContainer
cpuLimits e2ekubelet.ContainersCPUSummary
memLimits e2ekubelet.ResourceUsagePerContainer
}
func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
@@ -62,7 +64,7 @@ func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
}
func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames sets.String, rm *framework.ResourceMonitor,
expectedCPU map[string]map[float64]float64, expectedMemory framework.ResourceUsagePerContainer) {
expectedCPU map[string]map[float64]float64, expectedMemory e2ekubelet.ResourceUsagePerContainer) {
numNodes := nodeNames.Len()
totalPods := podsPerNode * numNodes
ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
@@ -107,20 +109,20 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
// TODO(random-liu): Remove the original log when we migrate to new perfdash
e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
// Log perf result
framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
e2eperf.PrintPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)
cpuSummary := rm.GetCPUSummary()
e2elog.Logf("%s", rm.FormatCPUSummary(cpuSummary))
// Log perf result
framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
e2eperf.PrintPerfData(e2eperf.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
verifyCPULimits(expectedCPU, cpuSummary)
ginkgo.By("Deleting the RC")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
}
func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) {
if expected == nil {
return
}
@@ -156,7 +158,7 @@ }
}
}
func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet.NodesCPUSummary) {
if expected == nil {
return
}
@@ -233,25 +235,25 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
rTests := []resourceTest{
{
podsPerNode: 0,
cpuLimits: framework.ContainersCPUSummary{
cpuLimits: e2ekubelet.ContainersCPUSummary{
kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.10, 0.95: 0.20},
kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.20},
},
memLimits: framework.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
memLimits: e2ekubelet.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
// The detail can be found at https://github.com/kubernetes/kubernetes/issues/28384#issuecomment-244158892
kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 125 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 125 * 1024 * 1024},
},
},
{
cpuLimits: framework.ContainersCPUSummary{
cpuLimits: e2ekubelet.ContainersCPUSummary{
kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.35, 0.95: 0.50},
kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.50},
},
podsPerNode: 100,
memLimits: framework.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 350 * 1024 * 1024},
memLimits: e2ekubelet.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 350 * 1024 * 1024},
},
},
}
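Each rTests entry pairs per-container CPU percentile ceilings with RSS ceilings; verifyCPULimits and verifyMemoryLimits (bodies elided in this view) compare observed summaries against them. A hedged sketch of the memory check (checkRSS is illustrative, not the commit's code):

package example

import (
	"fmt"

	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
)

// checkRSS returns one message per container whose observed RSS exceeds
// its configured ceiling; the real verifyMemoryLimits also handles
// missing data and per-node iteration.
func checkRSS(expected, actual e2ekubelet.ResourceUsagePerContainer) []string {
	var errs []string
	for name, want := range expected {
		got, ok := actual[name]
		if !ok {
			errs = append(errs, fmt.Sprintf("missing usage data for container %q", name))
			continue
		}
		if got.MemoryRSSInBytes > want.MemoryRSSInBytes {
			errs = append(errs, fmt.Sprintf("container %q RSS %d exceeds limit %d",
				name, got.MemoryRSSInBytes, want.MemoryRSSInBytes))
		}
	}
	return errs
}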

View File

@@ -31,6 +31,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",

View File

@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -84,8 +85,8 @@ type densityTest struct {
// API QPS limit
APIQPSLimit int
// performance limits
cpuLimits framework.ContainersCPUSummary
memLimits framework.ResourceUsagePerContainer
cpuLimits e2ekubelet.ContainersCPUSummary
memLimits e2ekubelet.ResourceUsagePerContainer
podStartupLimits e2emetrics.LatencyMetric
podBatchStartupLimit time.Duration
}

View File

@@ -76,6 +76,8 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/perf:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/e2e_node/perftype:go_default_library",
"//test/utils:go_default_library",
@@ -193,6 +195,8 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/perf:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/generated:go_default_library",
"//vendor/github.com/onsi/ginkgo/config:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
"k8s.io/kubernetes/test/e2e/perftype"
nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype"
)
@@ -58,7 +59,7 @@ func dumpDataToFile(data interface{}, labels map[string]string, prefix string) {
// as "cpu" and "memory". If an error occurs, no perf data will be logged.
func logPerfData(p *perftype.PerfData, perfType string) {
if framework.TestContext.ReportDir == "" {
framework.PrintPerfData(p)
e2eperf.PrintPerfData(p)
return
}
dumpDataToFile(p, p.Labels, "performance-"+perfType)
@@ -71,7 +72,7 @@
func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]metav1.Time, testInfo map[string]string) {
timeSeries := &nodeperftype.NodeTimeSeries{
Labels: testInfo,
Version: framework.CurrentKubeletPerfMetricsVersion,
Version: e2eperf.CurrentKubeletPerfMetricsVersion,
}
// Attach operation time series.
timeSeries.OperationData = map[string][]int64{
@@ -108,7 +109,7 @@ func getCumulatedPodTimeSeries(timePerPod map[string]metav1.Time) []int64 {
// getLatencyPerfData returns perf data of pod startup latency.
func getLatencyPerfData(latency e2emetrics.LatencyMetric, testInfo map[string]string) *perftype.PerfData {
return &perftype.PerfData{
Version: framework.CurrentKubeletPerfMetricsVersion,
Version: e2eperf.CurrentKubeletPerfMetricsVersion,
DataItems: []perftype.DataItem{
{
Data: map[string]float64{
@@ -131,7 +132,7 @@ func getLatencyPerfData(latency e2emetrics.LatencyMetric, testInfo map[string]st
// getThroughputPerfData returns perf data of pod creation startup throughput.
func getThroughputPerfData(batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podsNr int, testInfo map[string]string) *perftype.PerfData {
return &perftype.PerfData{
Version: framework.CurrentKubeletPerfMetricsVersion,
Version: e2eperf.CurrentKubeletPerfMetricsVersion,
DataItems: []perftype.DataItem{
{
Data: map[string]float64{

View File

@@ -35,6 +35,7 @@ import (
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -76,13 +77,13 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
{
podsNr: 10,
interval: 0 * time.Millisecond,
cpuLimits: framework.ContainersCPUSummary{
cpuLimits: e2ekubelet.ContainersCPUSummary{
kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
},
memLimits: framework.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
memLimits: e2ekubelet.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
},
// percentile limit of single pod startup latency
podStartupLimits: e2emetrics.LatencyMetric{
@@ -223,13 +224,13 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
{
podsNr: 10,
bgPodsNr: 50,
cpuLimits: framework.ContainersCPUSummary{
cpuLimits: e2ekubelet.ContainersCPUSummary{
kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
},
memLimits: framework.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
memLimits: e2ekubelet.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
},
podStartupLimits: e2emetrics.LatencyMetric{
Perc50: 5000 * time.Millisecond,
@@ -302,8 +303,8 @@ type densityTest struct {
// API QPS limit
APIQPSLimit int
// performance limits
cpuLimits framework.ContainersCPUSummary
memLimits framework.ResourceUsagePerContainer
cpuLimits e2ekubelet.ContainersCPUSummary
memLimits e2ekubelet.ResourceUsagePerContainer
podStartupLimits e2emetrics.LatencyMetric
podBatchStartupLimit time.Duration
}

View File

@@ -43,6 +43,7 @@ import (
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/util/procfs"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e_node/perftype"
@ -69,7 +70,7 @@ type ResourceCollector struct {
request *cadvisorapiv2.RequestOptions
pollingInterval time.Duration
buffers map[string][]*framework.ContainerResourceUsage
buffers map[string][]*e2ekubelet.ContainerResourceUsage
lock sync.RWMutex
stopCh chan struct{}
}
@@ -77,7 +78,7 @@ type ResourceCollector struct {
// NewResourceCollector creates a resource collector object which collects
// resource usage periodically from Cadvisor
func NewResourceCollector(interval time.Duration) *ResourceCollector {
buffers := make(map[string][]*framework.ContainerResourceUsage)
buffers := make(map[string][]*e2ekubelet.ContainerResourceUsage)
return &ResourceCollector{
pollingInterval: interval,
buffers: buffers,
@@ -127,13 +128,13 @@ func (r *ResourceCollector) Reset() {
r.lock.Lock()
defer r.lock.Unlock()
for _, name := range systemContainers {
r.buffers[name] = []*framework.ContainerResourceUsage{}
r.buffers[name] = []*e2ekubelet.ContainerResourceUsage{}
}
}
// GetCPUSummary gets CPU usage in percentiles.
func (r *ResourceCollector) GetCPUSummary() framework.ContainersCPUSummary {
result := make(framework.ContainersCPUSummary)
func (r *ResourceCollector) GetCPUSummary() e2ekubelet.ContainersCPUSummary {
result := make(e2ekubelet.ContainersCPUSummary)
for key, name := range systemContainers {
data := r.GetBasicCPUStats(name)
result[key] = data
@@ -174,8 +175,8 @@ func (r *ResourceCollector) collectStats(oldStatsMap map[string]*cadvisorapiv2.C
}
// computeContainerResourceUsage computes resource usage based on a new data sample.
func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv2.ContainerStats) *framework.ContainerResourceUsage {
return &framework.ContainerResourceUsage{
func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv2.ContainerStats) *e2ekubelet.ContainerResourceUsage {
return &e2ekubelet.ContainerResourceUsage{
Name: name,
Timestamp: newStats.Timestamp,
CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()),
@@ -187,10 +188,10 @@ func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv
}
// GetLatest gets the latest resource usage from stats buffer.
func (r *ResourceCollector) GetLatest() (framework.ResourceUsagePerContainer, error) {
func (r *ResourceCollector) GetLatest() (e2ekubelet.ResourceUsagePerContainer, error) {
r.lock.RLock()
defer r.lock.RUnlock()
kubeletstatsv1alpha1 := make(framework.ResourceUsagePerContainer)
kubeletstatsv1alpha1 := make(e2ekubelet.ResourceUsagePerContainer)
for key, name := range systemContainers {
contStats, ok := r.buffers[name]
if !ok || len(contStats) == 0 {
@@ -201,7 +202,7 @@ func (r *ResourceCollector) GetLatest() (framework.ResourceUsagePerContainer, er
return kubeletstatsv1alpha1, nil
}
type resourceUsageByCPU []*framework.ContainerResourceUsage
type resourceUsageByCPU []*e2ekubelet.ContainerResourceUsage
func (r resourceUsageByCPU) Len() int { return len(r) }
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
@@ -218,7 +219,7 @@ func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]f
result := make(map[float64]float64, len(percentiles))
// We must make a copy of the array; otherwise the time series order is changed.
usages := make([]*framework.ContainerResourceUsage, 0)
usages := make([]*e2ekubelet.ContainerResourceUsage, 0)
usages = append(usages, r.buffers[containerName]...)
sort.Sort(resourceUsageByCPU(usages))
@@ -234,7 +235,7 @@ func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]f
return result
}
func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer) string {
func formatResourceUsageStats(containerStats e2ekubelet.ResourceUsagePerContainer) string {
// Example output:
//
// Resource usage:
@@ -252,7 +253,7 @@ func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer
return fmt.Sprintf("Resource usage:\n%s", buf.String())
}
func formatCPUSummary(summary framework.ContainersCPUSummary) string {
func formatCPUSummary(summary e2ekubelet.ContainersCPUSummary) string {
// Example output for a node (the percentiles may differ):
// CPU usage of containers:
// container 5th% 50th% 90th% 95th%
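GetBasicCPUStats above copies the buffered series before sort.Sort so the collector's time-ordered buffer stays intact. The idiom in isolation (byCPU's Less is inferred from the resourceUsageByCPU name; only Len and Swap appear in this diff):

package main

import (
	"fmt"
	"sort"

	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
)

// byCPU stands in for resourceUsageByCPU; Less orders samples by CPU usage.
type byCPU []*e2ekubelet.ContainerResourceUsage

func (r byCPU) Len() int           { return len(r) }
func (r byCPU) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r byCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }

func main() {
	series := []*e2ekubelet.ContainerResourceUsage{{CPUUsageInCores: 0.4}, {CPUUsageInCores: 0.1}}
	// Copy first, as GetBasicCPUStats does, so the original series keeps its order.
	sorted := append([]*e2ekubelet.ContainerResourceUsage(nil), series...)
	sort.Sort(byCPU(sorted))
	fmt.Println(sorted[0].CPUUsageInCores, series[0].CPUUsageInCores) // 0.1 0.4
}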

View File

@@ -26,7 +26,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
@@ -67,13 +69,13 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
rTests := []resourceTest{
{
podsNr: 10,
cpuLimits: framework.ContainersCPUSummary{
cpuLimits: e2ekubelet.ContainersCPUSummary{
kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.35},
kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.30, 0.95: 0.40},
},
memLimits: framework.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 400 * 1024 * 1024},
memLimits: e2ekubelet.ResourceUsagePerContainer{
kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 200 * 1024 * 1024},
kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 400 * 1024 * 1024},
},
},
}
@@ -125,8 +127,8 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
type resourceTest struct {
podsNr int
cpuLimits framework.ContainersCPUSummary
memLimits framework.ResourceUsagePerContainer
cpuLimits e2ekubelet.ContainersCPUSummary
memLimits e2ekubelet.ResourceUsagePerContainer
}
func (rt *resourceTest) getTestName() string {
@@ -183,8 +185,8 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
}
// logAndVerifyResource prints the resource usage as perf data and verifies whether resource usage satisfies the limit.
func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits framework.ContainersCPUSummary,
memLimits framework.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) {
func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimits e2ekubelet.ContainersCPUSummary,
memLimits e2ekubelet.ResourceUsagePerContainer, testInfo map[string]string, isVerify bool) {
nodeName := framework.TestContext.NodeName
// Obtain memory PerfData
@@ -192,19 +194,19 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
framework.ExpectNoError(err)
e2elog.Logf("%s", formatResourceUsageStats(usagePerContainer))
usagePerNode := make(framework.ResourceUsagePerNode)
usagePerNode := make(e2ekubelet.ResourceUsagePerNode)
usagePerNode[nodeName] = usagePerContainer
// Obtain CPU PerfData
cpuSummary := rc.GetCPUSummary()
e2elog.Logf("%s", formatCPUSummary(cpuSummary))
cpuSummaryPerNode := make(framework.NodesCPUSummary)
cpuSummaryPerNode := make(e2ekubelet.NodesCPUSummary)
cpuSummaryPerNode[nodeName] = cpuSummary
// Print resource usage
logPerfData(framework.ResourceUsageToPerfDataWithLabels(usagePerNode, testInfo), "memory")
logPerfData(framework.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode, testInfo), "cpu")
logPerfData(e2eperf.ResourceUsageToPerfDataWithLabels(usagePerNode, testInfo), "memory")
logPerfData(e2eperf.CPUUsageToPerfDataWithLabels(cpuSummaryPerNode, testInfo), "cpu")
// Verify resource usage
if isVerify {
@@ -213,7 +215,7 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
}
}
func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) {
if expected == nil {
return
}
@@ -249,7 +251,7 @@ func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.N
}
}
func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.NodesCPUSummary) {
func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet.NodesCPUSummary) {
if expected == nil {
return
}