From ded8e03fc34ffff3c1196cb5f2dc5f33bc39cc41 Mon Sep 17 00:00:00 2001 From: gmarek Date: Thu, 25 May 2017 11:37:02 +0200 Subject: [PATCH 1/3] Reduce service creation/deletion parallelism in the load test --- test/e2e/framework/kubelet_stats.go | 1 - test/e2e/perf/load.go | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index 9781b931173..d3a25a405c5 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -39,7 +39,6 @@ import ( "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/metrics" - "github.com/golang/glog" "github.com/prometheus/common/model" ) diff --git a/test/e2e/perf/load.go b/test/e2e/perf/load.go index 6f95c864c9e..0fd015cdc7a 100644 --- a/test/e2e/perf/load.go +++ b/test/e2e/perf/load.go @@ -59,6 +59,8 @@ const ( // nodeCountPerNamespace determines how many namespaces we will be using // depending on the number of nodes in the underlying cluster. nodeCountPerNamespace = 100 + // How many threads will be used to create/delete services during this test. 
+ serviceOperationsParallelism = 5 ) var randomKind = schema.GroupKind{Kind: "Random"} @@ -190,7 +192,7 @@ var _ = framework.KubeDescribe("Load capacity", func() { _, err := clientset.Core().Services(services[i].Namespace).Create(services[i]) framework.ExpectNoError(err) } - workqueue.Parallelize(25, len(services), createService) + workqueue.Parallelize(serviceOperationsParallelism, len(services), createService) framework.Logf("%v Services created.", len(services)) defer func(services []*v1.Service) { framework.Logf("Starting to delete services...") @@ -198,7 +200,7 @@ var _ = framework.KubeDescribe("Load capacity", func() { err := clientset.Core().Services(services[i].Namespace).Delete(services[i].Name, nil) framework.ExpectNoError(err) } - workqueue.Parallelize(25, len(services), deleteService) + workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService) framework.Logf("Services deleted") }(services) } else { From 02951f182e73294c672a9782577a4d2905e67e69 Mon Sep 17 00:00:00 2001 From: gmarek Date: Thu, 25 May 2017 11:39:59 +0200 Subject: [PATCH 2/3] Correctly handle nil resource usage in performance e2e tests --- test/e2e/framework/kubelet_stats.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index d3a25a405c5..8a61d204087 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -369,10 +369,8 @@ func getOneTimeResourceUsageOnNode( } f := func(name string, newStats *stats.ContainerStats) *ContainerResourceUsage { - // TODO(gmarek): remove when #46198 is debugged. 
- if newStats == nil || newStats.CPU == nil { - glog.Warning("NewStats is %#v for container %v", newStats, name) - return &ContainerResourceUsage{} + if newStats == nil || newStats.CPU == nil || newStats.Memory == nil { + return nil } return &ContainerResourceUsage{ Name: name, @@ -401,7 +399,9 @@ func getOneTimeResourceUsageOnNode( if !isInteresting { continue } - usageMap[pod.PodRef.Name+"/"+container.Name] = f(pod.PodRef.Name+"/"+container.Name, &container) + if usage := f(pod.PodRef.Name+"/"+container.Name, &container); usage != nil { + usageMap[pod.PodRef.Name+"/"+container.Name] = usage + } } } return usageMap, nil From 2437cf4d59490c759a91369fc09cb07b26664f27 Mon Sep 17 00:00:00 2001 From: gmarek Date: Thu, 25 May 2017 11:48:01 +0200 Subject: [PATCH 3/3] fix typo in start-kubemark --- test/kubemark/start-kubemark.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh index d9a63c2f776..e54237146e1 100755 --- a/test/kubemark/start-kubemark.sh +++ b/test/kubemark/start-kubemark.sh @@ -303,8 +303,8 @@ current-context: kubemark-context") metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10})) sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" metrics_cpu_per_node_numerator=${NUM_NODES:-10} - metrics_cpu_per_node_denumerator=2 - metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denumerator)) + metrics_cpu_per_node_denominator=2 + metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator)) sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json" eventer_mem_per_node=500 eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))