diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go
index 25fed0969fe..fc065ad9721 100644
--- a/test/e2e/framework/kubelet_stats.go
+++ b/test/e2e/framework/kubelet_stats.go
@@ -39,7 +39,6 @@ import (
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/pkg/metrics"
 
-	"github.com/golang/glog"
 	"github.com/prometheus/common/model"
 )
 
@@ -367,10 +366,8 @@ func getOneTimeResourceUsageOnNode(
 	}
 
 	f := func(name string, newStats *stats.ContainerStats) *ContainerResourceUsage {
-		// TODO(gmarek): remove when #46198 is debugged.
-		if newStats == nil || newStats.CPU == nil {
-			glog.Warning("NewStats is %#v for container %v", newStats, name)
-			return &ContainerResourceUsage{}
+		if newStats == nil || newStats.CPU == nil || newStats.Memory == nil {
+			return nil
 		}
 		return &ContainerResourceUsage{
 			Name: name,
@@ -399,7 +396,9 @@ func getOneTimeResourceUsageOnNode(
 			if !isInteresting {
 				continue
 			}
-			usageMap[pod.PodRef.Name+"/"+container.Name] = f(pod.PodRef.Name+"/"+container.Name, &container)
+			if usage := f(pod.PodRef.Name+"/"+container.Name, &container); usage != nil {
+				usageMap[pod.PodRef.Name+"/"+container.Name] = usage
+			}
 		}
 	}
 	return usageMap, nil
diff --git a/test/e2e/perf/load.go b/test/e2e/perf/load.go
index 6f95c864c9e..0fd015cdc7a 100644
--- a/test/e2e/perf/load.go
+++ b/test/e2e/perf/load.go
@@ -59,6 +59,8 @@ const (
 	// nodeCountPerNamespace determines how many namespaces we will be using
 	// depending on the number of nodes in the underlying cluster.
 	nodeCountPerNamespace = 100
+	// How many threads will be used to create/delete services during this test.
+	serviceOperationsParallelism = 5
 )
 
 var randomKind = schema.GroupKind{Kind: "Random"}
@@ -190,7 +192,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 				_, err := clientset.Core().Services(services[i].Namespace).Create(services[i])
 				framework.ExpectNoError(err)
 			}
-			workqueue.Parallelize(25, len(services), createService)
+			workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
 			framework.Logf("%v Services created.", len(services))
 			defer func(services []*v1.Service) {
 				framework.Logf("Starting to delete services...")
@@ -198,7 +200,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 					err := clientset.Core().Services(services[i].Namespace).Delete(services[i].Name, nil)
 					framework.ExpectNoError(err)
 				}
-				workqueue.Parallelize(25, len(services), deleteService)
+				workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService)
 				framework.Logf("Services deleted")
 			}(services)
 		} else {
diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh
index d9a63c2f776..e54237146e1 100755
--- a/test/kubemark/start-kubemark.sh
+++ b/test/kubemark/start-kubemark.sh
@@ -303,8 +303,8 @@ current-context: kubemark-context")
   metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10}))
   sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
   metrics_cpu_per_node_numerator=${NUM_NODES:-10}
-  metrics_cpu_per_node_denumerator=2
-  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denumerator))
+  metrics_cpu_per_node_denominator=2
+  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
   sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
   eventer_mem_per_node=500
   eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
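Below is a small standalone sketch (not part of the patch above) illustrating the pattern the kubelet_stats.go change adopts: the helper returns nil when a container's CPU or Memory stats are missing, and the caller records usage only for non-nil results instead of storing empty entries. The type names here are simplified stand-ins for the real stats.ContainerStats and ContainerResourceUsage structs.

package main

import "fmt"

// Simplified stand-ins for the real kubelet stats types.
type cpuStats struct{ UsageCoreNanoSeconds uint64 }
type memStats struct{ WorkingSetBytes uint64 }

type containerStats struct {
	CPU    *cpuStats
	Memory *memStats
}

type containerResourceUsage struct {
	Name        string
	CPUUsage    uint64
	MemoryUsage uint64
}

// usageFor mirrors the changed helper: it returns nil instead of a
// zero-value struct when CPU or Memory stats are missing.
func usageFor(name string, s *containerStats) *containerResourceUsage {
	if s == nil || s.CPU == nil || s.Memory == nil {
		return nil
	}
	return &containerResourceUsage{
		Name:        name,
		CPUUsage:    s.CPU.UsageCoreNanoSeconds,
		MemoryUsage: s.Memory.WorkingSetBytes,
	}
}

func main() {
	containers := map[string]*containerStats{
		"pod-a/ctr": {CPU: &cpuStats{100}, Memory: &memStats{200}},
		"pod-b/ctr": {Memory: &memStats{300}}, // CPU stats missing
	}
	usageMap := map[string]*containerResourceUsage{}
	for name, s := range containers {
		// Mirror the caller change: skip containers whose stats are incomplete.
		if usage := usageFor(name, s); usage != nil {
			usageMap[name] = usage
		}
	}
	fmt.Printf("recorded %d of %d containers\n", len(usageMap), len(containers))
}

Running this prints "recorded 1 of 2 containers": the container with incomplete stats is simply omitted from the usage map, which is the behavior the patch substitutes for the earlier log-and-return-empty handling.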