From a4b3f473472e008f3cdf26079baccdb84d1df794 Mon Sep 17 00:00:00 2001
From: Wojciech Tyczynski
Date: Tue, 19 Apr 2016 15:03:48 +0200
Subject: [PATCH] Add RC and container ports to scheduler benchmark

---
 .../algorithm/priorities/node_affinity.go  |  1 -
 .../scheduler/perf/scheduler_bench_test.go |  4 +-
 .../scheduler/perf/scheduler_test.go       |  2 +-
 .../scheduler/perf/test-performance.sh     |  2 +-
 test/component/scheduler/perf/util.go      | 96 ++++++++++---------
 5 files changed, 56 insertions(+), 49 deletions(-)

diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go
index 671cc98788c..d285096aab2 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go
@@ -42,7 +42,6 @@ func NewNodeAffinityPriority(nodeLister algorithm.NodeLister) algorithm.Priority
 // the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
 // score the node gets.
 func (s *NodeAffinity) CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
-
 	var maxCount int
 	counts := map[string]int{}
 
diff --git a/test/component/scheduler/perf/scheduler_bench_test.go b/test/component/scheduler/perf/scheduler_bench_test.go
index 202cbca58e1..399be109c55 100644
--- a/test/component/scheduler/perf/scheduler_bench_test.go
+++ b/test/component/scheduler/perf/scheduler_bench_test.go
@@ -54,7 +54,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
 	c := schedulerConfigFactory.Client
 
 	makeNodes(c, numNodes)
-	makePods(c, numScheduledPods)
+	makePodsFromRC(c, "rc1", numScheduledPods)
 	for {
 		scheduled := schedulerConfigFactory.ScheduledPodLister.Store.List()
 		if len(scheduled) >= numScheduledPods {
@@ -64,7 +64,7 @@
 	}
 	// start benchmark
 	b.ResetTimer()
-	makePods(c, b.N)
+	makePodsFromRC(c, "rc2", b.N)
 	for {
 		// This can potentially affect performance of scheduler, since List() is done under mutex.
 		// TODO: Setup watch on apiserver and wait until all pods scheduled.
diff --git a/test/component/scheduler/perf/scheduler_test.go b/test/component/scheduler/perf/scheduler_test.go
index 5c54851a64c..954fad75a55 100644
--- a/test/component/scheduler/perf/scheduler_test.go
+++ b/test/component/scheduler/perf/scheduler_test.go
@@ -42,7 +42,7 @@ func schedulePods(numNodes, numPods int) {
 	c := schedulerConfigFactory.Client
 
 	makeNodes(c, numNodes)
-	makePods(c, numPods)
+	makePodsFromRC(c, "rc1", numPods)
 
 	prev := 0
 	start := time.Now()
diff --git a/test/component/scheduler/perf/test-performance.sh b/test/component/scheduler/perf/test-performance.sh
index 5c387a8f1ea..aa9a47e0d79 100755
--- a/test/component/scheduler/perf/test-performance.sh
+++ b/test/component/scheduler/perf/test-performance.sh
@@ -42,8 +42,8 @@ kube::log::status "performance test start"
 if ${RUN_BENCHMARK:-false}; then
   go test -c -o "perf.test"
   "./perf.test" -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out
+  kube::log::status "benchmark tests finished"
 fi
-kube::log::status "benchmark tests finished"
 # Running density tests. It might take a long time.
 go test -test.run=. -test.timeout=60m
 kube::log::status "density tests finished"
diff --git a/test/component/scheduler/perf/util.go b/test/component/scheduler/perf/util.go
index 7e4431b1409..0a338182418 100644
--- a/test/component/scheduler/perf/util.go
+++ b/test/component/scheduler/perf/util.go
@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/client/restclient"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/master"
+	"k8s.io/kubernetes/pkg/util/workqueue"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
 	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
@@ -92,7 +93,7 @@ func makeNodes(c client.Interface, nodeCount int) {
 		},
 		Status: api.NodeStatus{
 			Capacity: api.ResourceList{
-				api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+				api.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
 				api.ResourceCPU:    resource.MustParse("4"),
 				api.ResourceMemory: resource.MustParse("32Gi"),
 			},
@@ -109,54 +110,61 @@ func makeNodes(c client.Interface, nodeCount int) {
 	}
 }
 
-// makePods will setup specified number of scheduled pods.
-// Currently it goes through scheduling path and it's very slow to setup large number of pods.
-// TODO: Setup pods evenly on all nodes and quickly/non-linearly.
-func makePods(c client.Interface, podCount int) {
-	glog.Infof("making %d pods", podCount)
+func makePodSpec() api.PodSpec {
+	return api.PodSpec{
+		Containers: []api.Container{{
+			Name:  "pause",
+			Image: "gcr.io/google_containers/pause:1.0",
+			Ports: []api.ContainerPort{{ContainerPort: 80}},
+			Resources: api.ResourceRequirements{
+				Limits: api.ResourceList{
+					api.ResourceCPU:    resource.MustParse("100m"),
+					api.ResourceMemory: resource.MustParse("500Mi"),
+				},
+				Requests: api.ResourceList{
+					api.ResourceCPU:    resource.MustParse("100m"),
+					api.ResourceMemory: resource.MustParse("500Mi"),
+				},
+			},
+		}},
+	}
+}
+
+// makePodsFromRC will create a ReplicationController object and
+// a given number of pods (imitating the controller).
+func makePodsFromRC(c client.Interface, name string, podCount int) {
+	rc := &api.ReplicationController{
+		ObjectMeta: api.ObjectMeta{
+			Name: name,
+		},
+		Spec: api.ReplicationControllerSpec{
+			Replicas: podCount,
+			Selector: map[string]string{"name": name},
+			Template: &api.PodTemplateSpec{
+				ObjectMeta: api.ObjectMeta{
+					Labels: map[string]string{"name": name},
+				},
+				Spec: makePodSpec(),
+			},
+		},
+	}
+	if _, err := c.ReplicationControllers("default").Create(rc); err != nil {
+		glog.Fatalf("unexpected error: %v", err)
+	}
+
 	basePod := &api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			GenerateName: "scheduler-test-pod-",
+			Labels:       map[string]string{"name": name},
 		},
-		Spec: api.PodSpec{
-			Containers: []api.Container{{
-				Name:  "pause",
-				Image: "gcr.io/google_containers/pause:1.0",
-				Resources: api.ResourceRequirements{
-					Limits: api.ResourceList{
-						api.ResourceCPU:    resource.MustParse("100m"),
-						api.ResourceMemory: resource.MustParse("500Mi"),
-					},
-					Requests: api.ResourceList{
-						api.ResourceCPU:    resource.MustParse("100m"),
-						api.ResourceMemory: resource.MustParse("500Mi"),
-					},
-				},
-			}},
-		},
+		Spec: makePodSpec(),
 	}
-	threads := 30
-	remaining := make(chan int, 1000)
-	go func() {
-		for i := 0; i < podCount; i++ {
-			remaining <- i
-		}
-		close(remaining)
-	}()
-	for i := 0; i < threads; i++ {
-		go func() {
-			for {
-				_, ok := <-remaining
-				if !ok {
-					return
-				}
-				for {
-					_, err := c.Pods("default").Create(basePod)
-					if err == nil {
-						break
-					}
-				}
+	createPod := func(i int) {
+		for {
+			if _, err := c.Pods("default").Create(basePod); err == nil {
+				break
 			}
-		}()
+		}
 	}
+	workqueue.Parallelize(30, podCount, createPod)
 }
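
For reference, the sketch below shows how the new helper is meant to be driven, mirroring the updated callers in scheduler_bench_test.go and scheduler_test.go: create the RC-backed pods, then poll the factory's ScheduledPodLister until every pod is bound. It is a minimal illustration, not code from this patch; the function name waitForPodsFromRC, the *factory.ConfigFactory parameter type, and the poll interval are assumptions, and it relies on the package's existing imports ("time") and the helpers added above.

// Illustrative sketch (not part of this patch): drive makePodsFromRC the way
// the updated callers do, then wait until the scheduler has bound every pod.
func waitForPodsFromRC(schedulerConfigFactory *factory.ConfigFactory, numPods int) {
	c := schedulerConfigFactory.Client

	// One RC named "rc1" plus numPods pods carrying its "name" label;
	// the pods go through the normal scheduling path.
	makePodsFromRC(c, "rc1", numPods)

	// Poll the scheduled-pod store until all pods are bound, as benchmarkScheduling does.
	for {
		scheduled := schedulerConfigFactory.ScheduledPodLister.Store.List()
		if len(scheduled) >= numPods {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
}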