Merge pull request #24524 from wojtek-t/fix_scheduler_2

Automatic merge from submit-queue

Add RC and container ports to scheduler benchmark

Fix #23263

Ref #24408
However, scheduler throughput is still ~140 initially, whereas in reality we see 35-40. There is still a significant difference that we should understand.
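For context, throughput here is presumably pods scheduled per second. A minimal, self-contained sketch of how such a rate can be sampled by polling a scheduled-pod count (the countScheduled callback, the one-second interval, and the numbers in main are illustrative assumptions, not the actual test code):

package main

import (
	"fmt"
	"time"
)

// measureRate polls countScheduled once per second until target pods are
// scheduled, printing the per-interval throughput (pods scheduled per second).
// The callback stands in for whatever the benchmark actually lists.
func measureRate(countScheduled func() int, target int) {
	prev := 0
	for {
		time.Sleep(1 * time.Second)
		scheduled := countScheduled()
		fmt.Printf("scheduled %d pods (+%d pods/s)\n", scheduled, scheduled-prev)
		if scheduled >= target {
			return
		}
		prev = scheduled
	}
}

func main() {
	n := 0
	// Fake counter standing in for listing scheduled pods from the apiserver.
	measureRate(func() int { n += 40; return n }, 200)
}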

@hongchaodeng @xiang90
k8s-merge-robot 2016-04-20 07:18:20 -07:00
commit f3f6ffaa28
5 changed files with 56 additions and 49 deletions


@@ -42,7 +42,6 @@ func NewNodeAffinityPriority(nodeLister algorithm.NodeLister) algorithm.Priority
// the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
// score the node gets.
func (s *NodeAffinity) CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
var maxCount int
counts := map[string]int{}


@@ -54,7 +54,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
c := schedulerConfigFactory.Client
makeNodes(c, numNodes)
makePods(c, numScheduledPods)
makePodsFromRC(c, "rc1", numScheduledPods)
for {
scheduled := schedulerConfigFactory.ScheduledPodLister.Store.List()
if len(scheduled) >= numScheduledPods {
@@ -64,7 +64,7 @@ func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
}
// start benchmark
b.ResetTimer()
makePods(c, b.N)
makePodsFromRC(c, "rc2", b.N)
for {
// This can potentially affect performance of scheduler, since List() is done under mutex.
// TODO: Setup watch on apiserver and wait until all pods scheduled.
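The benchmark above follows the standard setup-then-measure pattern of Go's testing package: pods created before b.ResetTimer() are background load, and only the b.N pods created afterwards count toward the reported time. A stripped-down sketch of that structure (the helper names are placeholders, not the real scheduler_perf functions):

package perf

import "testing"

// BenchmarkSketch shows the shape of the benchmark above: populate the
// cluster first, reset the timer so setup cost is excluded, then measure
// only the scheduling of b.N additional pods.
func BenchmarkSketch(b *testing.B) {
	setupBackgroundPods(1000) // warm-up load; not timed
	b.ResetTimer()            // discard setup cost from the measurement
	scheduleAndWait(b.N)      // only this part contributes to ns/op
}

// Placeholder helpers standing in for makePodsFromRC plus the polling loop.
func setupBackgroundPods(n int) {}
func scheduleAndWait(n int)     {}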


@@ -42,7 +42,7 @@ func schedulePods(numNodes, numPods int) {
c := schedulerConfigFactory.Client
makeNodes(c, numNodes)
makePods(c, numPods)
makePodsFromRC(c, "rc1", numPods)
prev := 0
start := time.Now()


@@ -42,8 +42,8 @@ kube::log::status "performance test start"
if ${RUN_BENCHMARK:-false}; then
go test -c -o "perf.test"
"./perf.test" -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out
kube::log::status "benchmark tests finished"
fi
kube::log::status "benchmark tests finished"
# Running density tests. It might take a long time.
go test -test.run=. -test.timeout=60m
kube::log::status "density tests finished"


@@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
@@ -92,7 +93,7 @@ func makeNodes(c client.Interface, nodeCount int) {
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
api.ResourceCPU: resource.MustParse("4"),
api.ResourceMemory: resource.MustParse("32Gi"),
},
@@ -109,54 +110,61 @@ func makeNodes(c client.Interface, nodeCount int) {
}
}
// makePods will setup specified number of scheduled pods.
// Currently it goes through scheduling path and it's very slow to setup large number of pods.
// TODO: Setup pods evenly on all nodes and quickly/non-linearly.
func makePods(c client.Interface, podCount int) {
glog.Infof("making %d pods", podCount)
func makePodSpec() api.PodSpec {
return api.PodSpec{
Containers: []api.Container{{
Name: "pause",
Image: "gcr.io/google_containers/pause:1.0",
Ports: []api.ContainerPort{{ContainerPort: 80}},
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
api.ResourceCPU: resource.MustParse("100m"),
api.ResourceMemory: resource.MustParse("500Mi"),
},
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("100m"),
api.ResourceMemory: resource.MustParse("500Mi"),
},
},
}},
}
}
// makePodsFromRC will create a ReplicationController object and
// a given number of pods (imitating the controller).
func makePodsFromRC(c client.Interface, name string, podCount int) {
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: name,
},
Spec: api.ReplicationControllerSpec{
Replicas: podCount,
Selector: map[string]string{"name": name},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: makePodSpec(),
},
},
}
if _, err := c.ReplicationControllers("default").Create(rc); err != nil {
glog.Fatalf("unexpected error: %v", err)
}
basePod := &api.Pod{
ObjectMeta: api.ObjectMeta{
GenerateName: "scheduler-test-pod-",
Labels: map[string]string{"name": name},
},
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "pause",
Image: "gcr.io/google_containers/pause:1.0",
Resources: api.ResourceRequirements{
Limits: api.ResourceList{
api.ResourceCPU: resource.MustParse("100m"),
api.ResourceMemory: resource.MustParse("500Mi"),
},
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("100m"),
api.ResourceMemory: resource.MustParse("500Mi"),
},
},
}},
},
Spec: makePodSpec(),
}
threads := 30
remaining := make(chan int, 1000)
go func() {
for i := 0; i < podCount; i++ {
remaining <- i
}
close(remaining)
}()
for i := 0; i < threads; i++ {
go func() {
for {
_, ok := <-remaining
if !ok {
return
}
for {
_, err := c.Pods("default").Create(basePod)
if err == nil {
break
}
}
createPod := func(i int) {
for {
if _, err := c.Pods("default").Create(basePod); err == nil {
break
}
}()
}
}
workqueue.Parallelize(30, podCount, createPod)
}
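For reference, workqueue.Parallelize fans the podCount work items out to a fixed pool of worker goroutines, replacing the hand-rolled channel-and-goroutine loop removed above. A stand-alone sketch of that pattern (not the actual pkg/util/workqueue code):

package main

import (
	"fmt"
	"sync"
)

// parallelize runs doWorkPiece for every index in [0, pieces) using the given
// number of worker goroutines, mirroring how workqueue.Parallelize is used in
// the diff above to create pods concurrently.
func parallelize(workers, pieces int, doWorkPiece func(piece int)) {
	toProcess := make(chan int, pieces)
	for i := 0; i < pieces; i++ {
		toProcess <- i
	}
	close(toProcess)

	var wg sync.WaitGroup
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for piece := range toProcess {
				doWorkPiece(piece)
			}
		}()
	}
	wg.Wait()
}

func main() {
	// Example: "create" 10 pods with 3 concurrent workers.
	parallelize(3, 10, func(i int) { fmt.Printf("creating pod %d\n", i) })
}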