From 53518e37a66c92ce940ef300773db6daebe2f6df Mon Sep 17 00:00:00 2001
From: David Oppenheimer
Date: Fri, 3 Jul 2015 01:34:07 -0700
Subject: [PATCH] Add a test for DumbSpreadingPriority.

---
 .../algorithm/priorities/priorities.go        | 14 +++-
 .../algorithm/priorities/priorities_test.go   | 83 +++++++++++++++++++
 .../algorithm/priorities/service_spreading.go |  5 ++
 ...ding_test.go => service_spreading_test.go} |  0
 plugin/pkg/scheduler/generic_scheduler.go     |  5 +-
 5 files changed, 102 insertions(+), 5 deletions(-)
 rename plugin/pkg/scheduler/algorithm/priorities/{spreading_test.go => service_spreading_test.go} (100%)

diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities.go b/plugin/pkg/scheduler/algorithm/priorities/priorities.go
index e42ea0fa49f..c1cce819f88 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/priorities.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/priorities.go
@@ -62,7 +62,8 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 
 	cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)
 	memoryScore := calculateScore(totalMemory, capacityMemory, node.Name)
-	glog.V(10).Infof(
+//	glog.V(10).Infof(
+	glog.Infof(
 		"%v -> %v: Least Requested Priority, Absolute/Requested: (%d, %d) / (%d, %d) Score: (%d, %d)",
 		pod.Name, node.Name,
 		totalMilliCPU, totalMemory,
@@ -121,9 +122,15 @@ func DumbSpreadingPriority(pod *api.Pod, podLister algorithm.PodLister, minionLi
 	list := algorithm.HostPriorityList{}
 	for _, node := range nodes.Items {
 		npods := int64(len(podsToMachines[node.Name]))
+		score := calculateScore(min(npods+1, dumbSpreadingDenominator), dumbSpreadingDenominator, node.Name)
+//		glog.V(10).Infof(
+		glog.Infof(
+			"%v -> %v: DumbSpreadPriority, Old # pods (%d) Score: (%d)",
+			pod.Name, node.Name, npods, score,
+		)
 		list = append(list, algorithm.HostPriority{
 			Host:  node.Name,
-			Score: calculateScore(min(npods+1, dumbSpreadingDenominator), dumbSpreadingDenominator, node.Name),
+			Score: score,
 		})
 	}
 	return list, nil
@@ -225,7 +232,8 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 		diff := math.Abs(cpuFraction - memoryFraction)
 		score = int(10 - diff*10)
 	}
-	glog.V(10).Infof(
+//	glog.V(10).Infof(
+	glog.Infof(
 		"%v -> %v: Balanced Resource Allocation, Absolute/Requested: (%d, %d) / (%d, %d) Score: (%d)",
 		pod.Name, node.Name,
 		totalMilliCPU, totalMemory,
diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
index ab43cecfedb..97c3a942841 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
 	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 
@@ -38,6 +39,88 @@ func makeMinion(node string, milliCPU, memory int64) api.Node {
 	}
 }
 
+func TestDumbSpreading(t *testing.T) {
+	noResources := api.PodSpec{
+		Containers: []api.Container{},
+	}
+	small := api.PodSpec{
+		NodeName: "machine1",
+		Containers: []api.Container{
+			{
+				Resources: api.ResourceRequirements{
+					Limits: api.ResourceList{
+						"cpu":    resource.MustParse("100m"),
+						"memory": resource.MustParse("1000"),
+					},
+				},
+			},
+		},
+	}
+	large := api.PodSpec{
+		NodeName: "machine2",
+		Containers: []api.Container{
+			{
+				Resources: api.ResourceRequirements{
+					Limits: api.ResourceList{
+						"cpu":    resource.MustParse("600m"),
+						"memory": resource.MustParse("6000"),
+					},
+				},
+			},
+		},
+	}
+	tests := []struct {
+		pod          *api.Pod
+		pods         []*api.Pod
+		nodes        []api.Node
+		expectedList algorithm.HostPriorityList
+		test         string
+	}{
+		{
+			/* Minion1 CPU capacity 1000m, free 700m/7000, 3 pods
+			LeastRequestedPriority score 7
+			BalancedResourceAllocation score 10
+			ServiceSpreadingPriority score 10
+			DumbSpreadingPriority score 6
+			Total: 7 + 10 + 10 + 2*6 = 39
+
+			Minion2 CPU capacity 1000m, free 400m/4000, 1 pod
+			LeastRequestedPriority score 4
+			BalancedResourceAllocation score 10
+			ServiceSpreadingPriority score 10
+			DumbSpreadingPriority score 8
+			Total: 4 + 10 + 10 + 2*8 = 40
+
+			Moral of the story: We prefer the machine that is more heavily loaded,
+			because it has fewer pods.
+			*/
+			pod:          &api.Pod{Spec: noResources},
+			nodes:        []api.Node{makeMinion("machine1", 1000, 10000), makeMinion("machine2", 1000, 10000)},
+			expectedList: []algorithm.HostPriority{{"machine1", 39}, {"machine2", 40}},
+			test:         "nothing scheduled, nothing requested",
+			pods: []*api.Pod{
+				{Spec: small}, {Spec: small}, {Spec: small},
+				{Spec: large},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		list, err := scheduler.PrioritizeNodes(
+			test.pod,
+			algorithm.FakePodLister(test.pods),
+			[]algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: DumbSpreadingPriority, Weight: 2}, {Function: NewServiceSpreadPriority(algorithm.FakeServiceLister([]api.Service{})), Weight: 1}},
+			algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+		if err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if !reflect.DeepEqual(test.expectedList, list) {
+			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+		}
+	}
+}
+
+
 func TestLeastRequested(t *testing.T) {
 	labels1 := map[string]string{
 		"foo": "bar",
diff --git a/plugin/pkg/scheduler/algorithm/priorities/service_spreading.go b/plugin/pkg/scheduler/algorithm/priorities/service_spreading.go
index eaddad66d0d..ff9216e62d6 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/service_spreading.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/service_spreading.go
@@ -20,6 +20,7 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
 	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
+	"github.com/golang/glog"
 )
 
 type ServiceSpread struct {
@@ -82,6 +83,10 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorith
 			fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
 		}
 		result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
+		// glog.V(10).Infof(
+		glog.Infof(
+			"%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
+		)
 	}
 	return result, nil
 }
diff --git a/plugin/pkg/scheduler/algorithm/priorities/spreading_test.go b/plugin/pkg/scheduler/algorithm/priorities/service_spreading_test.go
similarity index 100%
rename from plugin/pkg/scheduler/algorithm/priorities/spreading_test.go
rename to plugin/pkg/scheduler/algorithm/priorities/service_spreading_test.go
diff --git a/plugin/pkg/scheduler/generic_scheduler.go b/plugin/pkg/scheduler/generic_scheduler.go
index 6de52d7c6a6..26a8f8cd2f5 100644
--- a/plugin/pkg/scheduler/generic_scheduler.go
+++ b/plugin/pkg/scheduler/generic_scheduler.go
@@ -71,7 +71,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionL
 		return "", err
 	}
 
-	priorityList, err := prioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
+	priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
 	if err != nil {
 		return "", err
 	}
@@ -139,7 +139,7 @@ func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFunc
 // Each priority function can also have its own weight
 // The minion scores returned by the priority function are multiplied by the weights to get weighted scores
 // All scores are finally combined (added) to get the total weighted scores of all minions
-func prioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 	result := algorithm.HostPriorityList{}
 
 	// If no priority configs are provided, then the EqualPriority function is applied
@@ -165,6 +165,7 @@ func prioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfig
 		}
 	}
 	for host, score := range combinedScores {
+		glog.V(10).Infof("Host %s Score %d", host, score)
 		result = append(result, algorithm.HostPriority{Host: host, Score: score})
 	}
 	return result, nil
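
Note (not part of the diff above): the expected totals in TestDumbSpreading follow from the combination rule visible in PrioritizeNodes, where each priority function's 0-10 score is multiplied by its weight and the weighted scores are summed per host. Below is a minimal, standalone Go sketch of that arithmetic only; the host names, per-function scores, and weights are copied from the test case and its comment block, not taken from any scheduler API.

package main

import "fmt"

func main() {
	// Weights mirror the PriorityConfig list in TestDumbSpreading:
	// weight 1 for every function except DumbSpreadingPriority, which gets 2.
	weights := map[string]int{
		"LeastRequestedPriority":     1,
		"BalancedResourceAllocation": 1,
		"ServiceSpreadingPriority":   1,
		"DumbSpreadingPriority":      2,
	}
	// Per-function scores are the ones worked out in the test's comment block.
	scores := map[string]map[string]int{
		"machine1": {"LeastRequestedPriority": 7, "BalancedResourceAllocation": 10, "ServiceSpreadingPriority": 10, "DumbSpreadingPriority": 6},
		"machine2": {"LeastRequestedPriority": 4, "BalancedResourceAllocation": 10, "ServiceSpreadingPriority": 10, "DumbSpreadingPriority": 8},
	}
	for _, host := range []string{"machine1", "machine2"} {
		total := 0
		for name, score := range scores[host] {
			// Same combination rule as PrioritizeNodes: score * weight, summed per host.
			total += score * weights[name]
		}
		fmt.Printf("%s: %d\n", host, total) // machine1: 39, machine2: 40
	}
}

With DumbSpreadingPriority weighted at 2, machine2's score of 8 against machine1's 6 more than offsets machine1's better LeastRequestedPriority score (7 against 4), which is why the test expects machine2 to win 40 to 39 even though it is the more heavily loaded node.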