Move pkg/scheduler to plugin/pkg/scheduler
As described by the TODO in plugin/pkg/scheduler/scheduler.go: move everything from pkg/scheduler into this package, and remove the references from the registry.
plugin/pkg/scheduler/algorithm/priorities/priorities.go (new file, 212 lines)
@@ -0,0 +1,212 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"math"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
	"github.com/golang/glog"
)

// the unused capacity is calculated on a scale of 0-10
// 0 being the lowest priority and 10 being the highest
func calculateScore(requested, capacity int64, node string) int {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		glog.Infof("Combined requested resources from existing pods exceeds capacity on minion: %s", node)
		return 0
	}
	return int(((capacity - requested) * 10) / capacity)
}

// Calculate the occupancy on a node. 'node' has information about the resources on the node.
// 'pods' is a list of pods currently scheduled on the node.
func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
	totalMilliCPU := int64(0)
	totalMemory := int64(0)
	for _, existingPod := range pods {
		for _, container := range existingPod.Spec.Containers {
			totalMilliCPU += container.Resources.Limits.Cpu().MilliValue()
			totalMemory += container.Resources.Limits.Memory().Value()
		}
	}
	// Add the resources requested by the current pod being scheduled.
	// This also helps differentiate between differently sized, but empty, minions.
	for _, container := range pod.Spec.Containers {
		totalMilliCPU += container.Resources.Limits.Cpu().MilliValue()
		totalMemory += container.Resources.Limits.Memory().Value()
	}

	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()
	capacityMemory := node.Status.Capacity.Memory().Value()

	cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)
	memoryScore := calculateScore(totalMemory, capacityMemory, node.Name)
	glog.V(4).Infof(
		"%v -> %v: Least Requested Priority, Absolute/Requested: (%d, %d) / (%d, %d) Score: (%d, %d)",
		pod.Name, node.Name,
		totalMilliCPU, totalMemory,
		capacityMilliCPU, capacityMemory,
		cpuScore, memoryScore,
	)

	return algorithm.HostPriority{
		Host:  node.Name,
		Score: int((cpuScore + memoryScore) / 2),
	}
}

// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
// It calculates the fraction of memory and CPU requested by pods already scheduled on the node, and
// prioritizes based on the average of the unused fraction of CPU and memory.
// Details: score = (((capacity - sum(requested cpu)) * 10 / capacity) + ((capacity - sum(requested memory)) * 10 / capacity)) / 2
func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
	nodes, err := minionLister.List()
	if err != nil {
		return algorithm.HostPriorityList{}, err
	}
	podsToMachines, err := predicates.MapPodsToMachines(podLister)

	list := algorithm.HostPriorityList{}
	for _, node := range nodes.Items {
		list = append(list, calculateOccupancy(pod, node, podsToMachines[node.Name]))
	}
	return list, nil
}

type NodeLabelPrioritizer struct {
	label    string
	presence bool
}

func NewNodeLabelPriority(label string, presence bool) algorithm.PriorityFunction {
	labelPrioritizer := &NodeLabelPrioritizer{
		label:    label,
		presence: presence,
	}
	return labelPrioritizer.CalculateNodeLabelPriority
}

// CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
// If presence is true, prioritizes minions that have the specified label, regardless of value.
// If presence is false, prioritizes minions that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
	var score int
	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	labeledMinions := map[string]bool{}
	for _, minion := range minions.Items {
		exists := labels.Set(minion.Labels).Has(n.label)
		labeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)
	}

	result := []algorithm.HostPriority{}
	//score int - scale of 0-10
	// 0 being the lowest priority and 10 being the highest
	for minionName, success := range labeledMinions {
		if success {
			score = 10
		} else {
			score = 0
		}
		result = append(result, algorithm.HostPriority{Host: minionName, Score: score})
	}
	return result, nil
}

// BalancedResourceAllocation favors nodes with balanced resource usage rate.
// BalancedResourceAllocation should **NOT** be used alone, and **MUST** be used together with LeastRequestedPriority.
// It calculates the difference between the cpu and memory fraction of capacity, and prioritizes the host based on how
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
	nodes, err := minionLister.List()
	if err != nil {
		return algorithm.HostPriorityList{}, err
	}
	podsToMachines, err := predicates.MapPodsToMachines(podLister)

	list := algorithm.HostPriorityList{}
	for _, node := range nodes.Items {
		list = append(list, calculateBalancedResourceAllocation(pod, node, podsToMachines[node.Name]))
	}
	return list, nil
}

func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
	totalMilliCPU := int64(0)
	totalMemory := int64(0)
	score := int(0)
	for _, existingPod := range pods {
		for _, container := range existingPod.Spec.Containers {
			totalMilliCPU += container.Resources.Limits.Cpu().MilliValue()
			totalMemory += container.Resources.Limits.Memory().Value()
		}
	}
	// Add the resources requested by the current pod being scheduled.
	// This also helps differentiate between differently sized, but empty, minions.
	for _, container := range pod.Spec.Containers {
		totalMilliCPU += container.Resources.Limits.Cpu().MilliValue()
		totalMemory += container.Resources.Limits.Memory().Value()
	}

	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()
	capacityMemory := node.Status.Capacity.Memory().Value()

	cpuFraction := fractionOfCapacity(totalMilliCPU, capacityMilliCPU, node.Name)
	memoryFraction := fractionOfCapacity(totalMemory, capacityMemory, node.Name)
	if cpuFraction >= 1 || memoryFraction >= 1 {
		// if requested >= capacity, the corresponding host should never be preferred.
		score = 0
	} else {
		// Lower and upper boundary of the difference between cpuFraction and memoryFraction are -1 and 1
		// respectively. Multiplying the absolute value of the difference by 10 scales the value to
		// 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from
		// 10 leads to the score, which also scales from 0 to 10, with 10 representing well balanced.
		diff := math.Abs(cpuFraction - memoryFraction)
		score = int(10 - diff*10)
	}
	glog.V(4).Infof(
		"%v -> %v: Balanced Resource Allocation, Absolute/Requested: (%d, %d) / (%d, %d) Score: (%d)",
		pod.Name, node.Name,
		totalMilliCPU, totalMemory,
		capacityMilliCPU, capacityMemory,
		score,
	)

	return algorithm.HostPriority{
		Host:  node.Name,
		Score: score,
	}
}

func fractionOfCapacity(requested, capacity int64, node string) float64 {
	if capacity == 0 {
		return 1
	}
	return float64(requested) / float64(capacity)
}
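To make the scoring arithmetic above concrete, the following standalone sketch (not part of the commit; the helper names are illustrative re-implementations) reproduces calculateScore and the balanced-allocation formula for a minion with 4000 milli-CPU and 10000 units of memory when 3000 milli-CPU and 5000 units are requested:

package main

import (
	"fmt"
	"math"
)

// leastRequestedScore mirrors calculateScore above: unused capacity on a 0-10 scale.
func leastRequestedScore(requested, capacity int64) int {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return int(((capacity - requested) * 10) / capacity)
}

// balancedScore mirrors calculateBalancedResourceAllocation: 10 - |cpuFraction - memFraction| * 10.
func balancedScore(cpuFraction, memFraction float64) int {
	if cpuFraction >= 1 || memFraction >= 1 {
		return 0
	}
	return int(10 - math.Abs(cpuFraction-memFraction)*10)
}

func main() {
	cpuScore := leastRequestedScore(3000, 4000)  // ((4000-3000)*10)/4000 = 2 (integer division)
	memScore := leastRequestedScore(5000, 10000) // ((10000-5000)*10)/10000 = 5
	fmt.Println("least requested:", (cpuScore+memScore)/2) // (2+5)/2 = 3
	fmt.Println("balanced:", balancedScore(0.75, 0.5))     // 10 - 2.5 = 7.5, truncated to 7
}

Both results line up with the expectations in the test file that follows.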
plugin/pkg/scheduler/algorithm/priorities/priorities_test.go (new file, 602 lines)
@@ -0,0 +1,602 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"reflect"
	"sort"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

func makeMinion(node string, milliCPU, memory int64) api.Node {
	return api.Node{
		ObjectMeta: api.ObjectMeta{Name: node},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				"memory": *resource.NewQuantity(memory, resource.BinarySI),
			},
		},
	}
}

func TestLeastRequested(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	machine1Spec := api.PodSpec{
		Host: "machine1",
	}
	machine2Spec := api.PodSpec{
		Host: "machine2",
	}
	noResources := api.PodSpec{
		Containers: []api.Container{},
	}
	cpuOnly := api.PodSpec{
		Host: "machine1",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu": resource.MustParse("1000m"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu": resource.MustParse("2000m"),
					},
				},
			},
		},
	}
	cpuOnly2 := cpuOnly
	cpuOnly2.Host = "machine2"
	cpuAndMemory := api.PodSpec{
		Host: "machine2",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu":    resource.MustParse("1000m"),
						"memory": resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu":    resource.MustParse("2000m"),
						"memory": resource.MustParse("3000"),
					},
				},
			},
		},
	}
	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        []api.Node
		expectedList algorithm.HostPriorityList
		test         string
	}{
		{
			/*
				Minion1 scores (remaining resources) on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Minion1 Score: (10 + 10) / 2 = 10

				Minion2 scores (remaining resources) on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Minion2 Score: (10 + 10) / 2 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "nothing scheduled, nothing requested",
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Score: ((4000 - 3000) *10) / 4000 = 2.5
				Memory Score: ((10000 - 5000) *10) / 10000 = 5
				Minion1 Score: (2.5 + 5) / 2 = 3

				Minion2 scores on 0-10 scale
				CPU Score: ((6000 - 3000) *10) / 6000 = 5
				Memory Score: ((10000 - 5000) *10) / 10000 = 5
				Minion2 Score: (5 + 5) / 2 = 5
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 3}, {"machine2", 5}},
			test:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Minion1 Score: (10 + 10) / 2 = 10

				Minion2 scores on 0-10 scale
				CPU Score: ((4000 - 0) *10) / 4000 = 10
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Minion2 Score: (10 + 10) / 2 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "no resources requested, pods scheduled",
			pods: []*api.Pod{
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 0) *10) / 20000 = 10
				Minion1 Score: (4 + 10) / 2 = 7

				Minion2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Minion2 Score: (4 + 7.5) / 2 = 5
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
			expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 5}},
			test:         "no resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Minion1 Score: (4 + 7.5) / 2 = 5

				Minion2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 10000) *10) / 20000 = 5
				Minion2 Score: (4 + 5) / 2 = 4
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 4}},
			test:         "resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
				Minion1 Score: (4 + 7.5) / 2 = 5

				Minion2 scores on 0-10 scale
				CPU Score: ((10000 - 6000) *10) / 10000 = 4
				Memory Score: ((50000 - 10000) *10) / 50000 = 8
				Minion2 Score: (4 + 8) / 2 = 6
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 6}},
			test:         "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Score: ((4000 - 6000) *10) / 4000 = 0
				Memory Score: ((10000 - 0) *10) / 10000 = 10
				Minion1 Score: (0 + 10) / 2 = 5

				Minion2 scores on 0-10 scale
				CPU Score: ((4000 - 6000) *10) / 4000 = 0
				Memory Score: ((10000 - 5000) *10) / 10000 = 5
				Minion2 Score: (0 + 5) / 2 = 2
			*/
			pod:          &api.Pod{Spec: cpuOnly},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 2}},
			test:         "requested resources exceed minion capacity",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "zero minion resources, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
	}

	for _, test := range tests {
		list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}

func TestNewNodeLabelPriority(t *testing.T) {
	label1 := map[string]string{"foo": "bar"}
	label2 := map[string]string{"bar": "foo"}
	label3 := map[string]string{"bar": "baz"}
	tests := []struct {
		nodes        []api.Node
		label        string
		presence     bool
		expectedList algorithm.HostPriorityList
		test         string
	}{
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
			label:        "baz",
			presence:     true,
			test:         "no match found, presence true",
		},
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
			label:        "baz",
			presence:     false,
			test:         "no match found, presence false",
		},
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
			label:        "foo",
			presence:     true,
			test:         "one match found, presence true",
		},
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
			label:        "foo",
			presence:     false,
			test:         "one match found, presence false",
		},
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
			label:        "bar",
			presence:     true,
			test:         "two matches found, presence true",
		},
		{
			nodes: []api.Node{
				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
			},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
			label:        "bar",
			presence:     false,
			test:         "two matches found, presence false",
		},
	}

	for _, test := range tests {
		prioritizer := NodeLabelPrioritizer{
			label:    test.label,
			presence: test.presence,
		}
		list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		// sort the two lists to avoid failures on account of different ordering
		sort.Sort(test.expectedList)
		sort.Sort(list)
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}

func TestBalancedResourceAllocation(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	machine1Spec := api.PodSpec{
		Host: "machine1",
	}
	machine2Spec := api.PodSpec{
		Host: "machine2",
	}
	noResources := api.PodSpec{
		Containers: []api.Container{},
	}
	cpuOnly := api.PodSpec{
		Host: "machine1",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu": resource.MustParse("1000m"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu": resource.MustParse("2000m"),
					},
				},
			},
		},
	}
	cpuOnly2 := cpuOnly
	cpuOnly2.Host = "machine2"
	cpuAndMemory := api.PodSpec{
		Host: "machine2",
		Containers: []api.Container{
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu":    resource.MustParse("1000m"),
						"memory": resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						"cpu":    resource.MustParse("2000m"),
						"memory": resource.MustParse("3000"),
					},
				},
			},
		},
	}
	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        []api.Node
		expectedList algorithm.HostPriorityList
		test         string
	}{
		{
			/*
				Minion1 scores (remaining resources) on 0-10 scale
				CPU Fraction: 0 / 4000 = 0%
				Memory Fraction: 0 / 10000 = 0%
				Minion1 Score: 10 - (0-0)*10 = 10

				Minion2 scores (remaining resources) on 0-10 scale
				CPU Fraction: 0 / 4000 = 0%
				Memory Fraction: 0 / 10000 = 0%
				Minion2 Score: 10 - (0-0)*10 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "nothing scheduled, nothing requested",
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Fraction: 3000 / 4000 = 75%
				Memory Fraction: 5000 / 10000 = 50%
				Minion1 Score: 10 - (0.75-0.5)*10 = 7

				Minion2 scores on 0-10 scale
				CPU Fraction: 3000 / 6000 = 50%
				Memory Fraction: 5000 / 10000 = 50%
				Minion2 Score: 10 - (0.5-0.5)*10 = 10
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 10}},
			test:         "nothing scheduled, resources requested, differently sized machines",
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Fraction: 0 / 4000 = 0%
				Memory Fraction: 0 / 10000 = 0%
				Minion1 Score: 10 - (0-0)*10 = 10

				Minion2 scores on 0-10 scale
				CPU Fraction: 0 / 4000 = 0%
				Memory Fraction: 0 / 10000 = 0%
				Minion2 Score: 10 - (0-0)*10 = 10
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "no resources requested, pods scheduled",
			pods: []*api.Pod{
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 0 / 20000 = 0%
				Minion1 Score: 10 - (0.6-0)*10 = 4

				Minion2 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 5000 / 20000 = 25%
				Minion2 Score: 10 - (0.6-0.25)*10 = 6
			*/
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
			expectedList: []algorithm.HostPriority{{"machine1", 4}, {"machine2", 6}},
			test:         "no resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 5000 / 20000 = 25%
				Minion1 Score: 10 - (0.6-0.25)*10 = 6

				Minion2 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 10000 / 20000 = 50%
				Minion2 Score: 10 - (0.6-0.5)*10 = 9
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
			expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 9}},
			test:         "resources requested, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 5000 / 20000 = 25%
				Minion1 Score: 10 - (0.6-0.25)*10 = 6

				Minion2 scores on 0-10 scale
				CPU Fraction: 6000 / 10000 = 60%
				Memory Fraction: 10000 / 50000 = 20%
				Minion2 Score: 10 - (0.6-0.2)*10 = 6
			*/
			pod:          &api.Pod{Spec: cpuAndMemory},
			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
			expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 6}},
			test:         "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			/*
				Minion1 scores on 0-10 scale
				CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
				Memory Fraction: 0 / 10000 = 0%
				Minion1 Score: 0

				Minion2 scores on 0-10 scale
				CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
				Memory Fraction: 5000 / 10000 = 50%
				Minion2 Score: 0
			*/
			pod:          &api.Pod{Spec: cpuOnly},
			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "requested resources exceed minion capacity",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
		{
			pod:          &api.Pod{Spec: noResources},
			nodes:        []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "zero minion resources, pods scheduled with resources",
			pods: []*api.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			},
		},
	}

	for _, test := range tests {
		list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}
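TestNewNodeLabelPriority above sorts both lists before comparing because CalculateNodeLabelPriority iterates over a map, so its output order is not deterministic. sort.Sort compiles there because algorithm.HostPriorityList satisfies sort.Interface elsewhere in the scheduler package; a minimal stand-in (the local type names and the ordering by host name are assumptions of this sketch, not the package's actual definitions) looks like this:

package main

import (
	"fmt"
	"sort"
)

// hostPriority mirrors algorithm.HostPriority from the commit above.
type hostPriority struct {
	Host  string
	Score int
}

// hostPriorityList mirrors algorithm.HostPriorityList; the real Less ordering in the
// scheduler package may differ, this sketch simply orders by host name.
type hostPriorityList []hostPriority

func (h hostPriorityList) Len() int           { return len(h) }
func (h hostPriorityList) Less(i, j int) bool { return h[i].Host < h[j].Host }
func (h hostPriorityList) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func main() {
	list := hostPriorityList{{"machine2", 0}, {"machine1", 10}, {"machine3", 10}}
	sort.Sort(list) // deterministic order makes reflect.DeepEqual comparisons stable
	fmt.Println(list)
}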
plugin/pkg/scheduler/algorithm/priorities/spreading.go (new file, 169 lines)
@@ -0,0 +1,169 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

type ServiceSpread struct {
	serviceLister algorithm.ServiceLister
}

func NewServiceSpreadPriority(serviceLister algorithm.ServiceLister) algorithm.PriorityFunction {
	serviceSpread := &ServiceSpread{
		serviceLister: serviceLister,
	}
	return serviceSpread.CalculateSpreadPriority
}

// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
// on the same machine.
func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
	var maxCount int
	var nsServicePods []*api.Pod

	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err := podLister.List(selector)
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsServicePods = append(nsServicePods, nsPod)
			}
		}
	}

	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	counts := map[string]int{}
	if len(nsServicePods) > 0 {
		for _, pod := range nsServicePods {
			counts[pod.Spec.Host]++
			// Compute the maximum number of pods hosted on any minion
			if counts[pod.Spec.Host] > maxCount {
				maxCount = counts[pod.Spec.Host]
			}
		}
	}

	result := []algorithm.HostPriority{}
	//score int - scale of 0-10
	// 0 being the lowest priority and 10 being the highest
	for _, minion := range minions.Items {
		// initializing to the default/max minion score of 10
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
		}
		result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
	}
	return result, nil
}

type ServiceAntiAffinity struct {
	serviceLister algorithm.ServiceLister
	label         string
}

func NewServiceAntiAffinityPriority(serviceLister algorithm.ServiceLister, label string) algorithm.PriorityFunction {
	antiAffinity := &ServiceAntiAffinity{
		serviceLister: serviceLister,
		label:         label,
	}
	return antiAffinity.CalculateAntiAffinityPriority
}

// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
	var nsServicePods []*api.Pod

	services, err := s.serviceLister.GetPodServices(pod)
	if err == nil {
		// just use the first service and get the other pods within the service
		// TODO: a separate predicate can be created that tries to handle all services for the pod
		selector := labels.SelectorFromSet(services[0].Spec.Selector)
		pods, err := podLister.List(selector)
		if err != nil {
			return nil, err
		}
		// consider only the pods that belong to the same namespace
		for _, nsPod := range pods {
			if nsPod.Namespace == pod.Namespace {
				nsServicePods = append(nsServicePods, nsPod)
			}
		}
	}

	minions, err := minionLister.List()
	if err != nil {
		return nil, err
	}

	// separate out the minions that have the label from the ones that don't
	otherMinions := []string{}
	labeledMinions := map[string]string{}
	for _, minion := range minions.Items {
		if labels.Set(minion.Labels).Has(s.label) {
			label := labels.Set(minion.Labels).Get(s.label)
			labeledMinions[minion.Name] = label
		} else {
			otherMinions = append(otherMinions, minion.Name)
		}
	}

	podCounts := map[string]int{}
	for _, pod := range nsServicePods {
		label, exists := labeledMinions[pod.Spec.Host]
		if !exists {
			continue
		}
		podCounts[label]++
	}

	numServicePods := len(nsServicePods)
	result := []algorithm.HostPriority{}
	//score int - scale of 0-10
	// 0 being the lowest priority and 10 being the highest
	for minion := range labeledMinions {
		// initializing to the default/max minion score of 10
		fScore := float32(10)
		if numServicePods > 0 {
			fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
		}
		result = append(result, algorithm.HostPriority{Host: minion, Score: int(fScore)})
	}
	// add the open minions with a score of 0
	for _, minion := range otherMinions {
		result = append(result, algorithm.HostPriority{Host: minion, Score: 0})
	}

	return result, nil
}
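As a quick check of the spreading formula above (score = 10 * (maxCount - countOnMinion) / maxCount, truncated to an int), this standalone sketch (not part of the commit; spreadScores is an illustrative helper) reproduces the "four pods, three service pods" case from the test file that follows, where one matching pod runs on machine1 and two on machine2:

package main

import "fmt"

// spreadScores mirrors the scoring loop in CalculateSpreadPriority: every minion starts
// at the maximum score of 10, and minions hosting more pods of the service score lower.
func spreadScores(counts map[string]int, minions []string) map[string]int {
	maxCount := 0
	for _, c := range counts {
		if c > maxCount {
			maxCount = c
		}
	}
	scores := map[string]int{}
	for _, m := range minions {
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-counts[m]) / float32(maxCount))
		}
		scores[m] = int(fScore)
	}
	return scores
}

func main() {
	// One matching pod on machine1, two on machine2 => maxCount is 2.
	fmt.Println(spreadScores(map[string]int{"machine1": 1, "machine2": 2}, []string{"machine1", "machine2"}))
	// map[machine1:5 machine2:0], matching the test expectation below
}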
plugin/pkg/scheduler/algorithm/priorities/spreading_test.go (new file, 362 lines)
@@ -0,0 +1,362 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"reflect"
	"sort"
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

func TestServiceSpreadPriority(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	zone1Spec := api.PodSpec{
		Host: "machine1",
	}
	zone2Spec := api.PodSpec{
		Host: "machine2",
	}
	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        []string
		services     []api.Service
		expectedList algorithm.HostPriorityList
		test         string
	}{
		{
			pod:          new(api.Pod),
			nodes:        []string{"machine1", "machine2"},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "nothing scheduled",
		},
		{
			pod:          &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods:         []*api.Pod{{Spec: zone1Spec}},
			nodes:        []string{"machine1", "machine2"},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "no services",
		},
		{
			pod:          &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods:         []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
			test:         "different services",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
			test:         "two pods, one service pod",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
			test:         "five pods, one service pod in no namespace",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
			test:         "four pods, one service pod in default namespace",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
			test:         "five pods, one service pod in specific namespace",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
			test:         "three pods, two service pods on different machines",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 0}},
			test:         "four pods, three service pods",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
			test:         "service with partial pod label matches",
		},
	}

	for _, test := range tests {
		serviceSpread := ServiceSpread{serviceLister: algorithm.FakeServiceLister(test.services)}
		list, err := serviceSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeNodeList(test.nodes)))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}

func TestZoneSpreadPriority(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	zone1 := map[string]string{
		"zone": "zone1",
	}
	zone2 := map[string]string{
		"zone": "zone2",
	}
	nozone := map[string]string{
		"name": "value",
	}
	zone0Spec := api.PodSpec{
		Host: "machine01",
	}
	zone1Spec := api.PodSpec{
		Host: "machine11",
	}
	zone2Spec := api.PodSpec{
		Host: "machine21",
	}
	labeledNodes := map[string]map[string]string{
		"machine01": nozone, "machine02": nozone,
		"machine11": zone1, "machine12": zone1,
		"machine21": zone2, "machine22": zone2,
	}
	tests := []struct {
		pod          *api.Pod
		pods         []*api.Pod
		nodes        map[string]map[string]string
		services     []api.Service
		expectedList algorithm.HostPriorityList
		test         string
	}{
		{
			pod:   new(api.Pod),
			nodes: labeledNodes,
			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
				{"machine21", 10}, {"machine22", 10},
				{"machine01", 0}, {"machine02", 0}},
			test: "nothing scheduled",
		},
		{
			pod:   &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods:  []*api.Pod{{Spec: zone1Spec}},
			nodes: labeledNodes,
			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
				{"machine21", 10}, {"machine22", 10},
				{"machine01", 0}, {"machine02", 0}},
			test: "no services",
		},
		{
			pod:      &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods:     []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
			nodes:    labeledNodes,
			services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
				{"machine21", 10}, {"machine22", 10},
				{"machine01", 0}, {"machine02", 0}},
			test: "different services",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:    labeledNodes,
			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
				{"machine21", 0}, {"machine22", 0},
				{"machine01", 0}, {"machine02", 0}},
			test: "three pods, one service pod",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:    labeledNodes,
			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine11", 5}, {"machine12", 5},
				{"machine21", 5}, {"machine22", 5},
				{"machine01", 0}, {"machine02", 0}},
			test: "three pods, two service pods on different machines",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
			},
			nodes:    labeledNodes,
			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
			expectedList: []algorithm.HostPriority{{"machine11", 0}, {"machine12", 0},
				{"machine21", 10}, {"machine22", 10},
				{"machine01", 0}, {"machine02", 0}},
			test: "three service label match pods in different namespaces",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:    labeledNodes,
			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine11", 6}, {"machine12", 6},
				{"machine21", 3}, {"machine22", 3},
				{"machine01", 0}, {"machine02", 0}},
			test: "four pods, three service pods",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:    labeledNodes,
			services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			expectedList: []algorithm.HostPriority{{"machine11", 3}, {"machine12", 3},
				{"machine21", 6}, {"machine22", 6},
				{"machine01", 0}, {"machine02", 0}},
			test: "service with partial pod label matches",
		},
		{
			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
			pods: []*api.Pod{
				{Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
			},
			nodes:    labeledNodes,
			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
			expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7},
				{"machine21", 5}, {"machine22", 5},
				{"machine01", 0}, {"machine02", 0}},
			test: "service pod on non-zoned minion",
		},
	}

	for _, test := range tests {
		zoneSpread := ServiceAntiAffinity{serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"}
		list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeLabeledMinionList(test.nodes)))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		// sort the two lists to avoid failures on account of different ordering
		sort.Sort(test.expectedList)
		sort.Sort(list)
		if !reflect.DeepEqual(test.expectedList, list) {
			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
		}
	}
}

func makeLabeledMinionList(nodeMap map[string]map[string]string) (result api.NodeList) {
	nodes := []api.Node{}
	for nodeName, labels := range nodeMap {
		nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
	}
	return api.NodeList{Items: nodes}
}

func makeNodeList(nodeNames []string) api.NodeList {
	result := api.NodeList{
		Items: make([]api.Node, len(nodeNames)),
	}
	for ix := range nodeNames {
		result.Items[ix].Name = nodeNames[ix]
	}
	return result
}
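All of the priority functions added here return an algorithm.HostPriorityList; the generic scheduler, outside this diff, combines several such lists and binds the pod to the highest-scoring host. The sketch below only illustrates that consumption pattern; the equal weighting, the local hostPriority stand-in, and bestHost are assumptions of this sketch, not the scheduler's actual combination logic:

package main

import "fmt"

// hostPriority is a local stand-in for algorithm.HostPriority from this commit.
type hostPriority struct {
	Host  string
	Score int
}

// bestHost returns the host with the highest combined score across several priority
// lists, summing scores host by host (equal weighting is an assumption of this sketch).
func bestHost(lists ...[]hostPriority) string {
	totals := map[string]int{}
	for _, list := range lists {
		for _, hp := range list {
			totals[hp.Host] += hp.Score
		}
	}
	best, bestScore := "", -1
	for host, score := range totals {
		if score > bestScore {
			best, bestScore = host, score
		}
	}
	return best
}

func main() {
	leastRequested := []hostPriority{{"machine1", 5}, {"machine2", 4}}
	balanced := []hostPriority{{"machine1", 6}, {"machine2", 9}}
	fmt.Println(bestHost(leastRequested, balanced)) // machine2 (4+9=13 beats 5+6=11)
}

With the sample scores taken from the least-requested and balanced-allocation test cases above, machine2 wins with a combined 13 against machine1's 11.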