Merge pull request #7955 from hurf/refactor_scheduler

Move pkg/scheduler to plugin/pkg/scheduler
Rohit Jnagal 2015-05-14 10:28:52 -07:00
commit 532f6fdcef
22 changed files with 263 additions and 238 deletions

View File

@@ -48,7 +48,6 @@ import (
 	kubeletTypes "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/types"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	utilErrors "github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors"
@@ -56,6 +55,7 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/version"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	"github.com/golang/glog"
 	cadvisorApi "github.com/google/cadvisor/info/v1"
 )
@@ -1354,7 +1354,7 @@ func (kl *Kubelet) checkCapacityExceeded(pods []*api.Pod) (fitting []*api.Pod, n
 	sort.Sort(podsByCreationTime(pods))
 	capacity := CapacityFromMachineInfo(info)
-	return scheduler.CheckPodsExceedingCapacity(pods, capacity)
+	return predicates.CheckPodsExceedingCapacity(pods, capacity)
 }
 
 // handleOutOfDisk detects if pods can't fit due to lack of disk space.
@@ -1403,7 +1403,7 @@ func (kl *Kubelet) checkNodeSelectorMatching(pods []*api.Pod) (fitting []*api.Po
 		return pods, nil
 	}
 	for _, pod := range pods {
-		if !scheduler.PodMatchesNodeLabels(pod, node) {
+		if !predicates.PodMatchesNodeLabels(pod, node) {
 			notFitting = append(notFitting, pod)
 			continue
 		}

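The two kubelet call sites above change only their import: admission still walks pods oldest-first (note the sort by creation time) and keeps each pod that still fits. A minimal standalone sketch of that behavior, with a trimmed pod type standing in for api.Pod and CPU as the only resource; the names and logic below are illustrative, not the moved predicates code:

package main

import (
	"fmt"
	"sort"
	"time"
)

// pod stands in for api.Pod; only the fields the sketch needs.
type pod struct {
	name      string
	milliCPU  int64
	createdAt time.Time
}

// checkPodsExceedingCapacity mirrors the behavior the kubelet relies on:
// pods are considered oldest-first and admitted until CPU capacity is spent.
func checkPodsExceedingCapacity(pods []pod, capacityMilliCPU int64) (fitting, notFitting []pod) {
	sort.Slice(pods, func(i, j int) bool { return pods[i].createdAt.Before(pods[j].createdAt) })
	var used int64
	for _, p := range pods {
		if used+p.milliCPU > capacityMilliCPU {
			notFitting = append(notFitting, p)
			continue
		}
		used += p.milliCPU
		fitting = append(fitting, p)
	}
	return fitting, notFitting
}

func main() {
	now := time.Now()
	pods := []pod{
		{"newer", 3000, now},
		{"older", 2000, now.Add(-time.Hour)},
	}
	fit, rest := checkPodsExceedingCapacity(pods, 4000)
	fmt.Println(len(fit), len(rest)) // 1 1: the older pod fits, the newer one does not
}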
View File

@@ -18,7 +18,7 @@ package registrytest
 
 import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 
 type Scheduler struct {
@@ -27,7 +27,7 @@ type Scheduler struct {
 	Machine string
 }
 
-func (s *Scheduler) Schedule(pod *api.Pod, lister scheduler.MinionLister) (string, error) {
+func (s *Scheduler) Schedule(pod *api.Pod, lister algorithm.MinionLister) (string, error) {
 	s.Pod = pod
 	return s.Machine, s.Err
 }

View File

@@ -16,4 +16,4 @@ limitations under the License.
 
 // Package scheduler contains a generic Scheduler interface and several
 // implementations.
-package scheduler
+package algorithm

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package algorithm
 
 import (
 	"fmt"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package predicates
 
 import (
 	"fmt"
@@ -22,6 +22,7 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 
 type NodeInfo interface {
@@ -154,14 +155,14 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	return true, nil
 }
 
-func NewResourceFitPredicate(info NodeInfo) FitPredicate {
+func NewResourceFitPredicate(info NodeInfo) algorithm.FitPredicate {
 	fit := &ResourceFit{
 		info: info,
 	}
 	return fit.PodFitsResources
 }
 
-func NewSelectorMatchPredicate(info NodeInfo) FitPredicate {
+func NewSelectorMatchPredicate(info NodeInfo) algorithm.FitPredicate {
 	selector := &NodeSelector{
 		info: info,
 	}
@@ -201,7 +202,7 @@ type NodeLabelChecker struct {
 	presence bool
 }
 
-func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) FitPredicate {
+func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) algorithm.FitPredicate {
 	labelChecker := &NodeLabelChecker{
 		info:   info,
 		labels: labels,
@@ -239,13 +240,13 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*
 }
 
 type ServiceAffinity struct {
-	podLister     PodLister
-	serviceLister ServiceLister
+	podLister     algorithm.PodLister
+	serviceLister algorithm.ServiceLister
 	nodeInfo      NodeInfo
 	labels        []string
 }
 
-func NewServiceAffinityPredicate(podLister PodLister, serviceLister ServiceLister, nodeInfo NodeInfo, labels []string) FitPredicate {
+func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) algorithm.FitPredicate {
 	affinity := &ServiceAffinity{
 		podLister:     podLister,
 		serviceLister: serviceLister,
@@ -361,7 +362,7 @@ func getUsedPorts(pods ...*api.Pod) map[int]bool {
 
 // MapPodsToMachines obtains a list of pods and pivots that list into a map where the keys are host names
 // and the values are the list of pods running on that host.
-func MapPodsToMachines(lister PodLister) (map[string][]*api.Pod, error) {
+func MapPodsToMachines(lister algorithm.PodLister) (map[string][]*api.Pod, error) {
 	machineToPods := map[string][]*api.Pod{}
 	// TODO: perform more targeted query...
 	pods, err := lister.List(labels.Everything())

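MapPodsToMachines stays exported because the priorities package keeps calling back into predicates for it. The pivot it performs is a simple group-by-host; a self-contained sketch with a trimmed pod type (illustrative, and it assumes unscheduled pods are simply skipped):

package main

import "fmt"

// pod is a trimmed-down stand-in with just the field the pivot uses.
type pod struct {
	name string
	host string
}

// mapPodsToMachines pivots a flat pod list into host -> pods, the shape
// both predicates and priorities consume.
func mapPodsToMachines(pods []pod) map[string][]pod {
	machineToPods := map[string][]pod{}
	for _, p := range pods {
		if p.host == "" {
			continue // an unscheduled pod occupies no machine yet (assumption)
		}
		machineToPods[p.host] = append(machineToPods[p.host], p)
	}
	return machineToPods
}

func main() {
	pods := []pod{{"a", "machine1"}, {"b", "machine2"}, {"c", "machine1"}, {"d", ""}}
	for host, ps := range mapPodsToMachines(pods) {
		fmt.Println(host, len(ps)) // machine1 2, machine2 1
	}
}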
View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package predicates
 
 import (
 	"fmt"
@@ -23,6 +23,7 @@ import (
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 
 type FakeNodeInfo api.Node
@@ -179,6 +180,23 @@ func TestPodFitsHost(t *testing.T) {
 	}
 }
 
+func newPod(host string, hostPorts ...int) *api.Pod {
+	networkPorts := []api.ContainerPort{}
+	for _, port := range hostPorts {
+		networkPorts = append(networkPorts, api.ContainerPort{HostPort: port})
+	}
+	return &api.Pod{
+		Spec: api.PodSpec{
+			Host: host,
+			Containers: []api.Container{
+				{
+					Ports: networkPorts,
+				},
+			},
+		},
+	}
+}
+
 func TestPodFitsPorts(t *testing.T) {
 	tests := []struct {
 		pod *api.Pod
@@ -641,7 +659,7 @@ func TestServiceAffinity(t *testing.T) {
 	for _, test := range tests {
 		nodes := []api.Node{node1, node2, node3, node4, node5}
-		serviceAffinity := ServiceAffinity{FakePodLister(test.pods), FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
+		serviceAffinity := ServiceAffinity{algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
 		fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []*api.Pod{}, test.node)
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)

View File

@@ -14,13 +14,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package priorities
 
 import (
 	"math"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	"github.com/golang/glog"
 )
 
@@ -39,7 +41,7 @@ func calculateScore(requested, capacity int64, node string) int {
 
 // Calculate the occupancy on a node.  'node' has information about the resources on the node.
 // 'pods' is a list of pods currently scheduled on the node.
-func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority {
+func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
 	totalMilliCPU := int64(0)
 	totalMemory := int64(0)
 	for _, existingPod := range pods {
@@ -68,9 +70,9 @@ func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriori
 		cpuScore, memoryScore,
 	)
 
-	return HostPriority{
-		host:  node.Name,
-		score: int((cpuScore + memoryScore) / 2),
+	return algorithm.HostPriority{
+		Host:  node.Name,
+		Score: int((cpuScore + memoryScore) / 2),
 	}
 }
 
@@ -78,14 +80,14 @@ func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriori
 // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
 // based on the minimum of the average of the fraction of requested to capacity.
 // Details: (Sum(requested cpu) / Capacity + Sum(requested memory) / Capacity) * 50
-func LeastRequestedPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 	nodes, err := minionLister.List()
 	if err != nil {
-		return HostPriorityList{}, err
+		return algorithm.HostPriorityList{}, err
 	}
-	podsToMachines, err := MapPodsToMachines(podLister)
+	podsToMachines, err := predicates.MapPodsToMachines(podLister)
 
-	list := HostPriorityList{}
+	list := algorithm.HostPriorityList{}
 	for _, node := range nodes.Items {
 		list = append(list, calculateOccupancy(pod, node, podsToMachines[node.Name]))
 	}
@@ -97,7 +99,7 @@ type NodeLabelPrioritizer struct {
 	presence bool
 }
 
-func NewNodeLabelPriority(label string, presence bool) PriorityFunction {
+func NewNodeLabelPriority(label string, presence bool) algorithm.PriorityFunction {
 	labelPrioritizer := &NodeLabelPrioritizer{
 		label:    label,
 		presence: presence,
@@ -108,7 +110,7 @@ func NewNodeLabelPriority(label string, presence bool) PriorityFunction {
 // CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
 // If presence is true, prioritizes minions that have the specified label, regardless of value.
 // If presence is false, prioritizes minions that do not have the specified label.
-func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 	var score int
 	minions, err := minionLister.List()
 	if err != nil {
@@ -121,7 +123,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
 		labeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)
 	}
 
-	result := []HostPriority{}
+	result := []algorithm.HostPriority{}
 	//score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest
 	for minionName, success := range labeledMinions {
@@ -130,7 +132,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
 		} else {
 			score = 0
 		}
-		result = append(result, HostPriority{host: minionName, score: score})
+		result = append(result, algorithm.HostPriority{Host: minionName, Score: score})
 	}
 	return result, nil
 }
@@ -141,21 +143,21 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
 // close the two metrics are to each other.
 // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
 // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
-func BalancedResourceAllocation(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 	nodes, err := minionLister.List()
 	if err != nil {
-		return HostPriorityList{}, err
+		return algorithm.HostPriorityList{}, err
 	}
-	podsToMachines, err := MapPodsToMachines(podLister)
+	podsToMachines, err := predicates.MapPodsToMachines(podLister)
 
-	list := HostPriorityList{}
+	list := algorithm.HostPriorityList{}
 	for _, node := range nodes.Items {
 		list = append(list, calculateBalancedResourceAllocation(pod, node, podsToMachines[node.Name]))
 	}
 	return list, nil
 }
 
-func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority {
+func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) algorithm.HostPriority {
 	totalMilliCPU := int64(0)
 	totalMemory := int64(0)
 	score := int(0)
@@ -196,9 +198,9 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 		score,
 	)
 
-	return HostPriority{
-		host:  node.Name,
-		score: score,
+	return algorithm.HostPriority{
+		Host:  node.Name,
+		Score: score,
 	}
 }

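The comments above carry the least-requested arithmetic: each resource contributes 10 * (capacity - requested) / capacity, and the host's priority is the average of its CPU and memory scores. A runnable sketch of that math, assuming the usual guard that zero-capacity or over-committed nodes score 0 (the real helper is calculateScore, whose body is not shown in this diff):

package main

import "fmt"

// leastRequestedScore averages the per-resource scores described in the
// comments: 10 * (capacity - requested) / capacity for CPU and memory.
func leastRequestedScore(reqCPU, capCPU, reqMem, capMem int64) int {
	frac := func(requested, capacity int64) int64 {
		if capacity == 0 || requested > capacity {
			return 0 // assumed guard: empty or over-committed nodes score lowest
		}
		return ((capacity - requested) * 10) / capacity
	}
	return int((frac(reqCPU, capCPU) + frac(reqMem, capMem)) / 2)
}

func main() {
	// An idle node scores 10; a node at half CPU and full memory scores 2.
	fmt.Println(leastRequestedScore(0, 4000, 0, 10000))        // 10
	fmt.Println(leastRequestedScore(2000, 4000, 10000, 10000)) // (5+0)/2 = 2
}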
View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package priorities
 
 import (
 	"reflect"
@@ -23,6 +23,7 @@ import (
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 
 func makeMinion(node string, milliCPU, memory int64) api.Node {
@@ -101,7 +102,7 @@ func TestLeastRequested(t *testing.T) {
 		pod          *api.Pod
 		pods         []*api.Pod
 		nodes        []api.Node
-		expectedList HostPriorityList
+		expectedList algorithm.HostPriorityList
 		test         string
 	}{
 		{
@@ -118,7 +119,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "nothing scheduled, nothing requested",
 		},
 		{
@@ -135,7 +136,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
-			expectedList: []HostPriority{{"machine1", 3}, {"machine2", 5}},
+			expectedList: []algorithm.HostPriority{{"machine1", 3}, {"machine2", 5}},
 			test:         "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
@@ -152,7 +153,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "no resources requested, pods scheduled",
 			pods: []*api.Pod{
 				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -175,7 +176,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
-			expectedList: []HostPriority{{"machine1", 7}, {"machine2", 5}},
+			expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 5}},
 			test:         "no resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -198,7 +199,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
-			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 4}},
+			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 4}},
 			test:         "resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -219,7 +220,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
-			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 6}},
+			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 6}},
 			test:         "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -240,7 +241,7 @@ func TestLeastRequested(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuOnly},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
-			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 2}},
+			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 2}},
 			test:         "requested resources exceed minion capacity",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -250,7 +251,7 @@ func TestLeastRequested(t *testing.T) {
 		{
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "zero minion resources, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -260,7 +261,7 @@ func TestLeastRequested(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		list, err := LeastRequestedPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
+		list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -278,7 +279,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 		nodes        []api.Node
 		label        string
 		presence     bool
-		expectedList HostPriorityList
+		expectedList algorithm.HostPriorityList
 		test         string
 	}{
 		{
@@ -287,7 +288,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
 			label:        "baz",
 			presence:     true,
 			test:         "no match found, presence true",
@@ -298,7 +299,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
 			label:        "baz",
 			presence:     false,
 			test:         "no match found, presence false",
@@ -309,7 +310,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
 			label:        "foo",
 			presence:     true,
 			test:         "one match found, presence true",
@@ -320,7 +321,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
 			label:        "foo",
 			presence:     false,
 			test:         "one match found, presence false",
@@ -331,7 +332,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
 			label:        "bar",
 			presence:     true,
 			test:         "two matches found, presence true",
@@ -342,7 +343,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
 			},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
 			label:        "bar",
 			presence:     false,
 			test:         "two matches found, presence false",
@@ -354,7 +355,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			label:    test.label,
 			presence: test.presence,
 		}
-		list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, FakeMinionLister(api.NodeList{Items: test.nodes}))
+		list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -431,7 +432,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		pod          *api.Pod
 		pods         []*api.Pod
 		nodes        []api.Node
-		expectedList HostPriorityList
+		expectedList algorithm.HostPriorityList
 		test         string
 	}{
 		{
@@ -448,7 +449,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "nothing scheduled, nothing requested",
 		},
 		{
@@ -465,7 +466,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
-			expectedList: []HostPriority{{"machine1", 7}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 10}},
 			test:         "nothing scheduled, resources requested, differently sized machines",
 		},
 		{
@@ -482,7 +483,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "no resources requested, pods scheduled",
 			pods: []*api.Pod{
 				{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -505,7 +506,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
-			expectedList: []HostPriority{{"machine1", 4}, {"machine2", 6}},
+			expectedList: []algorithm.HostPriority{{"machine1", 4}, {"machine2", 6}},
 			test:         "no resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
@@ -528,7 +529,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
-			expectedList: []HostPriority{{"machine1", 6}, {"machine2", 9}},
+			expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 9}},
 			test:         "resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -549,7 +550,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuAndMemory},
 			nodes:        []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
-			expectedList: []HostPriority{{"machine1", 6}, {"machine2", 6}},
+			expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 6}},
 			test:         "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -570,7 +571,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 			*/
 			pod:          &api.Pod{Spec: cpuOnly},
 			nodes:        []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "requested resources exceed minion capacity",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -580,7 +581,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		{
 			pod:          &api.Pod{Spec: noResources},
 			nodes:        []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "zero minion resources, pods scheduled with resources",
 			pods: []*api.Pod{
 				{Spec: cpuOnly},
@@ -590,7 +591,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		list, err := BalancedResourceAllocation(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
+		list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}

View File

@@ -14,18 +14,19 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package priorities
 
 import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 
 type ServiceSpread struct {
-	serviceLister ServiceLister
+	serviceLister algorithm.ServiceLister
 }
 
-func NewServiceSpreadPriority(serviceLister ServiceLister) PriorityFunction {
+func NewServiceSpreadPriority(serviceLister algorithm.ServiceLister) algorithm.PriorityFunction {
 	serviceSpread := &ServiceSpread{
 		serviceLister: serviceLister,
 	}
@@ -34,7 +35,7 @@ func NewServiceSpreadPriority(serviceLister ServiceLister) PriorityFunction {
 
 // CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
 // on the same machine.
-func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 	var maxCount int
 	var nsServicePods []*api.Pod
 
@@ -71,7 +72,7 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodListe
 		}
 	}
 
-	result := []HostPriority{}
+	result := []algorithm.HostPriority{}
 	//score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest
 	for _, minion := range minions.Items {
@@ -80,17 +81,17 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodListe
 		if maxCount > 0 {
 			fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
 		}
-		result = append(result, HostPriority{host: minion.Name, score: int(fScore)})
+		result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
 	}
 	return result, nil
 }
 
 type ServiceAntiAffinity struct {
-	serviceLister ServiceLister
+	serviceLister algorithm.ServiceLister
 	label         string
 }
 
-func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) PriorityFunction {
+func NewServiceAntiAffinityPriority(serviceLister algorithm.ServiceLister, label string) algorithm.PriorityFunction {
 	antiAffinity := &ServiceAntiAffinity{
 		serviceLister: serviceLister,
 		label:         label,
@@ -101,7 +102,7 @@ func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) P
 // CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
 // on machines with the same value for a particular label.
 // The label to be considered is provided to the struct (ServiceAntiAffinity).
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
 	var nsServicePods []*api.Pod
 
 	services, err := s.serviceLister.GetPodServices(pod)
@@ -148,7 +149,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
 	}
 
 	numServicePods := len(nsServicePods)
-	result := []HostPriority{}
+	result := []algorithm.HostPriority{}
 	//score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest
 	for minion := range labeledMinions {
@@ -157,11 +158,11 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
 		if numServicePods > 0 {
 			fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
 		}
-		result = append(result, HostPriority{host: minion, score: int(fScore)})
+		result = append(result, algorithm.HostPriority{Host: minion, Score: int(fScore)})
 	}
 	// add the open minions with a score of 0
 	for _, minion := range otherMinions {
-		result = append(result, HostPriority{host: minion, score: 0})
+		result = append(result, algorithm.HostPriority{Host: minion, Score: 0})
 	}
 
 	return result, nil

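Both spreading functions use the same linear ramp, visible verbatim in the diff above: the machine carrying the most matching pods scores 0 and an empty machine scores 10. A standalone sketch of that computation, checked against the "four pods, three service pods" row of the spreading tests in the next file:

package main

import "fmt"

// spreadScores applies the scoring from CalculateSpreadPriority: given a
// count of matching service pods per machine, the busiest machine scores 0
// and scores scale linearly up to 10 for an empty machine.
func spreadScores(counts map[string]int) map[string]int {
	maxCount := 0
	for _, c := range counts {
		if c > maxCount {
			maxCount = c
		}
	}
	scores := map[string]int{}
	for machine, c := range counts {
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-c) / float32(maxCount))
		}
		scores[machine] = int(fScore)
	}
	return scores
}

func main() {
	// machine1 carries one service pod, machine2 carries two, so the scores
	// come out 5 and 0, matching the test table's expected list.
	fmt.Println(spreadScores(map[string]int{"machine1": 1, "machine2": 2}))
}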
View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package priorities
 
 import (
 	"reflect"
@@ -22,6 +22,7 @@ import (
 	"testing"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 
 func TestServiceSpreadPriority(t *testing.T) {
@@ -44,20 +45,20 @@ func TestServiceSpreadPriority(t *testing.T) {
 		pods         []*api.Pod
 		nodes        []string
 		services     []api.Service
-		expectedList HostPriorityList
+		expectedList algorithm.HostPriorityList
 		test         string
 	}{
 		{
 			pod:          new(api.Pod),
 			nodes:        []string{"machine1", "machine2"},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "nothing scheduled",
 		},
 		{
 			pod:          &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
 			pods:         []*api.Pod{{Spec: zone1Spec}},
 			nodes:        []string{"machine1", "machine2"},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "no services",
 		},
 		{
@@ -65,7 +66,7 @@ func TestServiceSpreadPriority(t *testing.T) {
 			pods:         []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test:         "different services",
 		},
 		{
@@ -76,7 +77,7 @@ func TestServiceSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "two pods, one service pod",
 		},
 		{
@@ -90,7 +91,7 @@ func TestServiceSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "five pods, one service pod in no namespace",
 		},
 		{
@@ -103,7 +104,7 @@ func TestServiceSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "four pods, one service pod in default namespace",
 		},
 		{
@@ -117,7 +118,7 @@ func TestServiceSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
-			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 0}},
 			test:         "five pods, one service pod in specific namespace",
 		},
 		{
@@ -129,7 +130,7 @@ func TestServiceSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test:         "three pods, two service pods on different machines",
 		},
 		{
@@ -142,7 +143,7 @@ func TestServiceSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 0}},
+			expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 0}},
 			test:         "four pods, three service pods",
 		},
 		{
@@ -154,14 +155,14 @@ func TestServiceSpreadPriority(t *testing.T) {
 			},
 			nodes:        []string{"machine1", "machine2"},
 			services:     []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 5}},
+			expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}},
 			test:         "service with partial pod label matches",
 		},
 	}
 
 	for _, test := range tests {
-		serviceSpread := ServiceSpread{serviceLister: FakeServiceLister(test.services)}
-		list, err := serviceSpread.CalculateSpreadPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeNodeList(test.nodes)))
+		serviceSpread := ServiceSpread{serviceLister: algorithm.FakeServiceLister(test.services)}
+		list, err := serviceSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeNodeList(test.nodes)))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -208,13 +209,13 @@ func TestZoneSpreadPriority(t *testing.T) {
 		pods         []*api.Pod
 		nodes        map[string]map[string]string
 		services     []api.Service
-		expectedList HostPriorityList
+		expectedList algorithm.HostPriorityList
 		test         string
 	}{
 		{
 			pod:   new(api.Pod),
 			nodes: labeledNodes,
-			expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "nothing scheduled",
@@ -223,7 +224,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			pod:   &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
 			pods:  []*api.Pod{{Spec: zone1Spec}},
 			nodes: labeledNodes,
-			expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "no services",
@@ -233,7 +234,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			pods:     []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
-			expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "different services",
@@ -247,7 +248,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+			expectedList: []algorithm.HostPriority{{"machine11", 10}, {"machine12", 10},
 				{"machine21", 0}, {"machine22", 0},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "three pods, one service pod",
@@ -261,7 +262,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine11", 5}, {"machine12", 5},
+			expectedList: []algorithm.HostPriority{{"machine11", 5}, {"machine12", 5},
 				{"machine21", 5}, {"machine22", 5},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "three pods, two service pods on different machines",
@@ -276,7 +277,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
-			expectedList: []HostPriority{{"machine11", 0}, {"machine12", 0},
+			expectedList: []algorithm.HostPriority{{"machine11", 0}, {"machine12", 0},
 				{"machine21", 10}, {"machine22", 10},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "three service label match pods in different namespaces",
@@ -291,7 +292,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine11", 6}, {"machine12", 6},
+			expectedList: []algorithm.HostPriority{{"machine11", 6}, {"machine12", 6},
 				{"machine21", 3}, {"machine22", 3},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "four pods, three service pods",
@@ -305,7 +306,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
-			expectedList: []HostPriority{{"machine11", 3}, {"machine12", 3},
+			expectedList: []algorithm.HostPriority{{"machine11", 3}, {"machine12", 3},
 				{"machine21", 6}, {"machine22", 6},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "service with partial pod label matches",
@@ -320,7 +321,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 			},
 			nodes:    labeledNodes,
 			services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
-			expectedList: []HostPriority{{"machine11", 7}, {"machine12", 7},
+			expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7},
 				{"machine21", 5}, {"machine22", 5},
 				{"machine01", 0}, {"machine02", 0}},
 			test: "service pod on non-zoned minion",
@@ -328,8 +329,8 @@ func TestZoneSpreadPriority(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		zoneSpread := ServiceAntiAffinity{serviceLister: FakeServiceLister(test.services), label: "zone"}
-		list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeLabeledMinionList(test.nodes)))
+		zoneSpread := ServiceAntiAffinity{serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"}
+		list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeLabeledMinionList(test.nodes)))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -349,3 +350,13 @@ func makeLabeledMinionList(nodeMap map[string]map[string]string) (result api.Nod
 	}
 	return api.NodeList{Items: nodes}
 }
+
+func makeNodeList(nodeNames []string) api.NodeList {
+	result := api.NodeList{
+		Items: make([]api.Node, len(nodeNames)),
+	}
+	for ix := range nodeNames {
+		result.Items[ix].Name = nodeNames[ix]
+	}
+	return result
+}

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package algorithm
 
 import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -22,6 +22,6 @@ import (
 
 // Scheduler is an interface implemented by things that know how to schedule pods
 // onto machines.
-type Scheduler interface {
+type ScheduleAlgorithm interface {
 	Schedule(*api.Pod, MinionLister) (selectedMachine string, err error)
 }

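The rename from Scheduler to ScheduleAlgorithm keeps the interface a single method, so any algorithm plugs in trivially. A toy implementation follows; Pod and MinionLister are stand-ins so the sketch compiles on its own (the real types live in pkg/api and the renamed algorithm package):

package main

import (
	"errors"
	"fmt"
)

// Pod and MinionLister are simplified stand-ins for the real types.
type Pod struct{ Name string }

type MinionLister interface {
	List() ([]string, error)
}

// ScheduleAlgorithm mirrors the renamed interface from the diff.
type ScheduleAlgorithm interface {
	Schedule(*Pod, MinionLister) (selectedMachine string, err error)
}

type staticLister []string

func (s staticLister) List() ([]string, error) { return s, nil }

// firstFit is a toy algorithm: it picks the first listed minion.
type firstFit struct{}

func (firstFit) Schedule(pod *Pod, lister MinionLister) (string, error) {
	minions, err := lister.List()
	if err != nil {
		return "", err
	}
	if len(minions) == 0 {
		return "", errors.New("no minions available")
	}
	return minions[0], nil
}

func main() {
	var s ScheduleAlgorithm = firstFit{}
	machine, _ := s.Schedule(&Pod{Name: "nginx"}, staticLister{"machine1", "machine2"})
	fmt.Println(machine) // machine1
}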
View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package algorithm
 
 import (
 	"testing"
@@ -26,7 +26,7 @@ import (
 
 type schedulerTester struct {
 	t            *testing.T
-	scheduler    Scheduler
+	scheduler    ScheduleAlgorithm
 	minionLister MinionLister
 }
 
@@ -58,20 +58,3 @@ func (st *schedulerTester) expectFailure(pod *api.Pod) {
 		st.t.Error("Unexpected non-error")
 	}
 }
-
-func newPod(host string, hostPorts ...int) *api.Pod {
-	networkPorts := []api.ContainerPort{}
-	for _, port := range hostPorts {
-		networkPorts = append(networkPorts, api.ContainerPort{HostPort: port})
-	}
-	return &api.Pod{
-		Spec: api.PodSpec{
-			Host: host,
-			Containers: []api.Container{
-				{
-					Ports: networkPorts,
-				},
-			},
-		},
-	}
-}

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduler
+package algorithm
 
 import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -25,8 +25,8 @@ type FitPredicate func(pod *api.Pod, existingPods []*api.Pod, node string) (bool
 
 // HostPriority represents the priority of scheduling to a particular host, lower priority is better.
 type HostPriority struct {
-	host  string
-	score int
+	Host  string
+	Score int
 }
 
 type HostPriorityList []HostPriority
@@ -36,10 +36,10 @@ func (h HostPriorityList) Len() int {
 }
 
 func (h HostPriorityList) Less(i, j int) bool {
-	if h[i].score == h[j].score {
-		return h[i].host < h[j].host
+	if h[i].Score == h[j].Score {
+		return h[i].Host < h[j].Host
 	}
-	return h[i].score < h[j].score
+	return h[i].Score < h[j].Score
 }
 
 func (h HostPriorityList) Swap(i, j int) {

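Exporting Host and Score is what lets code outside the algorithm package — the generic scheduler moves to plugin/pkg/scheduler in this commit — build and sort priority lists. A small usage sketch:

package main

import (
    "fmt"
    "sort"

    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

func main() {
    list := algorithm.HostPriorityList{
        {Host: "machine2", Score: 2},
        {Host: "machine1", Score: 2},
        {Host: "machine3", Score: 1},
    }
    sort.Sort(list)           // ascending Score; equal scores fall back to Host order, per Less above
    fmt.Println(list[0].Host) // machine3
}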
View File

@@ -18,8 +18,11 @@ limitations under the License.
 package defaults
 import (
-    algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
     "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
 )
@@ -28,46 +31,46 @@ func init() {
     // EqualPriority is a prioritizer function that gives an equal weight of one to all minions
     // Register the priority function so that its available
     // but do not include it as part of the default priorities
-    factory.RegisterPriorityFunction("EqualPriority", algorithm.EqualPriority, 1)
+    factory.RegisterPriorityFunction("EqualPriority", scheduler.EqualPriority, 1)
 }
 func defaultPredicates() util.StringSet {
     return util.NewStringSet(
         // Fit is defined based on the absence of port conflicts.
-        factory.RegisterFitPredicate("PodFitsPorts", algorithm.PodFitsPorts),
+        factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsPorts),
         // Fit is determined by resource availability.
         factory.RegisterFitPredicateFactory(
             "PodFitsResources",
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
-                return algorithm.NewResourceFitPredicate(args.NodeInfo)
+                return predicates.NewResourceFitPredicate(args.NodeInfo)
             },
         ),
         // Fit is determined by non-conflicting disk volumes.
-        factory.RegisterFitPredicate("NoDiskConflict", algorithm.NoDiskConflict),
+        factory.RegisterFitPredicate("NoDiskConflict", predicates.NoDiskConflict),
         // Fit is determined by node selector query.
         factory.RegisterFitPredicateFactory(
             "MatchNodeSelector",
             func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
-                return algorithm.NewSelectorMatchPredicate(args.NodeInfo)
+                return predicates.NewSelectorMatchPredicate(args.NodeInfo)
             },
         ),
         // Fit is determined by the presence of the Host parameter and a string match
-        factory.RegisterFitPredicate("HostName", algorithm.PodFitsHost),
+        factory.RegisterFitPredicate("HostName", predicates.PodFitsHost),
     )
 }
 func defaultPriorities() util.StringSet {
     return util.NewStringSet(
         // Prioritize nodes by least requested utilization.
-        factory.RegisterPriorityFunction("LeastRequestedPriority", algorithm.LeastRequestedPriority, 1),
+        factory.RegisterPriorityFunction("LeastRequestedPriority", priorities.LeastRequestedPriority, 1),
         // Prioritizes nodes to help achieve balanced resource usage
-        factory.RegisterPriorityFunction("BalancedResourceAllocation", algorithm.BalancedResourceAllocation, 1),
+        factory.RegisterPriorityFunction("BalancedResourceAllocation", priorities.BalancedResourceAllocation, 1),
         // spreads pods by minimizing the number of pods (belonging to the same service) on the same minion.
         factory.RegisterPriorityConfigFactory(
             "ServiceSpreadingPriority",
             factory.PriorityConfigFactory{
                 Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
-                    return algorithm.NewServiceSpreadPriority(args.ServiceLister)
+                    return priorities.NewServiceSpreadPriority(args.ServiceLister)
                 },
                 Weight: 1,
             },

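Out-of-tree code can hang extra functions off the same registry. A hedged sketch — the provider package and priority function below are hypothetical, but RegisterPriorityFunction and the PriorityFunction signature are taken from this commit:

package myprovider

import (
    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
)

// flatPriority scores every minion 1, mirroring EqualPriority's shape.
func flatPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
    nodes, err := minionLister.List()
    if err != nil {
        return nil, err
    }
    result := algorithm.HostPriorityList{}
    for _, minion := range nodes.Items {
        result = append(result, algorithm.HostPriority{Host: minion.Name, Score: 1})
    }
    return result, nil
}

func init() {
    // Registered and addressable by name, but not added to the default set.
    factory.RegisterPriorityFunction("FlatPriority", flatPriority, 1)
}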
View File

@@ -28,9 +28,9 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
-    algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
     schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
     "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api/validation"
@@ -182,7 +182,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe
     r := rand.New(rand.NewSource(time.Now().UnixNano()))
-    algo := algorithm.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)
+    algo := scheduler.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)
     podBackoff := podBackoff{
         perPodBackoff: map[string]*backoffEntry{},

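NewGenericScheduler takes the *rand.Rand used to break ties among top-scoring hosts, so the factory seeds it from the clock while tests pin the seed for determinism. A self-contained sketch of a deterministic construction (the empty predicate and priority maps are illustrative):

package main

import (
    "math/rand"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

func main() {
    // Seed 0 makes the random tie-break reproducible; the tests later in this
    // commit rely on exactly that to assert which machine gets chosen.
    r := rand.New(rand.NewSource(0))
    algo := scheduler.NewGenericScheduler(
        map[string]algorithm.FitPredicate{}, // no predicates: every minion fits
        []algorithm.PriorityConfig{},        // empty: EqualPriority is applied as a fallback
        algorithm.FakePodLister([]*api.Pod{}),
        r,
    )
    _ = algo
}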
View File

@@ -29,8 +29,8 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
-    algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
     schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
     latestschedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api/latest"
 )

View File

@@ -22,8 +22,10 @@ import (
     "strings"
     "sync"
-    algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
     schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
     "github.com/golang/glog"
@@ -34,7 +36,7 @@ type PluginFactoryArgs struct {
     algorithm.PodLister
     algorithm.ServiceLister
     NodeLister algorithm.MinionLister
-    NodeInfo algorithm.NodeInfo
+    NodeInfo predicates.NodeInfo
 }
 // A FitPredicateFactory produces a FitPredicate from the given args.
@@ -95,7 +97,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
     if policy.Argument != nil {
         if policy.Argument.ServiceAffinity != nil {
             predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
-                return algorithm.NewServiceAffinityPredicate(
+                return predicates.NewServiceAffinityPredicate(
                     args.PodLister,
                     args.ServiceLister,
                     args.NodeInfo,
@@ -104,7 +106,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
             }
         } else if policy.Argument.LabelsPresence != nil {
             predicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {
-                return algorithm.NewNodeLabelPredicate(
+                return predicates.NewNodeLabelPredicate(
                     args.NodeInfo,
                     policy.Argument.LabelsPresence.Labels,
                     policy.Argument.LabelsPresence.Presence,
@@ -162,7 +164,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
     if policy.Argument.ServiceAntiAffinity != nil {
         pcf = &PriorityConfigFactory{
             Function: func(args PluginFactoryArgs) algorithm.PriorityFunction {
-                return algorithm.NewServiceAntiAffinityPriority(
+                return priorities.NewServiceAntiAffinityPriority(
                     args.ServiceLister,
                     policy.Argument.ServiceAntiAffinity.Label,
                 )
@@ -172,7 +174,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {
     } else if policy.Argument.LabelPreference != nil {
         pcf = &PriorityConfigFactory{
             Function: func(args PluginFactoryArgs) algorithm.PriorityFunction {
-                return algorithm.NewNodeLabelPriority(
+                return priorities.NewNodeLabelPriority(
                     policy.Argument.LabelPreference.Label,
                     policy.Argument.LabelPreference.Presence,
                 )

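RegisterCustomFitPredicate turns a policy object into a registered predicate factory. A hedged sketch of driving it directly — the struct names PredicateArgument and LabelsPresence are assumed; only the field paths policy.Argument.LabelsPresence.Labels and .Presence appear in the hunks above:

package main

import (
    schedulerapi "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/api"
    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
)

func main() {
    // Assumed type names; the field paths are the ones dereferenced above.
    policy := schedulerapi.PredicatePolicy{
        Name: "NoRetiringNodes", // hypothetical predicate name
        Argument: &schedulerapi.PredicateArgument{
            LabelsPresence: &schedulerapi.LabelsPresence{
                Labels:   []string{"retiring"},
                Presence: false, // fit only minions where the label is absent
            },
        },
    }
    // Returns the name under which the predicate factory was registered.
    _ = factory.RegisterCustomFitPredicate(policy)
}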
View File

@@ -25,6 +25,8 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 )
 type FailedPredicateMap map[string]util.StringSet
@@ -44,14 +46,14 @@ func (f *FitError) Error() string {
 }
 type genericScheduler struct {
-    predicates map[string]FitPredicate
-    prioritizers []PriorityConfig
-    pods PodLister
+    predicates map[string]algorithm.FitPredicate
+    prioritizers []algorithm.PriorityConfig
+    pods algorithm.PodLister
     random *rand.Rand
     randomLock sync.Mutex
 }
-func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (string, error) {
+func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionLister) (string, error) {
     minions, err := minionLister.List()
     if err != nil {
         return "", err
@@ -65,7 +67,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (st
         return "", err
     }
-    priorityList, err := prioritizeNodes(pod, g.pods, g.prioritizers, FakeMinionLister(filteredNodes))
+    priorityList, err := prioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
     if err != nil {
         return "", err
     }
@@ -81,7 +83,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (st
 // This method takes a prioritized list of minions and sorts them in reverse order based on scores
 // and then picks one randomly from the minions that had the highest score
-func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, error) {
+func (g *genericScheduler) selectHost(priorityList algorithm.HostPriorityList) (string, error) {
     if len(priorityList) == 0 {
         return "", fmt.Errorf("empty priorityList")
     }
@@ -97,16 +99,16 @@ func (g *genericScheduler) selectHost(priorityList algorithm.HostPriorityList) (
 // Filters the minions to find the ones that fit based on the given predicate functions
 // Each minion is passed through the predicate functions to determine if it is a fit
-func findNodesThatFit(pod *api.Pod, podLister PodLister, predicates map[string]FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
+func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFuncs map[string]algorithm.FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
     filtered := []api.Node{}
-    machineToPods, err := MapPodsToMachines(podLister)
+    machineToPods, err := predicates.MapPodsToMachines(podLister)
     failedPredicateMap := FailedPredicateMap{}
     if err != nil {
         return api.NodeList{}, FailedPredicateMap{}, err
     }
     for _, node := range nodes.Items {
         fits := true
-        for name, predicate := range predicates {
+        for name, predicate := range predicateFuncs {
             fit, err := predicate(pod, machineToPods[node.Name], node.Name)
             if err != nil {
                 return api.NodeList{}, FailedPredicateMap{}, err
@@ -133,8 +135,8 @@ func findNodesThatFit(pod *api.Pod, podLister PodLister, predicates map[string]F
 // Each priority function can also have its own weight
 // The minion scores returned by the priority function are multiplied by the weights to get weighted scores
 // All scores are finally combined (added) to get the total weighted scores of all minions
-func prioritizeNodes(pod *api.Pod, podLister PodLister, priorityConfigs []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) {
-    result := HostPriorityList{}
+func prioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+    result := algorithm.HostPriorityList{}
     // If no priority configs are provided, then the EqualPriority function is applied
     // This is required to generate the priority list in the required format
@@ -152,23 +154,23 @@ func prioritizeNodes(pod *api.Pod, podLister PodLister, priorityConfigs []Priori
         priorityFunc := priorityConfig.Function
         prioritizedList, err := priorityFunc(pod, podLister, minionLister)
         if err != nil {
-            return HostPriorityList{}, err
+            return algorithm.HostPriorityList{}, err
         }
         for _, hostEntry := range prioritizedList {
-            combinedScores[hostEntry.host] += hostEntry.score * weight
+            combinedScores[hostEntry.Host] += hostEntry.Score * weight
         }
     }
     for host, score := range combinedScores {
-        result = append(result, HostPriority{host: host, score: score})
+        result = append(result, algorithm.HostPriority{Host: host, Score: score})
     }
     return result, nil
 }
-func getBestHosts(list HostPriorityList) []string {
+func getBestHosts(list algorithm.HostPriorityList) []string {
     result := []string{}
     for _, hostEntry := range list {
-        if hostEntry.score == list[0].score {
-            result = append(result, hostEntry.host)
+        if hostEntry.Score == list[0].Score {
+            result = append(result, hostEntry.Host)
         } else {
             break
         }
@@ -177,24 +179,24 @@ func getBestHosts(list HostPriorityList) []string {
     }
 // EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
-func EqualPriority(_ *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
     nodes, err := minionLister.List()
     if err != nil {
         fmt.Errorf("failed to list nodes: %v", err)
-        return []HostPriority{}, err
+        return []algorithm.HostPriority{}, err
     }
-    result := []HostPriority{}
+    result := []algorithm.HostPriority{}
     for _, minion := range nodes.Items {
-        result = append(result, HostPriority{
-            host: minion.Name,
-            score: 1,
+        result = append(result, algorithm.HostPriority{
+            Host: minion.Name,
+            Score: 1,
         })
     }
     return result, nil
 }
-func NewGenericScheduler(predicates map[string]FitPredicate, prioritizers []PriorityConfig, pods PodLister, random *rand.Rand) Scheduler {
+func NewGenericScheduler(predicates map[string]algorithm.FitPredicate, prioritizers []algorithm.PriorityConfig, pods algorithm.PodLister, random *rand.Rand) algorithm.ScheduleAlgorithm {
     return &genericScheduler{
         predicates: predicates,
         prioritizers: prioritizers,

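The combination step in prioritizeNodes is plain weighted addition per host. A standalone sketch of the same arithmetic (the scores are hypothetical; the two maps stand in for two priority functions' outputs):

package main

import "fmt"

func main() {
    weights := []int{1, 2}
    perFunction := []map[string]int{
        {"machine1": 3, "machine2": 1}, // first priority function's scores
        {"machine1": 1, "machine2": 3}, // second priority function's scores
    }
    // Mirror prioritizeNodes: each score is multiplied by its function's
    // weight, then summed across functions per host.
    combined := map[string]int{}
    for i, scores := range perFunction {
        for host, score := range scores {
            combined[host] += score * weights[i]
        }
    }
    fmt.Println(combined["machine1"], combined["machine2"]) // 5 7
}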
View File

@@ -25,6 +25,7 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )
 func falsePredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
@@ -39,9 +40,9 @@ func matchesPredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool,
     return pod.Name == node, nil
 }
-func numericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func numericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
     nodes, err := minionLister.List()
-    result := []HostPriority{}
+    result := []algorithm.HostPriority{}
     if err != nil {
         return nil, fmt.Errorf("failed to list nodes: %v", err)
@@ -51,31 +52,31 @@ func numericPriority(pod *api.Pod, podLister PodLister, minionLister MinionListe
         if err != nil {
             return nil, err
         }
-        result = append(result, HostPriority{
-            host: minion.Name,
-            score: score,
+        result = append(result, algorithm.HostPriority{
+            Host: minion.Name,
+            Score: score,
         })
     }
     return result, nil
 }
-func reverseNumericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+func reverseNumericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
     var maxScore float64
     minScore := math.MaxFloat64
-    reverseResult := []HostPriority{}
+    reverseResult := []algorithm.HostPriority{}
     result, err := numericPriority(pod, podLister, minionLister)
     if err != nil {
         return nil, err
     }
     for _, hostPriority := range result {
-        maxScore = math.Max(maxScore, float64(hostPriority.score))
-        minScore = math.Min(minScore, float64(hostPriority.score))
+        maxScore = math.Max(maxScore, float64(hostPriority.Score))
+        minScore = math.Min(minScore, float64(hostPriority.Score))
     }
     for _, hostPriority := range result {
-        reverseResult = append(reverseResult, HostPriority{
-            host: hostPriority.host,
-            score: int(maxScore + minScore - float64(hostPriority.score)),
+        reverseResult = append(reverseResult, algorithm.HostPriority{
+            Host: hostPriority.Host,
+            Score: int(maxScore + minScore - float64(hostPriority.Score)),
         })
     }
@@ -95,44 +96,44 @@ func makeNodeList(nodeNames []string) api.NodeList {
 func TestSelectHost(t *testing.T) {
     scheduler := genericScheduler{random: rand.New(rand.NewSource(0))}
     tests := []struct {
-        list HostPriorityList
+        list algorithm.HostPriorityList
         possibleHosts util.StringSet
         expectsErr bool
     }{
         {
-            list: []HostPriority{
-                {host: "machine1.1", score: 1},
-                {host: "machine2.1", score: 2},
+            list: []algorithm.HostPriority{
+                {Host: "machine1.1", Score: 1},
+                {Host: "machine2.1", Score: 2},
             },
             possibleHosts: util.NewStringSet("machine2.1"),
             expectsErr: false,
         },
         // equal scores
         {
-            list: []HostPriority{
-                {host: "machine1.1", score: 1},
-                {host: "machine1.2", score: 2},
-                {host: "machine1.3", score: 2},
-                {host: "machine2.1", score: 2},
+            list: []algorithm.HostPriority{
+                {Host: "machine1.1", Score: 1},
+                {Host: "machine1.2", Score: 2},
+                {Host: "machine1.3", Score: 2},
+                {Host: "machine2.1", Score: 2},
             },
             possibleHosts: util.NewStringSet("machine1.2", "machine1.3", "machine2.1"),
             expectsErr: false,
         },
         // out of order scores
         {
-            list: []HostPriority{
-                {host: "machine1.1", score: 3},
-                {host: "machine1.2", score: 3},
-                {host: "machine2.1", score: 2},
-                {host: "machine3.1", score: 1},
-                {host: "machine1.3", score: 3},
+            list: []algorithm.HostPriority{
+                {Host: "machine1.1", Score: 3},
+                {Host: "machine1.2", Score: 3},
+                {Host: "machine2.1", Score: 2},
+                {Host: "machine3.1", Score: 1},
+                {Host: "machine1.3", Score: 3},
             },
             possibleHosts: util.NewStringSet("machine1.1", "machine1.2", "machine1.3"),
            expectsErr: false,
         },
         // empty priorityList
         {
-            list: []HostPriority{},
+            list: []algorithm.HostPriority{},
             possibleHosts: util.NewStringSet(),
             expectsErr: true,
         },
@@ -161,23 +162,23 @@ func TestSelectHost(t *testing.T) {
 func TestGenericScheduler(t *testing.T) {
     tests := []struct {
         name string
-        predicates map[string]FitPredicate
-        prioritizers []PriorityConfig
+        predicates map[string]algorithm.FitPredicate
+        prioritizers []algorithm.PriorityConfig
         nodes []string
         pod *api.Pod
         expectedHost string
         expectsErr bool
     }{
         {
-            predicates: map[string]FitPredicate{"false": falsePredicate},
-            prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
+            predicates: map[string]algorithm.FitPredicate{"false": falsePredicate},
+            prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
             nodes: []string{"machine1", "machine2"},
             expectsErr: true,
             name: "test 1",
         },
         {
-            predicates: map[string]FitPredicate{"true": truePredicate},
-            prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
+            predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+            prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
             nodes: []string{"machine1", "machine2"},
             // Random choice between both, the rand seeded above with zero, chooses "machine1"
             expectedHost: "machine1",
@@ -185,39 +186,39 @@ func TestGenericScheduler(t *testing.T) {
         },
         {
             // Fits on a machine where the pod ID matches the machine name
-            predicates: map[string]FitPredicate{"matches": matchesPredicate},
-            prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}},
+            predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
+            prioritizers: []algorithm.PriorityConfig{{Function: EqualPriority, Weight: 1}},
             nodes: []string{"machine1", "machine2"},
             pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}},
             expectedHost: "machine2",
             name: "test 3",
         },
         {
-            predicates: map[string]FitPredicate{"true": truePredicate},
-            prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
+            predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+            prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
             nodes: []string{"3", "2", "1"},
             expectedHost: "3",
             name: "test 4",
         },
         {
-            predicates: map[string]FitPredicate{"matches": matchesPredicate},
-            prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
+            predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
+            prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
             nodes: []string{"3", "2", "1"},
             pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
             expectedHost: "2",
             name: "test 5",
         },
         {
-            predicates: map[string]FitPredicate{"true": truePredicate},
-            prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
+            predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
+            prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
             nodes: []string{"3", "2", "1"},
             pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
             expectedHost: "1",
             name: "test 6",
         },
         {
-            predicates: map[string]FitPredicate{"true": truePredicate, "false": falsePredicate},
-            prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}},
+            predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate},
+            prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
             nodes: []string{"3", "2", "1"},
             expectsErr: true,
             name: "test 7",
@@ -226,8 +227,8 @@ func TestGenericScheduler(t *testing.T) {
     for _, test := range tests {
         random := rand.New(rand.NewSource(0))
-        scheduler := NewGenericScheduler(test.predicates, test.prioritizers, FakePodLister([]*api.Pod{}), random)
-        machine, err := scheduler.Schedule(test.pod, FakeMinionLister(makeNodeList(test.nodes)))
+        scheduler := NewGenericScheduler(test.predicates, test.prioritizers, algorithm.FakePodLister([]*api.Pod{}), random)
+        machine, err := scheduler.Schedule(test.pod, algorithm.FakeMinionLister(makeNodeList(test.nodes)))
         if test.expectsErr {
             if err == nil {
                 t.Error("Unexpected non-error")
@@ -245,8 +246,8 @@ func TestGenericScheduler(t *testing.T) {
 func TestFindFitAllError(t *testing.T) {
     nodes := []string{"3", "2", "1"}
-    predicates := map[string]FitPredicate{"true": truePredicate, "false": falsePredicate}
-    _, predicateMap, err := findNodesThatFit(&api.Pod{}, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
+    predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate}
+    _, predicateMap, err := findNodesThatFit(&api.Pod{}, algorithm.FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
     if err != nil {
         t.Errorf("unexpected error: %v", err)
@@ -269,9 +270,9 @@ func TestFindFitAllError(t *testing.T) {
 func TestFindFitSomeError(t *testing.T) {
     nodes := []string{"3", "2", "1"}
-    predicates := map[string]FitPredicate{"true": truePredicate, "match": matchesPredicate}
+    predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate}
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}
-    _, predicateMap, err := findNodesThatFit(pod, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
+    _, predicateMap, err := findNodesThatFit(pod, algorithm.FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes))
     if err != nil {
         t.Errorf("unexpected error: %v", err)

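Test 6 above is the one worth checking by hand. numericPriority scores nodes "3", "2", "1" as 3, 2, 1, so maxScore = 3 and minScore = 1, and reverseNumericPriority maps each score s to 3 + 1 - s, giving 1, 2, 3. With weights 1 and 2 the combined totals are 3 + 2 = 5 for "3", 2 + 4 = 6 for "2", and 1 + 6 = 7 for "1", so "1" wins, matching expectedHost. Note that selection takes the highest combined score, as getBestHosts shows, despite the stale "lower priority is better" comment on HostPriority.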
View File

@@ -25,7 +25,7 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
-    algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
     "github.com/golang/glog"
 )

View File

@@ -21,9 +21,8 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
-    // TODO: move everything from pkg/scheduler into this package. Remove references from registry.
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
     "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/metrics"
     "github.com/golang/glog"
@@ -70,8 +69,8 @@ type Config struct {
     // It is expected that changes made via modeler will be observed
     // by MinionLister and Algorithm.
     Modeler SystemModeler
-    MinionLister scheduler.MinionLister
-    Algorithm scheduler.Scheduler
+    MinionLister algorithm.MinionLister
+    Algorithm algorithm.ScheduleAlgorithm
     Binder Binder
     // NextPod should be a function that blocks until the next pod

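For orientation, a hedged sketch of wiring a Config with the fields visible in this hunk. Binder's Bind(*api.Binding) error signature is assumed, inferred from the *api.Binding values captured in the tests below:

package example

import (
    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
)

// nopBinder discards bindings; in production the binder posts Bindings to the apiserver.
type nopBinder struct{}

func (nopBinder) Bind(b *api.Binding) error { return nil }

func buildConfig(algo algorithm.ScheduleAlgorithm, next func() *api.Pod) *scheduler.Config {
    return &scheduler.Config{
        MinionLister: algorithm.FakeMinionLister(
            api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
        ),
        Algorithm: algo,        // any algorithm.ScheduleAlgorithm
        Binder:    nopBinder{},
        NextPod:   next,        // blocks until another pod needs scheduling
    }
}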
View File

@@ -27,8 +27,9 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 )
 type fakeBinder struct {
@@ -59,7 +60,7 @@ type mockScheduler struct {
     err error
 }
-func (es mockScheduler) Schedule(pod *api.Pod, ml scheduler.MinionLister) (string, error) {
+func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.MinionLister) (string, error) {
     return es.machine, es.err
 }
@@ -72,7 +73,7 @@ func TestScheduler(t *testing.T) {
     table := []struct {
         injectBindError error
         sendPod *api.Pod
-        algo scheduler.Scheduler
+        algo algorithm.ScheduleAlgorithm
         expectErrorPod *api.Pod
         expectAssumedPod *api.Pod
         expectError error
@@ -113,7 +114,7 @@ func TestScheduler(t *testing.T) {
                 gotAssumedPod = pod
             },
         },
-        MinionLister: scheduler.FakeMinionLister(
+        MinionLister: algorithm.FakeMinionLister(
             api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
         ),
         Algorithm: item.algo,
@@ -186,16 +187,16 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
     firstPod := podWithPort("foo", "", podPort)
     // Create the scheduler config
-    algo := scheduler.NewGenericScheduler(
-        map[string]scheduler.FitPredicate{"PodFitsPorts": scheduler.PodFitsPorts},
-        []scheduler.PriorityConfig{},
+    algo := NewGenericScheduler(
+        map[string]algorithm.FitPredicate{"PodFitsPorts": predicates.PodFitsPorts},
+        []algorithm.PriorityConfig{},
         modeler.PodLister(),
         rand.New(rand.NewSource(time.Now().UnixNano())))
     var gotBinding *api.Binding
     c := &Config{
         Modeler: modeler,
-        MinionLister: scheduler.FakeMinionLister(
+        MinionLister: algorithm.FakeMinionLister(
             api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
         ),
         Algorithm: algo,