Merge pull request #28781 from wojtek-t/optimize_priorities_2

Automatic merge from submit-queue

Change storeToNodeConditionLister to return []*api.Node instead of api.NodeList for performance



Currently, the copies made while creating api.NodeList values account for a significant part of the scheduler profile, and many of them are made in places that are not parallelizable.
Ref #28590
k8s-merge-robot 2016-07-12 06:18:15 -07:00 committed by GitHub
commit 98030ded05
20 changed files with 212 additions and 196 deletions
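
The heart of the change is the return type: filling an api.NodeList forces a full struct copy of every node appended to Items, while a []*api.Node copies only pointers. A minimal standalone sketch of the difference (illustrative only; Node, collectByValue, and collectByPointer are hypothetical stand-ins, not code from this commit):

```go
package main

import "fmt"

// Node stands in for api.Node, which is a large struct in practice.
type Node struct {
	Name   string
	Labels map[string]string
	// ... many more fields in the real api.Node
}

// collectByValue mimics the old api.NodeList-style accumulation:
// every append copies the whole Node struct.
func collectByValue(src []*Node) []Node {
	var items []Node
	for _, n := range src {
		items = append(items, *n) // full struct copy per node
	}
	return items
}

// collectByPointer mimics the new []*api.Node accumulation:
// every append copies only a pointer.
func collectByPointer(src []*Node) []*Node {
	var items []*Node
	for _, n := range src {
		items = append(items, n) // pointer copy only
	}
	return items
}

func main() {
	src := []*Node{{Name: "machine1"}, {Name: "machine2"}}
	fmt.Println(len(collectByValue(src)), len(collectByPointer(src)))
}
```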


@@ -151,11 +151,11 @@ type storeToNodeConditionLister struct {
 }
 
 // List returns a list of nodes that match the conditions defined by the predicate functions in the storeToNodeConditionLister.
-func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) {
+func (s storeToNodeConditionLister) List() (nodes []*api.Node, err error) {
 	for _, m := range s.store.List() {
 		node := m.(*api.Node)
 		if s.predicate(node) {
-			nodes.Items = append(nodes.Items, *node)
+			nodes = append(nodes, node)
 		} else {
 			glog.V(5).Infof("Node %s matches none of the conditions", node.Name)
 		}


@@ -114,9 +114,9 @@ func TestStoreToNodeConditionLister(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Unexpected error: %v", err)
 	}
-	got := make([]string, len(gotNodes.Items))
-	for ix := range gotNodes.Items {
-		got[ix] = gotNodes.Items[ix].Name
+	got := make([]string, len(gotNodes))
+	for ix := range gotNodes {
+		got[ix] = gotNodes[ix].Name
 	}
 	if !want.HasAll(got...) || len(got) != len(want) {
 		t.Errorf("Expected %v, got %v", want, got)


@@ -629,14 +629,27 @@ func stringSlicesEqual(x, y []string) bool {
 	return true
 }
 
+func includeNodeFromNodeList(node *api.Node) bool {
+	return !node.Spec.Unschedulable
+}
+
 func hostsFromNodeList(list *api.NodeList) []string {
 	result := []string{}
 	for ix := range list.Items {
-		if list.Items[ix].Spec.Unschedulable {
-			continue
+		if includeNodeFromNodeList(&list.Items[ix]) {
+			result = append(result, list.Items[ix].Name)
 		}
-		result = append(result, list.Items[ix].Name)
 	}
 	return result
 }
+
+func hostsFromNodeSlice(nodes []*api.Node) []string {
+	result := []string{}
+	for _, node := range nodes {
+		if includeNodeFromNodeList(node) {
+			result = append(result, node.Name)
+		}
+	}
+	return result
+}
@@ -675,7 +688,7 @@ func (s *ServiceController) nodeSyncLoop(period time.Duration) {
 			glog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
 			continue
 		}
-		newHosts := hostsFromNodeList(&nodes)
+		newHosts := hostsFromNodeSlice(nodes)
 		if stringSlicesEqual(newHosts, prevHosts) {
 			// The set of nodes in the cluster hasn't changed, but we can retry
 			// updating any services that we failed to update last time around.


@@ -27,20 +27,23 @@ import (
 // NodeLister interface represents anything that can list nodes for a scheduler.
 type NodeLister interface {
-	List() (list api.NodeList, err error)
+	// We explicitly return []*api.Node, instead of api.NodeList, to avoid
+	// performing expensive copies that are unneeded.
+	List() ([]*api.Node, error)
 }
 
 // FakeNodeLister implements NodeLister on a []string for test purposes.
-type FakeNodeLister api.NodeList
+type FakeNodeLister []*api.Node
 
 // List returns nodes as a []string.
-func (f FakeNodeLister) List() (api.NodeList, error) {
-	return api.NodeList(f), nil
+func (f FakeNodeLister) List() ([]*api.Node, error) {
+	return f, nil
 }
 
 // PodLister interface represents anything that can list pods for a scheduler.
 type PodLister interface {
-	// TODO: make this exactly the same as client's Pods(ns).List() method, by returning a api.PodList
+	// We explicitly return []*api.Pod, instead of api.PodList, to avoid
+	// performing expensive copies that are unneeded.
 	List(labels.Selector) ([]*api.Pod, error)
 }
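
Under the new contract, a NodeLister implementation hands back shared *api.Node values rather than copies, so callers must treat the returned nodes as read-only. A hedged sketch of what an implementation now looks like (staticNodeLister is hypothetical, not from this commit; api refers to k8s.io/kubernetes/pkg/api):

```go
// staticNodeLister is a hypothetical implementation of the revised
// NodeLister interface, shown only to illustrate the new contract.
type staticNodeLister struct {
	nodes []*api.Node
}

// List returns the stored pointers directly; no api.Node is copied.
// Callers share the underlying objects and must not mutate them.
func (l staticNodeLister) List() ([]*api.Node, error) {
	return l.nodes, nil
}
```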


@@ -101,12 +101,12 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 	var maxCount int
 	var minCount int
 	counts := map[string]int{}
-	for _, node := range nodes.Items {
+	for _, node := range nodes {
 		totalCount := 0
 		// count weights for the weighted pod affinity
 		if affinity.PodAffinity != nil {
 			for _, weightedTerm := range affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
-				weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, weightedTerm.Weight, weightedTerm.PodAffinityTerm, &node)
+				weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, weightedTerm.Weight, weightedTerm.PodAffinityTerm, node)
 				if err != nil {
 					return nil, err
 				}
@@ -117,7 +117,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 		// count weights for the weighted pod anti-affinity
 		if affinity.PodAntiAffinity != nil {
 			for _, weightedTerm := range affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
-				weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, (0 - weightedTerm.Weight), weightedTerm.PodAffinityTerm, &node)
+				weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, (0 - weightedTerm.Weight), weightedTerm.PodAffinityTerm, node)
 				if err != nil {
 					return nil, err
 				}
@@ -146,7 +146,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 			//}
 			for _, epAffinityTerm := range podAffinityTerms {
 				match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epAffinityTerm,
-					func(pod *api.Pod) (*api.Node, error) { return &node, nil },
+					func(pod *api.Pod) (*api.Node, error) { return node, nil },
 					func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
 				)
 				if err != nil {
@@ -161,7 +161,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 				// count weight for the weighted pod affinity indicated by the existing pod.
 				for _, epWeightedTerm := range epAffinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
 					match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm,
-						func(pod *api.Pod) (*api.Node, error) { return &node, nil },
+						func(pod *api.Pod) (*api.Node, error) { return node, nil },
 						func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
 					)
 					if err != nil {
@@ -177,7 +177,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 			if epAffinity.PodAntiAffinity != nil {
 				for _, epWeightedTerm := range epAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
 					match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm,
-						func(pod *api.Pod) (*api.Node, error) { return &node, nil },
+						func(pod *api.Pod) (*api.Node, error) { return node, nil },
 						func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
 					)
 					if err != nil {
@@ -201,7 +201,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 	// calculate final priority score for each node
 	result := []schedulerapi.HostPriority{}
-	for _, node := range nodes.Items {
+	for _, node := range nodes {
 		fScore := float64(0)
 		if (maxCount - minCount) > 0 {
 			fScore = 10 * (float64(counts[node.Name]-minCount) / float64(maxCount-minCount))


@@ -30,12 +30,12 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
 
-type FakeNodeListInfo []api.Node
+type FakeNodeListInfo []*api.Node
 
 func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
 	for _, node := range nodes {
 		if node.Name == nodeName {
-			return &node, nil
+			return node, nil
 		}
 	}
 	return nil, fmt.Errorf("Unable to find node: %s", nodeName)
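
Besides avoiding copies, the pointer slice removes a classic Go pitfall visible in the old FakeNodeListInfo: returning &node from a range over a value slice yields the address of the loop-variable copy, not of the element stored in the slice. A standalone sketch of the two patterns (illustrative types and helpers, not code from this commit):

```go
package main

import "fmt"

type Node struct{ Name string }

// findByValue mirrors the old pattern: ranging over a value slice and
// returning &node takes the address of a copy held in the loop variable.
func findByValue(nodes []Node, name string) *Node {
	for _, node := range nodes {
		if node.Name == name {
			return &node // pointer to a copy, detached from the slice
		}
	}
	return nil
}

// findByPointer mirrors the new pattern: the slice already holds
// pointers, so the stored pointer is returned as-is.
func findByPointer(nodes []*Node, name string) *Node {
	for _, node := range nodes {
		if node.Name == name {
			return node
		}
	}
	return nil
}

func main() {
	vals := []Node{{"machine1"}}
	ptrs := []*Node{{"machine1"}}
	// Mutating the value-based result does not affect vals[0];
	// mutating the pointer-based result does affect ptrs[0].
	findByValue(vals, "machine1").Name = "renamed"
	findByPointer(ptrs, "machine1").Name = "renamed"
	fmt.Println(vals[0].Name, ptrs[0].Name) // machine1 renamed
}
```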
@@ -252,13 +252,13 @@ func TestInterPodAffinityPriority(t *testing.T) {
 	tests := []struct {
 		pod          *api.Pod
 		pods         []*api.Pod
-		nodes        []api.Node
+		nodes        []*api.Node
 		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
 		{
 			pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -276,7 +276,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 				{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -294,7 +294,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 			pods: []*api.Pod{
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
@@ -316,7 +316,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 				{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
@@ -334,7 +334,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 				{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -350,7 +350,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -364,7 +364,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -385,7 +385,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 			},
@@ -398,7 +398,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 			},
@@ -412,7 +412,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 			},
@@ -426,7 +426,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
 			},
@@ -440,7 +440,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
 			},
@@ -462,7 +462,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 				{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
@@ -485,7 +485,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
 				{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Annotations: awayFromS1InAz}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
@@ -499,12 +499,12 @@ func TestInterPodAffinityPriority(t *testing.T) {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
 		interPodAffinity := InterPodAffinity{
 			info:                  FakeNodeListInfo(test.nodes),
-			nodeLister:            algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
+			nodeLister:            algorithm.FakeNodeLister(test.nodes),
 			podLister:             algorithm.FakePodLister(test.pods),
 			hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
 			failureDomains:        priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
 		}
-		list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+		list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -548,7 +548,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 	tests := []struct {
 		pod                   *api.Pod
 		pods                  []*api.Pod
-		nodes                 []api.Node
+		nodes                 []*api.Node
 		hardPodAffinityWeight int
 		expectedList          schedulerapi.HostPriorityList
 		test                  string
@@ -559,7 +559,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -574,7 +574,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -588,11 +588,11 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
 		ipa := InterPodAffinity{
 			info:                  FakeNodeListInfo(test.nodes),
-			nodeLister:            algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
+			nodeLister:            algorithm.FakeNodeLister(test.nodes),
 			podLister:             algorithm.FakePodLister(test.pods),
 			hardPodAffinityWeight: test.hardPodAffinityWeight,
 		}
-		list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+		list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -634,7 +634,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 	tests := []struct {
 		pod            *api.Pod
 		pods           []*api.Pod
-		nodes          []api.Node
+		nodes          []*api.Node
 		failureDomains priorityutil.Topologies
 		expectedList   schedulerapi.HostPriorityList
 		test           string
@@ -645,7 +645,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
 			},
@@ -659,7 +659,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 				{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 				{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
 			},
@@ -672,12 +672,12 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
 		ipa := InterPodAffinity{
 			info:                  FakeNodeListInfo(test.nodes),
-			nodeLister:            algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
+			nodeLister:            algorithm.FakeNodeLister(test.nodes),
 			podLister:             algorithm.FakePodLister(test.pods),
 			hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
 			failureDomains:        test.failureDomains,
 		}
-		list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+		list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}


@@ -37,7 +37,7 @@ func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*sche
 	}
 
 	var maxCount float64
-	counts := make(map[string]float64, len(nodes.Items))
+	counts := make(map[string]float64, len(nodes))
 
 	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
 	if err != nil {
@@ -59,7 +59,7 @@ func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*sche
 				return nil, err
 			}
 
-			for _, node := range nodes.Items {
+			for _, node := range nodes {
 				if nodeSelector.Matches(labels.Set(node.Labels)) {
 					counts[node.Name] += float64(preferredSchedulingTerm.Weight)
 				}
@@ -71,9 +71,8 @@ func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*sche
 		}
 	}
 
-	result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-	for i := range nodes.Items {
-		node := &nodes.Items[i]
+	result := make(schedulerapi.HostPriorityList, 0, len(nodes))
+	for _, node := range nodes {
 		if maxCount > 0 {
 			fScore := 10 * (counts[node.Name] / maxCount)
 			result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})


@@ -93,7 +93,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 	tests := []struct {
 		pod          *api.Pod
-		nodes        []api.Node
+		nodes        []*api.Node
 		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
@@ -103,7 +103,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 					Annotations: map[string]string{},
 				},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -117,7 +117,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 					Annotations: affinity1,
 				},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label4}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -131,7 +131,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 					Annotations: affinity1,
 				},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -145,7 +145,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 					Annotations: affinity2,
 				},
 			},
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: label5}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
@@ -156,7 +156,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		list, err := CalculateNodeAffinityPriority(test.pod, schedulercache.CreateNodeNameToInfoMap(nil), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+		list, err := CalculateNodeAffinityPriority(test.pod, schedulercache.CreateNodeNameToInfoMap(nil), algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}


@@ -86,9 +86,8 @@ func LeastRequestedPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulerca
 		return schedulerapi.HostPriorityList{}, err
 	}
 
-	list := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-	for i := range nodes.Items {
-		node := &nodes.Items[i]
+	list := make(schedulerapi.HostPriorityList, 0, len(nodes))
+	for _, node := range nodes {
 		list = append(list, calculateResourceOccupancy(pod, node, nodeNameToInfo[node.Name]))
 	}
 	return list, nil
@@ -118,12 +117,12 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, nodeName
 	}
 
 	labeledNodes := map[string]bool{}
-	for _, node := range nodes.Items {
+	for _, node := range nodes {
 		exists := labels.Set(node.Labels).Has(n.label)
 		labeledNodes[node.Name] = (exists && n.presence) || (!exists && !n.presence)
 	}
 
-	result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
+	result := make(schedulerapi.HostPriorityList, 0, len(nodes))
 	//score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest
 	for nodeName, success := range labeledNodes {
@@ -158,8 +157,7 @@ func ImageLocalityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercac
 	}
 
 	for i := range pod.Spec.Containers {
-		for j := range nodes.Items {
-			node := &nodes.Items[j]
+		for _, node := range nodes {
 			// Check if this container's image is present and get its size.
 			imageSize := checkContainerImageOnNode(node, &pod.Spec.Containers[i])
 			// Add this size to the total result of this node.
@@ -167,7 +165,7 @@ func ImageLocalityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercac
 		}
 	}
 
-	result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
+	result := make(schedulerapi.HostPriorityList, 0, len(nodes))
 	// score int - scale of 0-10
 	// 0 being the lowest priority and 10 being the highest.
 	for nodeName, sumSize := range sumSizeMap {
@@ -222,9 +220,8 @@ func BalancedResourceAllocation(pod *api.Pod, nodeNameToInfo map[string]*schedul
 		return schedulerapi.HostPriorityList{}, err
 	}
 
-	list := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-	for i := range nodes.Items {
-		node := &nodes.Items[i]
+	list := make(schedulerapi.HostPriorityList, 0, len(nodes))
+	for _, node := range nodes {
 		list = append(list, calculateBalancedResourceAllocation(pod, node, nodeNameToInfo[node.Name]))
 	}
 	return list, nil
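
Note the recurring iteration change in this file: the old code indexed the value slice and took element addresses to avoid copying each large struct into a loop variable; with a pointer slice, a plain range copies only a pointer per iteration. A minimal sketch of both shapes (Node, NodeList, and process are hypothetical stand-ins, not code from this commit):

```go
package main

import "fmt"

type Node struct{ Name string }
type NodeList struct{ Items []Node }

func process(n *Node) { fmt.Println(n.Name) }

func main() {
	nodeList := NodeList{Items: []Node{{"machine1"}, {"machine2"}}}
	// Old shape: index the value slice and take element addresses to
	// avoid copying each (large) struct into the loop variable.
	for i := range nodeList.Items {
		process(&nodeList.Items[i])
	}

	nodes := []*Node{{"machine1"}, {"machine2"}}
	// New shape: the slice already holds pointers, so ranging copies
	// only a pointer per iteration.
	for _, node := range nodes {
		process(node)
	}
}
```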


@@ -35,8 +35,8 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
 
-func makeNode(node string, milliCPU, memory int64) api.Node {
-	return api.Node{
+func makeNode(node string, milliCPU, memory int64) *api.Node {
+	return &api.Node{
 		ObjectMeta: api.ObjectMeta{Name: node},
 		Status: api.NodeStatus{
 			Capacity: api.ResourceList{
@@ -99,7 +99,7 @@ func TestZeroRequest(t *testing.T) {
 	tests := []struct {
 		pod   *api.Pod
 		pods  []*api.Pod
-		nodes []api.Node
+		nodes []*api.Node
 		test  string
 	}{
 		// The point of these next two tests is to show you get the same priority for a zero-request pod
@@ -107,7 +107,7 @@ func TestZeroRequest(t *testing.T) {
 		// and when the zero-request pod is the one being scheduled.
 		{
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test: "test priority of zero-request pod with machine with zero-request pod",
 			pods: []*api.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -116,7 +116,7 @@ func TestZeroRequest(t *testing.T) {
 		},
 		{
 			pod: &api.Pod{Spec: small},
-			nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test: "test priority of nonzero-request pod with machine with zero-request pod",
 			pods: []*api.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -126,7 +126,7 @@ func TestZeroRequest(t *testing.T) {
 		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
 		{
 			pod: &api.Pod{Spec: large},
-			nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 			test: "test priority of larger pod with machine with zero-request pod",
 			pods: []*api.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -145,7 +145,7 @@ func TestZeroRequest(t *testing.T) {
 			// plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
 			// to test what's actually in production.
 			[]algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{}), algorithm.FakeReplicaSetLister([]extensions.ReplicaSet{})), Weight: 1}},
-			algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}), []algorithm.SchedulerExtender{})
+			algorithm.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -228,7 +228,7 @@ func TestLeastRequested(t *testing.T) {
 	tests := []struct {
 		pod          *api.Pod
 		pods         []*api.Pod
-		nodes        []api.Node
+		nodes        []*api.Node
 		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
@@ -245,7 +245,7 @@ func TestLeastRequested(t *testing.T) {
 				Node2 Score: (10 + 10) / 2 = 10
 			*/
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test: "nothing scheduled, nothing requested",
 		},
@@ -262,7 +262,7 @@ func TestLeastRequested(t *testing.T) {
 				Node2 Score: (5 + 5) / 2 = 5
 			*/
 			pod: &api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 3}, {"machine2", 5}},
 			test: "nothing scheduled, resources requested, differently sized machines",
 		},
@@ -279,7 +279,7 @@ func TestLeastRequested(t *testing.T) {
 				Node2 Score: (10 + 10) / 2 = 10
 			*/
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test: "no resources requested, pods scheduled",
 			pods: []*api.Pod{
@@ -302,7 +302,7 @@ func TestLeastRequested(t *testing.T) {
 				Node2 Score: (4 + 7.5) / 2 = 5
 			*/
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+			nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 5}},
 			test: "no resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
@@ -325,7 +325,7 @@ func TestLeastRequested(t *testing.T) {
 				Node2 Score: (4 + 5) / 2 = 4
 			*/
 			pod: &api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+			nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 4}},
 			test: "resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
@@ -346,7 +346,7 @@ func TestLeastRequested(t *testing.T) {
 				Node2 Score: (4 + 8) / 2 = 6
 			*/
 			pod: &api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
+			nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 6}},
 			test: "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*api.Pod{
@@ -367,7 +367,7 @@ func TestLeastRequested(t *testing.T) {
 				Node2 Score: (0 + 5) / 2 = 2
 			*/
 			pod: &api.Pod{Spec: cpuOnly},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 2}},
 			test: "requested resources exceed node capacity",
 			pods: []*api.Pod{
@@ -377,7 +377,7 @@ func TestLeastRequested(t *testing.T) {
 		},
 		{
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
+			nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test: "zero node resources, pods scheduled with resources",
 			pods: []*api.Pod{
@@ -389,7 +389,7 @@ func TestLeastRequested(t *testing.T) {
 	for _, test := range tests {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
-		list, err := LeastRequestedPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+		list, err := LeastRequestedPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -404,14 +404,14 @@ func TestNewNodeLabelPriority(t *testing.T) {
 	label2 := map[string]string{"bar": "foo"}
 	label3 := map[string]string{"bar": "baz"}
 	tests := []struct {
-		nodes        []api.Node
+		nodes        []*api.Node
 		label        string
 		presence     bool
 		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
 		{
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -422,7 +422,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			test: "no match found, presence true",
 		},
 		{
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -433,7 +433,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			test: "no match found, presence false",
 		},
 		{
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -444,7 +444,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			test: "one match found, presence true",
 		},
 		{
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -455,7 +455,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			test: "one match found, presence false",
 		},
 		{
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -466,7 +466,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			test: "two matches found, presence true",
 		},
 		{
-			nodes: []api.Node{
+			nodes: []*api.Node{
 				{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 				{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -483,7 +483,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 			label:    test.label,
 			presence: test.presence,
 		}
-		list, err := prioritizer.CalculateNodeLabelPriority(nil, map[string]*schedulercache.NodeInfo{}, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+		list, err := prioritizer.CalculateNodeLabelPriority(nil, map[string]*schedulercache.NodeInfo{}, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -561,7 +561,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 	tests := []struct {
 		pod          *api.Pod
 		pods         []*api.Pod
-		nodes        []api.Node
+		nodes        []*api.Node
 		expectedList schedulerapi.HostPriorityList
 		test         string
 	}{
@@ -578,7 +578,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				Node2 Score: 10 - (0-0)*10 = 10
 			*/
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test: "nothing scheduled, nothing requested",
 		},
@@ -595,7 +595,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				Node2 Score: 10 - (0.5-0.5)*10 = 10
 			*/
 			pod: &api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 10}},
 			test: "nothing scheduled, resources requested, differently sized machines",
 		},
@@ -612,7 +612,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				Node2 Score: 10 - (0-0)*10 = 10
 			*/
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 			test: "no resources requested, pods scheduled",
 			pods: []*api.Pod{
@@ -635,7 +635,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				Node2 Score: 10 - (0.6-0.25)*10 = 6
 			*/
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+			nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 4}, {"machine2", 6}},
 			test: "no resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
@@ -658,7 +658,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				Node2 Score: 10 - (0.6-0.5)*10 = 9
 			*/
 			pod: &api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+			nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 9}},
 			test: "resources requested, pods scheduled with resources",
 			pods: []*api.Pod{
@@ -679,7 +679,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				Node2 Score: 10 - (0.6-0.2)*10 = 6
 			*/
 			pod: &api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
+			nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 6}},
 			test: "resources requested, pods scheduled with resources, differently sized machines",
 			pods: []*api.Pod{
@@ -700,7 +700,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 				Node2 Score: 0
 			*/
 			pod: &api.Pod{Spec: cpuOnly},
-			nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+			nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test: "requested resources exceed node capacity",
 			pods: []*api.Pod{
@@ -710,7 +710,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 		},
 		{
 			pod: &api.Pod{Spec: noResources},
-			nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
+			nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 			expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 			test: "zero node resources, pods scheduled with resources",
 			pods: []*api.Pod{
@@ -722,7 +722,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 	for _, test := range tests {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
-		list, err := BalancedResourceAllocation(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+		list, err := BalancedResourceAllocation(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@ -813,7 +813,7 @@ func TestImageLocalityPriority(t *testing.T) {
tests := []struct { tests := []struct {
pod *api.Pod pod *api.Pod
pods []*api.Pod pods []*api.Pod
nodes []api.Node nodes []*api.Node
expectedList schedulerapi.HostPriorityList expectedList schedulerapi.HostPriorityList
test string test string
}{ }{
@ -828,7 +828,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Image: gcr.io/250 250MB // Image: gcr.io/250 250MB
// Score: (250M-23M)/97.7M + 1 = 3 // Score: (250M-23M)/97.7M + 1 = 3
pod: &api.Pod{Spec: test_40_250}, pod: &api.Pod{Spec: test_40_250},
nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine2", 3}}, expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine2", 3}},
test: "two images spread on two nodes, prefer the larger image one", test: "two images spread on two nodes, prefer the larger image one",
}, },
@ -843,7 +843,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Image: not present // Image: not present
// Score: 0 // Score: 0
pod: &api.Pod{Spec: test_40_140}, pod: &api.Pod{Spec: test_40_140},
nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 0}}, expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 0}},
test: "two images on one node, prefer this node", test: "two images on one node, prefer this node",
}, },
@ -858,7 +858,7 @@ func TestImageLocalityPriority(t *testing.T) {
// Image: gcr.io/10 10MB // Image: gcr.io/10 10MB
// Score: 10 < min score = 0 // Score: 10 < min score = 0
pod: &api.Pod{Spec: test_min_max}, pod: &api.Pod{Spec: test_min_max},
nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}}, expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
test: "if exceed limit, use limit", test: "if exceed limit, use limit",
}, },
@ -866,7 +866,7 @@ func TestImageLocalityPriority(t *testing.T) {
for _, test := range tests { for _, test := range tests {
nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
list, err := ImageLocalityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes})) list, err := ImageLocalityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -880,8 +880,8 @@ func TestImageLocalityPriority(t *testing.T) {
} }
} }
func makeImageNode(node string, status api.NodeStatus) api.Node { func makeImageNode(node string, status api.NodeStatus) *api.Node {
return api.Node{ return &api.Node{
ObjectMeta: api.ObjectMeta{Name: node}, ObjectMeta: api.ObjectMeta{Name: node},
Status: status, Status: status,
} }
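Note on the fixture change above: returning *api.Node from makeImageNode lets the test tables build []*api.Node directly, so FakeNodeLister no longer forces a copy of every Node into an api.NodeList. Where a caller still holds the old value-slice shape, the conversion is mechanical; this is a minimal sketch (the helper name toNodePointers is illustrative, not part of this commit):

import "k8s.io/kubernetes/pkg/api"

func toNodePointers(items []api.Node) []*api.Node {
	nodes := make([]*api.Node, 0, len(items))
	for i := range items {
		// Take the address of the slice element, not of a range variable,
		// so each pointer refers to distinct backing storage.
		nodes = append(nodes, &items[i])
	}
	return nodes
}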

View File

@ -117,12 +117,13 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo ma
// Create a number of go-routines that will compute the number // Create a number of go-routines that will compute the number
// of "similar" pods for the given nodes. // of "similar" pods for the given nodes.
workers := 16 workers := 16
toProcess := make(chan string, len(nodes.Items)) toProcess := make(chan string, len(nodes))
for i := range nodes.Items { for i := range nodes {
toProcess <- nodes.Items[i].Name toProcess <- nodes[i].Name
} }
close(toProcess) close(toProcess)
// TODO: Use Parallelize.
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
wg.Add(workers) wg.Add(workers)
for i := 0; i < workers; i++ { for i := 0; i < workers; i++ {
@ -181,9 +182,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo ma
// Count similar pods by zone, if zone information is present // Count similar pods by zone, if zone information is present
countsByZone := map[string]int{} countsByZone := map[string]int{}
for i := range nodes.Items { for _, node := range nodes {
node := &nodes.Items[i]
count, found := countsByNodeName[node.Name] count, found := countsByNodeName[node.Name]
if !found { if !found {
continue continue
@ -207,11 +206,10 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo ma
} }
} }
result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items)) result := make(schedulerapi.HostPriorityList, 0, len(nodes))
//score int - scale of 0-maxPriority //score int - scale of 0-maxPriority
// 0 being the lowest priority and maxPriority being the highest // 0 being the lowest priority and maxPriority being the highest
for i := range nodes.Items { for _, node := range nodes {
node := &nodes.Items[i]
// initializing to the default/max node score of maxPriority // initializing to the default/max node score of maxPriority
fScore := float32(maxPriority) fScore := float32(maxPriority)
if maxCountByNodeName > 0 { if maxCountByNodeName > 0 {
@ -281,7 +279,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, nodeNa
// separate out the nodes that have the label from the ones that don't // separate out the nodes that have the label from the ones that don't
otherNodes := []string{} otherNodes := []string{}
labeledNodes := map[string]string{} labeledNodes := map[string]string{}
for _, node := range nodes.Items { for _, node := range nodes {
if labels.Set(node.Labels).Has(s.label) { if labels.Set(node.Labels).Has(s.label) {
label := labels.Set(node.Labels).Get(s.label) label := labels.Set(node.Labels).Get(s.label)
labeledNodes[node.Name] = label labeledNodes[node.Name] = label
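The fan-out at the top of this file is a standard bounded worker pool: fill a buffered channel with every node name, close it, and let a fixed number of goroutines drain it; the added TODO notes this could move to the shared Parallelize helper. A self-contained sketch of the pattern, using only the standard library (forEachBounded is an illustrative name; the real workers close over countsByNodeName and a mutex):

import "sync"

func forEachBounded(items []string, workers int, fn func(string)) {
	toProcess := make(chan string, len(items))
	for _, item := range items {
		toProcess <- item
	}
	close(toProcess) // workers exit once the channel drains

	wg := sync.WaitGroup{}
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for item := range toProcess {
				fn(item)
			}
		}()
	}
	wg.Wait()
}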

View File

@ -664,20 +664,18 @@ func TestZoneSpreadPriority(t *testing.T) {
} }
} }
func makeLabeledNodeList(nodeMap map[string]map[string]string) (result api.NodeList) { func makeLabeledNodeList(nodeMap map[string]map[string]string) []*api.Node {
nodes := []api.Node{} nodes := make([]*api.Node, 0, len(nodeMap))
for nodeName, labels := range nodeMap { for nodeName, labels := range nodeMap {
nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}}) nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
} }
return api.NodeList{Items: nodes} return nodes
} }
func makeNodeList(nodeNames []string) api.NodeList { func makeNodeList(nodeNames []string) []*api.Node {
result := api.NodeList{ nodes := make([]*api.Node, 0, len(nodeNames))
Items: make([]api.Node, len(nodeNames)), for _, nodeName := range nodeNames {
nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}})
} }
for ix := range nodeNames { return nodes
result.Items[ix].Name = nodeNames[ix]
}
return result
} }

View File

@ -61,7 +61,7 @@ func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*sch
// the max value of counts // the max value of counts
var maxCount float64 var maxCount float64
// counts hold the count of intolerable taints of a pod for a given node // counts hold the count of intolerable taints of a pod for a given node
counts := make(map[string]float64, len(nodes.Items)) counts := make(map[string]float64, len(nodes))
tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations) tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
if err != nil { if err != nil {
@ -71,8 +71,7 @@ func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*sch
tolerationList := getAllTolerationPreferNoSchedule(tolerations) tolerationList := getAllTolerationPreferNoSchedule(tolerations)
// calculate the intolerable taints for all the nodes // calculate the intolerable taints for all the nodes
for i := range nodes.Items { for _, node := range nodes {
node := &nodes.Items[i]
taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
if err != nil { if err != nil {
return nil, err return nil, err
@ -88,9 +87,8 @@ func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*sch
// The maximum priority value to give to a node // The maximum priority value to give to a node
// Priority values range from 0 - maxPriority // Priority values range from 0 - maxPriority
const maxPriority = float64(10) const maxPriority = float64(10)
result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items)) result := make(schedulerapi.HostPriorityList, 0, len(nodes))
for i := range nodes.Items { for _, node := range nodes {
node := &nodes.Items[i]
fScore := maxPriority fScore := maxPriority
if maxCount > 0 { if maxCount > 0 {
fScore = (1.0 - counts[node.Name]/maxCount) * 10 fScore = (1.0 - counts[node.Name]/maxCount) * 10
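The normalization above maps intolerable-taint counts onto the 0-10 priority range: a node with no intolerable taints scores maxPriority, and the worst node scores 0. A quick worked check with made-up counts of 0, 2, and 4 (so maxCount = 4) gives scores of 10, 5, and 0; score below mirrors the fScore arithmetic and is not a function from this diff:

func score(count, maxCount float64) float64 {
	const maxPriority = float64(10)
	if maxCount == 0 {
		// No taints anywhere: every node gets the maximum score.
		return maxPriority
	}
	return (1.0 - count/maxCount) * maxPriority
}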

View File

@ -27,9 +27,9 @@ import (
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
) )
func nodeWithTaints(nodeName string, taints []api.Taint) api.Node { func nodeWithTaints(nodeName string, taints []api.Taint) *api.Node {
taintsData, _ := json.Marshal(taints) taintsData, _ := json.Marshal(taints)
return api.Node{ return &api.Node{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: nodeName, Name: nodeName,
Annotations: map[string]string{ Annotations: map[string]string{
@ -57,7 +57,7 @@ func podWithTolerations(tolerations []api.Toleration) *api.Pod {
func TestTaintAndToleration(t *testing.T) { func TestTaintAndToleration(t *testing.T) {
tests := []struct { tests := []struct {
pod *api.Pod pod *api.Pod
nodes []api.Node nodes []*api.Node
expectedList schedulerapi.HostPriorityList expectedList schedulerapi.HostPriorityList
test string test string
}{ }{
@ -70,7 +70,7 @@ func TestTaintAndToleration(t *testing.T) {
Value: "bar", Value: "bar",
Effect: api.TaintEffectPreferNoSchedule, Effect: api.TaintEffectPreferNoSchedule,
}}), }}),
nodes: []api.Node{ nodes: []*api.Node{
nodeWithTaints("nodeA", []api.Taint{{ nodeWithTaints("nodeA", []api.Taint{{
Key: "foo", Key: "foo",
Value: "bar", Value: "bar",
@ -103,7 +103,7 @@ func TestTaintAndToleration(t *testing.T) {
Effect: api.TaintEffectPreferNoSchedule, Effect: api.TaintEffectPreferNoSchedule,
}, },
}), }),
nodes: []api.Node{ nodes: []*api.Node{
nodeWithTaints("nodeA", []api.Taint{}), nodeWithTaints("nodeA", []api.Taint{}),
nodeWithTaints("nodeB", []api.Taint{ nodeWithTaints("nodeB", []api.Taint{
{ {
@ -139,7 +139,7 @@ func TestTaintAndToleration(t *testing.T) {
Value: "bar", Value: "bar",
Effect: api.TaintEffectPreferNoSchedule, Effect: api.TaintEffectPreferNoSchedule,
}}), }}),
nodes: []api.Node{ nodes: []*api.Node{
nodeWithTaints("nodeA", []api.Taint{}), nodeWithTaints("nodeA", []api.Taint{}),
nodeWithTaints("nodeB", []api.Taint{ nodeWithTaints("nodeB", []api.Taint{
{ {
@ -182,7 +182,7 @@ func TestTaintAndToleration(t *testing.T) {
Effect: api.TaintEffectNoSchedule, Effect: api.TaintEffectNoSchedule,
}, },
}), }),
nodes: []api.Node{ nodes: []*api.Node{
nodeWithTaints("nodeA", []api.Taint{}), nodeWithTaints("nodeA", []api.Taint{}),
nodeWithTaints("nodeB", []api.Taint{ nodeWithTaints("nodeB", []api.Taint{
{ {
@ -215,7 +215,7 @@ func TestTaintAndToleration(t *testing.T) {
list, err := ComputeTaintTolerationPriority( list, err := ComputeTaintTolerationPriority(
test.pod, test.pod,
nodeNameToInfo, nodeNameToInfo,
algorithm.FakeNodeLister(api.NodeList{Items: test.nodes})) algorithm.FakeNodeLister(test.nodes))
if err != nil { if err != nil {
t.Errorf("%s, unexpected error: %v", test.test, err) t.Errorf("%s, unexpected error: %v", test.test, err)
} }

View File

@ -27,12 +27,12 @@ import (
type SchedulerExtender interface { type SchedulerExtender interface {
// Filter based on extender-implemented predicate functions. The filtered list is // Filter based on extender-implemented predicate functions. The filtered list is
// expected to be a subset of the supplied list. // expected to be a subset of the supplied list.
Filter(pod *api.Pod, nodes *api.NodeList) (filteredNodes *api.NodeList, err error) Filter(pod *api.Pod, nodes []*api.Node) (filteredNodes []*api.Node, err error)
// Prioritize based on extender-implemented priority functions. The returned scores & weight // Prioritize based on extender-implemented priority functions. The returned scores & weight
// are used to compute the weighted score for an extender. The weighted scores are added to // are used to compute the weighted score for an extender. The weighted scores are added to
// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection. // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
Prioritize(pod *api.Pod, nodes *api.NodeList) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) Prioritize(pod *api.Pod, nodes []*api.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
} }
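With Filter and Prioritize now trafficking in []*api.Node, an extender can pass node pointers straight through without copying any api.Node. A minimal conforming implementation, sketched only from the interface above (zoneExtender and its label key are illustrative, not from this commit):

import (
	"k8s.io/kubernetes/pkg/api"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

type zoneExtender struct {
	zone string
}

// Filter keeps nodes labeled with the extender's zone; the returned slice
// reuses the incoming pointers.
func (z *zoneExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, error) {
	filtered := make([]*api.Node, 0, len(nodes))
	for _, node := range nodes {
		if node.Labels["zone"] == z.zone {
			filtered = append(filtered, node)
		}
	}
	return filtered, nil
}

// Prioritize scores every node equally, deferring ranking to the default
// priority functions.
func (z *zoneExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, int, error) {
	result := make(schedulerapi.HostPriorityList, 0, len(nodes))
	for _, node := range nodes {
		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: 0})
	}
	return &result, 1, nil
}
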
// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods // ScheduleAlgorithm is an interface implemented by things that know how to schedule pods

View File

@ -93,16 +93,20 @@ func NewHTTPExtender(config *schedulerapi.ExtenderConfig, apiVersion string) (al
// Filter based on extender implemented predicate functions. The filtered list is // Filter based on extender implemented predicate functions. The filtered list is
// expected to be a subset of the supplied list. // expected to be a subset of the supplied list.
func (h *HTTPExtender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, error) { func (h *HTTPExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, error) {
var result schedulerapi.ExtenderFilterResult var result schedulerapi.ExtenderFilterResult
if h.filterVerb == "" { if h.filterVerb == "" {
return nodes, nil return nodes, nil
} }
nodeItems := make([]api.Node, 0, len(nodes))
for _, node := range nodes {
nodeItems = append(nodeItems, *node)
}
args := schedulerapi.ExtenderArgs{ args := schedulerapi.ExtenderArgs{
Pod: *pod, Pod: *pod,
Nodes: *nodes, Nodes: api.NodeList{Items: nodeItems},
} }
if err := h.send(h.filterVerb, &args, &result); err != nil { if err := h.send(h.filterVerb, &args, &result); err != nil {
@ -111,26 +115,34 @@ func (h *HTTPExtender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList,
if result.Error != "" { if result.Error != "" {
return nil, fmt.Errorf(result.Error) return nil, fmt.Errorf(result.Error)
} }
return &result.Nodes, nil nodeResult := make([]*api.Node, 0, len(result.Nodes.Items))
for i := range result.Nodes.Items {
nodeResult = append(nodeResult, &result.Nodes.Items[i])
}
return nodeResult, nil
} }
// Prioritize based on extender implemented priority functions. Weight*priority is added // Prioritize based on extender implemented priority functions. Weight*priority is added
// up for each such priority function. The returned score is added to the score computed // up for each such priority function. The returned score is added to the score computed
// by Kubernetes scheduler. The total score is used to do the host selection. // by Kubernetes scheduler. The total score is used to do the host selection.
func (h *HTTPExtender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, int, error) { func (h *HTTPExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, int, error) {
var result schedulerapi.HostPriorityList var result schedulerapi.HostPriorityList
if h.prioritizeVerb == "" { if h.prioritizeVerb == "" {
result := schedulerapi.HostPriorityList{} result := schedulerapi.HostPriorityList{}
for _, node := range nodes.Items { for _, node := range nodes {
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: 0}) result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: 0})
} }
return &result, 0, nil return &result, 0, nil
} }
nodeItems := make([]api.Node, 0, len(nodes))
for _, node := range nodes {
nodeItems = append(nodeItems, *node)
}
args := schedulerapi.ExtenderArgs{ args := schedulerapi.ExtenderArgs{
Pod: *pod, Pod: *pod,
Nodes: *nodes, Nodes: api.NodeList{Items: nodeItems},
} }
if err := h.send(h.prioritizeVerb, &args, &result); err != nil { if err := h.send(h.prioritizeVerb, &args, &result); err != nil {
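Both methods rebuild an api.NodeList before calling send, because ExtenderArgs crosses the HTTP boundary and is serialized by value; the pointer slice is an in-process optimization only, so the copies reappear exactly here. The repeated loop could be factored into a helper along these lines (a sketch; no such helper is added by this commit):

import "k8s.io/kubernetes/pkg/api"

// toNodeList flattens the scheduler's pointer slice into the wire type
// that ExtenderArgs expects.
func toNodeList(nodes []*api.Node) api.NodeList {
	items := make([]api.Node, 0, len(nodes))
	for _, node := range nodes {
		items = append(items, *node)
	}
	return api.NodeList{Items: items}
}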

View File

@ -28,7 +28,7 @@ import (
) )
type fitPredicate func(pod *api.Pod, node *api.Node) (bool, error) type fitPredicate func(pod *api.Pod, node *api.Node) (bool, error)
type priorityFunc func(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) type priorityFunc func(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error)
type priorityConfig struct { type priorityConfig struct {
function priorityFunc function priorityFunc
@ -61,13 +61,13 @@ func machine2PredicateExtender(pod *api.Pod, node *api.Node) (bool, error) {
return false, nil return false, nil
} }
func errorPrioritizerExtender(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) { func errorPrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) {
return &schedulerapi.HostPriorityList{}, fmt.Errorf("Some error") return &schedulerapi.HostPriorityList{}, fmt.Errorf("Some error")
} }
func machine1PrioritizerExtender(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) { func machine1PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) {
result := schedulerapi.HostPriorityList{} result := schedulerapi.HostPriorityList{}
for _, node := range nodes.Items { for _, node := range nodes {
score := 1 score := 1
if node.Name == "machine1" { if node.Name == "machine1" {
score = 10 score = 10
@ -77,9 +77,9 @@ func machine1PrioritizerExtender(pod *api.Pod, nodes *api.NodeList) (*schedulera
return &result, nil return &result, nil
} }
func machine2PrioritizerExtender(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, error) { func machine2PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) {
result := schedulerapi.HostPriorityList{} result := schedulerapi.HostPriorityList{}
for _, node := range nodes.Items { for _, node := range nodes {
score := 1 score := 1
if node.Name == "machine2" { if node.Name == "machine2" {
score = 10 score = 10
@ -96,7 +96,7 @@ func machine2Prioritizer(_ *api.Pod, nodeNameToInfo map[string]*schedulercache.N
} }
result := []schedulerapi.HostPriority{} result := []schedulerapi.HostPriority{}
for _, node := range nodes.Items { for _, node := range nodes {
score := 1 score := 1
if node.Name == "machine2" { if node.Name == "machine2" {
score = 10 score = 10
@ -112,14 +112,14 @@ type FakeExtender struct {
weight int weight int
} }
func (f *FakeExtender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList, error) { func (f *FakeExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, error) {
filtered := []api.Node{} filtered := []*api.Node{}
for _, node := range nodes.Items { for _, node := range nodes {
fits := true fits := true
for _, predicate := range f.predicates { for _, predicate := range f.predicates {
fit, err := predicate(pod, &node) fit, err := predicate(pod, node)
if err != nil { if err != nil {
return &api.NodeList{}, err return []*api.Node{}, err
} }
if !fit { if !fit {
fits = false fits = false
@ -130,10 +130,10 @@ func (f *FakeExtender) Filter(pod *api.Pod, nodes *api.NodeList) (*api.NodeList,
filtered = append(filtered, node) filtered = append(filtered, node)
} }
} }
return &api.NodeList{Items: filtered}, nil return filtered, nil
} }
func (f *FakeExtender) Prioritize(pod *api.Pod, nodes *api.NodeList) (*schedulerapi.HostPriorityList, int, error) { func (f *FakeExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, int, error) {
result := schedulerapi.HostPriorityList{} result := schedulerapi.HostPriorityList{}
combinedScores := map[string]int{} combinedScores := map[string]int{}
for _, prioritizer := range f.prioritizers { for _, prioritizer := range f.prioritizers {
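An incidental fix in FakeExtender.Filter above: the old code passed &node, the address of the range variable, which the Go versions contemporary with this commit reuse across iterations (Go 1.22 later changed this). That is safe for an immediate call but becomes a classic aliasing bug the moment the pointer is stored; ranging over []*api.Node removes the hazard entirely. A self-contained demonstration with toy types, unrelated to the diff:

package main

import "fmt"

func main() {
	names := []string{"machine1", "machine2"}
	saved := []*string{}
	for _, n := range names {
		saved = append(saved, &n) // every pointer aliases the same loop variable
	}
	// Under pre-1.22 loop semantics this prints "machine2 machine2".
	fmt.Println(*saved[0], *saved[1])
}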

View File

@ -82,7 +82,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeListe
if err != nil { if err != nil {
return "", err return "", err
} }
if len(nodes.Items) == 0 { if len(nodes) == 0 {
return "", ErrNoNodesAvailable return "", ErrNoNodesAvailable
} }
@ -98,7 +98,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeListe
return "", err return "", err
} }
if len(filteredNodes.Items) == 0 { if len(filteredNodes) == 0 {
return "", &FitError{ return "", &FitError{
Pod: pod, Pod: pod,
FailedPredicates: failedPredicateMap, FailedPredicates: failedPredicateMap,
@ -136,19 +136,19 @@ func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList
// Filters the nodes to find the ones that fit based on the given predicate functions // Filters the nodes to find the ones that fit based on the given predicate functions
// Each node is passed through the predicate functions to determine if it is a fit // Each node is passed through the predicate functions to determine if it is a fit
func findNodesThatFit(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate, nodes api.NodeList, extenders []algorithm.SchedulerExtender) (api.NodeList, FailedPredicateMap, error) { func findNodesThatFit(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate, nodes []*api.Node, extenders []algorithm.SchedulerExtender) ([]*api.Node, FailedPredicateMap, error) {
// Create filtered list with enough space to avoid growing it. // Create filtered list with enough space to avoid growing it.
filtered := make([]api.Node, 0, len(nodes.Items)) filtered := make([]*api.Node, 0, len(nodes))
failedPredicateMap := FailedPredicateMap{} failedPredicateMap := FailedPredicateMap{}
if len(predicateFuncs) == 0 { if len(predicateFuncs) == 0 {
filtered = nodes.Items filtered = nodes
} else { } else {
predicateResultLock := sync.Mutex{} predicateResultLock := sync.Mutex{}
errs := []error{} errs := []error{}
meta := predicates.PredicateMetadata(pod) meta := predicates.PredicateMetadata(pod)
checkNode := func(i int) { checkNode := func(i int) {
nodeName := nodes.Items[i].Name nodeName := nodes[i].Name
fits, failedPredicate, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs) fits, failedPredicate, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodeName], predicateFuncs)
predicateResultLock.Lock() predicateResultLock.Lock()
@ -158,30 +158,30 @@ func findNodesThatFit(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No
return return
} }
if fits { if fits {
filtered = append(filtered, nodes.Items[i]) filtered = append(filtered, nodes[i])
} else { } else {
failedPredicateMap[nodeName] = failedPredicate failedPredicateMap[nodeName] = failedPredicate
} }
} }
workqueue.Parallelize(16, len(nodes.Items), checkNode) workqueue.Parallelize(16, len(nodes), checkNode)
if len(errs) > 0 { if len(errs) > 0 {
return api.NodeList{}, FailedPredicateMap{}, errors.NewAggregate(errs) return []*api.Node{}, FailedPredicateMap{}, errors.NewAggregate(errs)
} }
} }
if len(filtered) > 0 && len(extenders) != 0 { if len(filtered) > 0 && len(extenders) != 0 {
for _, extender := range extenders { for _, extender := range extenders {
filteredList, err := extender.Filter(pod, &api.NodeList{Items: filtered}) filteredList, err := extender.Filter(pod, filtered)
if err != nil { if err != nil {
return api.NodeList{}, FailedPredicateMap{}, err return []*api.Node{}, FailedPredicateMap{}, err
} }
filtered = filteredList.Items filtered = filteredList
if len(filtered) == 0 { if len(filtered) == 0 {
break break
} }
} }
} }
return api.NodeList{Items: filtered}, failedPredicateMap, nil return filtered, failedPredicateMap, nil
} }
// Checks whether node with a given name and NodeInfo satisfies all predicateFuncs. // Checks whether node with a given name and NodeInfo satisfies all predicateFuncs.
@ -288,7 +288,7 @@ func PrioritizeNodes(
wg.Add(1) wg.Add(1)
go func(ext algorithm.SchedulerExtender) { go func(ext algorithm.SchedulerExtender) {
defer wg.Done() defer wg.Done()
prioritizedList, weight, err := ext.Prioritize(pod, &nodes) prioritizedList, weight, err := ext.Prioritize(pod, nodes)
if err != nil { if err != nil {
// Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities // Prioritization errors from extender can be ignored, let k8s/other extenders determine the priorities
return return
@ -320,8 +320,8 @@ func EqualPriority(_ *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInf
return []schedulerapi.HostPriority{}, err return []schedulerapi.HostPriority{}, err
} }
result := []schedulerapi.HostPriority{} result := make(schedulerapi.HostPriorityList, 0, len(nodes))
for _, node := range nodes.Items { for _, node := range nodes {
result = append(result, schedulerapi.HostPriority{ result = append(result, schedulerapi.HostPriority{
Host: node.Name, Host: node.Name,
Score: 1, Score: 1,
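findNodesThatFit runs checkNode(i) over every node index via workqueue.Parallelize with 16 workers; the shared slices filtered, failedPredicateMap, and errs are mutated only under predicateResultLock. The same shape in a small hedged sketch (parallelFilterEven is a toy; the workqueue import path is assumed from the scheduler's usage, not shown in this diff):

import (
	"sync"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

// parallelFilterEven mirrors the filtered-append-under-lock pattern in
// findNodesThatFit. Output order is nondeterministic, as it is there.
func parallelFilterEven(in []int) []int {
	out := []int{}
	mu := sync.Mutex{}
	workqueue.Parallelize(16, len(in), func(i int) {
		if in[i]%2 == 0 {
			mu.Lock()
			out = append(out, in[i])
			mu.Unlock()
		}
	})
	return out
}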

View File

@ -66,7 +66,7 @@ func numericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.Nod
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to list nodes: %v", err) return nil, fmt.Errorf("failed to list nodes: %v", err)
} }
for _, node := range nodes.Items { for _, node := range nodes {
score, err := strconv.Atoi(node.Name) score, err := strconv.Atoi(node.Name)
if err != nil { if err != nil {
return nil, err return nil, err
@ -102,12 +102,10 @@ func reverseNumericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulerca
return reverseResult, nil return reverseResult, nil
} }
func makeNodeList(nodeNames []string) api.NodeList { func makeNodeList(nodeNames []string) []*api.Node {
result := api.NodeList{ result := make([]*api.Node, 0, len(nodeNames))
Items: make([]api.Node, len(nodeNames)), for _, nodeName := range nodeNames {
} result = append(result, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}})
for ix := range nodeNames {
result.Items[ix].Name = nodeNames[ix]
} }
return result return result
} }

View File

@ -125,7 +125,7 @@ func TestScheduler(t *testing.T) {
}, },
}, },
NodeLister: algorithm.FakeNodeLister( NodeLister: algorithm.FakeNodeLister(
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}}, []*api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}},
), ),
Algorithm: item.algo, Algorithm: item.algo,
Binder: fakeBinder{func(b *api.Binding) error { Binder: fakeBinder{func(b *api.Binding) error {
@ -292,7 +292,7 @@ func setupTestSchedulerWithOnePod(t *testing.T, queuedPodStore *clientcache.FIFO
cfg := &Config{ cfg := &Config{
SchedulerCache: scache, SchedulerCache: scache,
NodeLister: algorithm.FakeNodeLister( NodeLister: algorithm.FakeNodeLister(
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}}, []*api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}},
), ),
Algorithm: algo, Algorithm: algo,
Binder: fakeBinder{func(b *api.Binding) error { Binder: fakeBinder{func(b *api.Binding) error {