Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #28781 from wojtek-t/optimize_priorities_2
Automatic merge from submit-queue.

Change storeToNodeConditionLister to return []*api.Node instead of api.NodeList for performance. Currently, the copies made while copying/creating api.NodeList are a significant part of the scheduler profile, and a number of them are made in places that are not parallelizable.

Ref #28590
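For context, here is a minimal, self-contained sketch of why the change helps. This is not the Kubernetes code itself; the `Node` struct and the helper names below are simplified assumptions. The point is that building an api.NodeList-style value copies every node into a fresh Items slice, while returning a slice of pointers only duplicates the slice header and the pointers.

```go
package main

import "fmt"

// Node stands in for api.Node; the real struct is much larger,
// which is what makes copying it into a list expensive.
type Node struct {
	Name   string
	Labels map[string]string
}

// NodeList mirrors the old api.NodeList-style return value:
// callers receive a fresh slice of Node values (one copy per node).
type NodeList struct {
	Items []Node
}

// listAsNodeList copies every node into a new NodeList.
func listAsNodeList(cache []*Node) NodeList {
	items := make([]Node, 0, len(cache))
	for _, n := range cache {
		items = append(items, *n) // value copy of the whole struct
	}
	return NodeList{Items: items}
}

// listAsPointers matches the new signature: no per-node copies,
// only the slice header and the pointers are duplicated.
func listAsPointers(cache []*Node) []*Node {
	out := make([]*Node, len(cache))
	copy(out, cache)
	return out
}

func main() {
	cache := []*Node{{Name: "machine1"}, {Name: "machine2"}}
	fmt.Println(len(listAsNodeList(cache).Items)) // 2, but each Node was copied
	fmt.Println(len(listAsPointers(cache)))       // 2, pointers shared with the cache
}
```

A design implication that the diff does not spell out: callers now share the listed objects with the scheduler's cache, so they should treat the returned nodes as read-only.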
@@ -27,20 +27,23 @@ import (
 // NodeLister interface represents anything that can list nodes for a scheduler.
 type NodeLister interface {
-List() (list api.NodeList, err error)
+// We explicitly return []*api.Node, instead of api.NodeList, to avoid
+// performing expensive copies that are unneded.
+List() ([]*api.Node, error)
 }

 // FakeNodeLister implements NodeLister on a []string for test purposes.
-type FakeNodeLister api.NodeList
+type FakeNodeLister []*api.Node

 // List returns nodes as a []string.
-func (f FakeNodeLister) List() (api.NodeList, error) {
-return api.NodeList(f), nil
+func (f FakeNodeLister) List() ([]*api.Node, error) {
+return f, nil
 }

 // PodLister interface represents anything that can list pods for a scheduler.
 type PodLister interface {
-// TODO: make this exactly the same as client's Pods(ns).List() method, by returning a api.PodList
+// We explicitly return []*api.Pod, instead of api.PodList, to avoid
+// performing expensive copies that are unneded.
 List(labels.Selector) ([]*api.Pod, error)
 }
@@ -101,12 +101,12 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 var maxCount int
 var minCount int
 counts := map[string]int{}
-for _, node := range nodes.Items {
+for _, node := range nodes {
 totalCount := 0
 // count weights for the weighted pod affinity
 if affinity.PodAffinity != nil {
 for _, weightedTerm := range affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
-weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, weightedTerm.Weight, weightedTerm.PodAffinityTerm, &node)
+weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, weightedTerm.Weight, weightedTerm.PodAffinityTerm, node)
 if err != nil {
 return nil, err
 }
@@ -117,7 +117,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 // count weights for the weighted pod anti-affinity
 if affinity.PodAntiAffinity != nil {
 for _, weightedTerm := range affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
-weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, (0 - weightedTerm.Weight), weightedTerm.PodAffinityTerm, &node)
+weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, (0 - weightedTerm.Weight), weightedTerm.PodAffinityTerm, node)
 if err != nil {
 return nil, err
 }
@@ -146,7 +146,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 //}
 for _, epAffinityTerm := range podAffinityTerms {
 match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epAffinityTerm,
-func(pod *api.Pod) (*api.Node, error) { return &node, nil },
+func(pod *api.Pod) (*api.Node, error) { return node, nil },
 func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
 )
 if err != nil {
@@ -161,7 +161,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 // count weight for the weighted pod affinity indicated by the existing pod.
 for _, epWeightedTerm := range epAffinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
 match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm,
-func(pod *api.Pod) (*api.Node, error) { return &node, nil },
+func(pod *api.Pod) (*api.Node, error) { return node, nil },
 func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
 )
 if err != nil {
@@ -177,7 +177,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 if epAffinity.PodAntiAffinity != nil {
 for _, epWeightedTerm := range epAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
 match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm,
-func(pod *api.Pod) (*api.Node, error) { return &node, nil },
+func(pod *api.Pod) (*api.Node, error) { return node, nil },
 func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) },
 )
 if err != nil {
@@ -201,7 +201,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod

 // calculate final priority score for each node
 result := []schedulerapi.HostPriority{}
-for _, node := range nodes.Items {
+for _, node := range nodes {
 fScore := float64(0)
 if (maxCount - minCount) > 0 {
 fScore = 10 * (float64(counts[node.Name]-minCount) / float64(maxCount-minCount))
@@ -30,12 +30,12 @@ import (
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

-type FakeNodeListInfo []api.Node
+type FakeNodeListInfo []*api.Node

 func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
 for _, node := range nodes {
 if node.Name == nodeName {
-return &node, nil
+return node, nil
 }
 }
 return nil, fmt.Errorf("Unable to find node: %s", nodeName)
@@ -252,13 +252,13 @@ func TestInterPodAffinityPriority(t *testing.T) {
 tests := []struct {
 pod *api.Pod
 pods []*api.Pod
-nodes []api.Node
+nodes []*api.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
 {
 pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -276,7 +276,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -294,7 +294,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 pods: []*api.Pod{
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
@@ -316,7 +316,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 {Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
@@ -334,7 +334,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -350,7 +350,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -364,7 +364,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -385,7 +385,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 },
@@ -398,7 +398,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
 },
@@ -412,7 +412,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 },
@@ -426,7 +426,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
 },
@@ -440,7 +440,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
 },
@@ -462,7 +462,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 {Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
@@ -485,7 +485,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
 {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Annotations: awayFromS1InAz}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
@@ -499,12 +499,12 @@ func TestInterPodAffinityPriority(t *testing.T) {
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
 interPodAffinity := InterPodAffinity{
 info: FakeNodeListInfo(test.nodes),
-nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
+nodeLister: algorithm.FakeNodeLister(test.nodes),
 podLister: algorithm.FakePodLister(test.pods),
 hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
 failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
 }
-list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -548,7 +548,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 tests := []struct {
 pod *api.Pod
 pods []*api.Pod
-nodes []api.Node
+nodes []*api.Node
 hardPodAffinityWeight int
 expectedList schedulerapi.HostPriorityList
 test string
@@ -559,7 +559,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -574,7 +574,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
@@ -588,11 +588,11 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
 ipa := InterPodAffinity{
 info: FakeNodeListInfo(test.nodes),
-nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
+nodeLister: algorithm.FakeNodeLister(test.nodes),
 podLister: algorithm.FakePodLister(test.pods),
 hardPodAffinityWeight: test.hardPodAffinityWeight,
 }
-list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -634,7 +634,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 tests := []struct {
 pod *api.Pod
 pods []*api.Pod
-nodes []api.Node
+nodes []*api.Node
 failureDomains priorityutil.Topologies
 expectedList schedulerapi.HostPriorityList
 test string
@@ -645,7 +645,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
 },
@@ -659,7 +659,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
 },
@@ -672,12 +672,12 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
 ipa := InterPodAffinity{
 info: FakeNodeListInfo(test.nodes),
-nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}),
+nodeLister: algorithm.FakeNodeLister(test.nodes),
 podLister: algorithm.FakePodLister(test.pods),
 hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
 failureDomains: test.failureDomains,
 }
-list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -37,7 +37,7 @@ func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*sche
 }

 var maxCount float64
-counts := make(map[string]float64, len(nodes.Items))
+counts := make(map[string]float64, len(nodes))

 affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
 if err != nil {
@@ -59,7 +59,7 @@ func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*sche
 return nil, err
 }

-for _, node := range nodes.Items {
+for _, node := range nodes {
 if nodeSelector.Matches(labels.Set(node.Labels)) {
 counts[node.Name] += float64(preferredSchedulingTerm.Weight)
 }
@@ -71,9 +71,8 @@ func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*sche
 }
 }

-result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-for i := range nodes.Items {
-node := &nodes.Items[i]
+result := make(schedulerapi.HostPriorityList, 0, len(nodes))
+for _, node := range nodes {
 if maxCount > 0 {
 fScore := 10 * (counts[node.Name] / maxCount)
 result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
@@ -93,7 +93,7 @@ func TestNodeAffinityPriority(t *testing.T) {

 tests := []struct {
 pod *api.Pod
-nodes []api.Node
+nodes []*api.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
@@ -103,7 +103,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 Annotations: map[string]string{},
 },
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -117,7 +117,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 Annotations: affinity1,
 },
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label4}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -131,7 +131,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 Annotations: affinity1,
 },
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -145,7 +145,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 Annotations: affinity2,
 },
 },
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: label5}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
@@ -156,7 +156,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 }

 for _, test := range tests {
-list, err := CalculateNodeAffinityPriority(test.pod, schedulercache.CreateNodeNameToInfoMap(nil), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := CalculateNodeAffinityPriority(test.pod, schedulercache.CreateNodeNameToInfoMap(nil), algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -86,9 +86,8 @@ func LeastRequestedPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulerca
 return schedulerapi.HostPriorityList{}, err
 }

-list := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-for i := range nodes.Items {
-node := &nodes.Items[i]
+list := make(schedulerapi.HostPriorityList, 0, len(nodes))
+for _, node := range nodes {
 list = append(list, calculateResourceOccupancy(pod, node, nodeNameToInfo[node.Name]))
 }
 return list, nil
@@ -118,12 +117,12 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, nodeName
 }

 labeledNodes := map[string]bool{}
-for _, node := range nodes.Items {
+for _, node := range nodes {
 exists := labels.Set(node.Labels).Has(n.label)
 labeledNodes[node.Name] = (exists && n.presence) || (!exists && !n.presence)
 }

-result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
+result := make(schedulerapi.HostPriorityList, 0, len(nodes))
 //score int - scale of 0-10
 // 0 being the lowest priority and 10 being the highest
 for nodeName, success := range labeledNodes {
@@ -158,8 +157,7 @@ func ImageLocalityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercac
 }

 for i := range pod.Spec.Containers {
-for j := range nodes.Items {
-node := &nodes.Items[j]
+for _, node := range nodes {
 // Check if this container's image is present and get its size.
 imageSize := checkContainerImageOnNode(node, &pod.Spec.Containers[i])
 // Add this size to the total result of this node.
@@ -167,7 +165,7 @@ func ImageLocalityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercac
 }
 }

-result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
+result := make(schedulerapi.HostPriorityList, 0, len(nodes))
 // score int - scale of 0-10
 // 0 being the lowest priority and 10 being the highest.
 for nodeName, sumSize := range sumSizeMap {
@@ -222,9 +220,8 @@ func BalancedResourceAllocation(pod *api.Pod, nodeNameToInfo map[string]*schedul
 return schedulerapi.HostPriorityList{}, err
 }

-list := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-for i := range nodes.Items {
-node := &nodes.Items[i]
+list := make(schedulerapi.HostPriorityList, 0, len(nodes))
+for _, node := range nodes {
 list = append(list, calculateBalancedResourceAllocation(pod, node, nodeNameToInfo[node.Name]))
 }
 return list, nil
@@ -35,8 +35,8 @@ import (
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

-func makeNode(node string, milliCPU, memory int64) api.Node {
-return api.Node{
+func makeNode(node string, milliCPU, memory int64) *api.Node {
+return &api.Node{
 ObjectMeta: api.ObjectMeta{Name: node},
 Status: api.NodeStatus{
 Capacity: api.ResourceList{
@@ -99,7 +99,7 @@ func TestZeroRequest(t *testing.T) {
 tests := []struct {
 pod *api.Pod
 pods []*api.Pod
-nodes []api.Node
+nodes []*api.Node
 test string
 }{
 // The point of these next two tests is to show you get the same priority for a zero-request pod
@@ -107,7 +107,7 @@ func TestZeroRequest(t *testing.T) {
 // and when the zero-request pod is the one being scheduled.
 {
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 test: "test priority of zero-request pod with machine with zero-request pod",
 pods: []*api.Pod{
 {Spec: large1}, {Spec: noResources1},
@@ -116,7 +116,7 @@ func TestZeroRequest(t *testing.T) {
 },
 {
 pod: &api.Pod{Spec: small},
-nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 test: "test priority of nonzero-request pod with machine with zero-request pod",
 pods: []*api.Pod{
 {Spec: large1}, {Spec: noResources1},
@@ -126,7 +126,7 @@ func TestZeroRequest(t *testing.T) {
 // The point of this test is to verify that we're not just getting the same score no matter what we schedule.
 {
 pod: &api.Pod{Spec: large},
-nodes: []api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
 test: "test priority of larger pod with machine with zero-request pod",
 pods: []*api.Pod{
 {Spec: large1}, {Spec: noResources1},
@@ -145,7 +145,7 @@ func TestZeroRequest(t *testing.T) {
 // plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
 // to test what's actually in production.
 []algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakePodLister(test.pods), algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{}), algorithm.FakeReplicaSetLister([]extensions.ReplicaSet{})), Weight: 1}},
-algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}), []algorithm.SchedulerExtender{})
+algorithm.FakeNodeLister(test.nodes), []algorithm.SchedulerExtender{})
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -228,7 +228,7 @@ func TestLeastRequested(t *testing.T) {
 tests := []struct {
 pod *api.Pod
 pods []*api.Pod
-nodes []api.Node
+nodes []*api.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
@@ -245,7 +245,7 @@ func TestLeastRequested(t *testing.T) {
 Node2 Score: (10 + 10) / 2 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "nothing scheduled, nothing requested",
 },
@@ -262,7 +262,7 @@ func TestLeastRequested(t *testing.T) {
 Node2 Score: (5 + 5) / 2 = 5
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 3}, {"machine2", 5}},
 test: "nothing scheduled, resources requested, differently sized machines",
 },
@@ -279,7 +279,7 @@ func TestLeastRequested(t *testing.T) {
 Node2 Score: (10 + 10) / 2 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "no resources requested, pods scheduled",
 pods: []*api.Pod{
@@ -302,7 +302,7 @@ func TestLeastRequested(t *testing.T) {
 Node2 Score: (4 + 7.5) / 2 = 5
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 5}},
 test: "no resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -325,7 +325,7 @@ func TestLeastRequested(t *testing.T) {
 Node2 Score: (4 + 5) / 2 = 4
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 4}},
 test: "resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -346,7 +346,7 @@ func TestLeastRequested(t *testing.T) {
 Node2 Score: (4 + 8) / 2 = 6
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
+nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 6}},
 test: "resources requested, pods scheduled with resources, differently sized machines",
 pods: []*api.Pod{
@@ -367,7 +367,7 @@ func TestLeastRequested(t *testing.T) {
 Node2 Score: (0 + 5) / 2 = 2
 */
 pod: &api.Pod{Spec: cpuOnly},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 5}, {"machine2", 2}},
 test: "requested resources exceed node capacity",
 pods: []*api.Pod{
@@ -377,7 +377,7 @@ func TestLeastRequested(t *testing.T) {
 },
 {
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
+nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 test: "zero node resources, pods scheduled with resources",
 pods: []*api.Pod{
@@ -389,7 +389,7 @@ func TestLeastRequested(t *testing.T) {

 for _, test := range tests {
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
-list, err := LeastRequestedPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := LeastRequestedPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -404,14 +404,14 @@ func TestNewNodeLabelPriority(t *testing.T) {
 label2 := map[string]string{"bar": "foo"}
 label3 := map[string]string{"bar": "baz"}
 tests := []struct {
-nodes []api.Node
+nodes []*api.Node
 label string
 presence bool
 expectedList schedulerapi.HostPriorityList
 test string
 }{
 {
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -422,7 +422,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 test: "no match found, presence true",
 },
 {
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -433,7 +433,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 test: "no match found, presence false",
 },
 {
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -444,7 +444,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 test: "one match found, presence true",
 },
 {
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -455,7 +455,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 test: "one match found, presence false",
 },
 {
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -466,7 +466,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 test: "two matches found, presence true",
 },
 {
-nodes: []api.Node{
+nodes: []*api.Node{
 {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
 {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
 {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
@@ -483,7 +483,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 label: test.label,
 presence: test.presence,
 }
-list, err := prioritizer.CalculateNodeLabelPriority(nil, map[string]*schedulercache.NodeInfo{}, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := prioritizer.CalculateNodeLabelPriority(nil, map[string]*schedulercache.NodeInfo{}, algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -561,7 +561,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 tests := []struct {
 pod *api.Pod
 pods []*api.Pod
-nodes []api.Node
+nodes []*api.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
@@ -578,7 +578,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Node2 Score: 10 - (0-0)*10 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "nothing scheduled, nothing requested",
 },
@@ -595,7 +595,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Node2 Score: 10 - (0.5-0.5)*10 = 10
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 7}, {"machine2", 10}},
 test: "nothing scheduled, resources requested, differently sized machines",
 },
@@ -612,7 +612,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Node2 Score: 10 - (0-0)*10 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "no resources requested, pods scheduled",
 pods: []*api.Pod{
@@ -635,7 +635,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Node2 Score: 10 - (0.6-0.25)*10 = 6
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 4}, {"machine2", 6}},
 test: "no resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -658,7 +658,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Node2 Score: 10 - (0.6-0.5)*10 = 9
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 9}},
 test: "resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -679,7 +679,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Node2 Score: 10 - (0.6-0.2)*10 = 6
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
+nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 6}, {"machine2", 6}},
 test: "resources requested, pods scheduled with resources, differently sized machines",
 pods: []*api.Pod{
@@ -700,7 +700,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Node2 Score: 0
 */
 pod: &api.Pod{Spec: cpuOnly},
-nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 test: "requested resources exceed node capacity",
 pods: []*api.Pod{
@@ -710,7 +710,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 },
 {
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
+nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}},
 test: "zero node resources, pods scheduled with resources",
 pods: []*api.Pod{
@@ -722,7 +722,7 @@ func TestBalancedResourceAllocation(t *testing.T) {

 for _, test := range tests {
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
-list, err := BalancedResourceAllocation(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := BalancedResourceAllocation(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -813,7 +813,7 @@ func TestImageLocalityPriority(t *testing.T) {
 tests := []struct {
 pod *api.Pod
 pods []*api.Pod
-nodes []api.Node
+nodes []*api.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
@@ -828,7 +828,7 @@ func TestImageLocalityPriority(t *testing.T) {
 // Image: gcr.io/250 250MB
 // Score: (250M-23M)/97.7M + 1 = 3
 pod: &api.Pod{Spec: test_40_250},
-nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 1}, {"machine2", 3}},
 test: "two images spread on two nodes, prefer the larger image one",
 },
@@ -843,7 +843,7 @@ func TestImageLocalityPriority(t *testing.T) {
 // Image: not present
 // Score: 0
 pod: &api.Pod{Spec: test_40_140},
-nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 0}},
 test: "two images on one node, prefer this node",
 },
@@ -858,7 +858,7 @@ func TestImageLocalityPriority(t *testing.T) {
 // Image: gcr.io/10 10MB
 // Score: 10 < min score = 0
 pod: &api.Pod{Spec: test_min_max},
-nodes: []api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
 expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}},
 test: "if exceed limit, use limit",
 },
@@ -866,7 +866,7 @@ func TestImageLocalityPriority(t *testing.T) {

 for _, test := range tests {
 nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
-list, err := ImageLocalityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+list, err := ImageLocalityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -880,8 +880,8 @@ func TestImageLocalityPriority(t *testing.T) {
 }
 }

-func makeImageNode(node string, status api.NodeStatus) api.Node {
-return api.Node{
+func makeImageNode(node string, status api.NodeStatus) *api.Node {
+return &api.Node{
 ObjectMeta: api.ObjectMeta{Name: node},
 Status: status,
 }
@@ -117,12 +117,13 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo ma
 // Create a number of go-routines that will be computing number
 // of "similar" pods for given nodes.
 workers := 16
-toProcess := make(chan string, len(nodes.Items))
-for i := range nodes.Items {
-toProcess <- nodes.Items[i].Name
+toProcess := make(chan string, len(nodes))
+for i := range nodes {
+toProcess <- nodes[i].Name
 }
 close(toProcess)

+// TODO: Use Parallelize.
 wg := sync.WaitGroup{}
 wg.Add(workers)
 for i := 0; i < workers; i++ {
@@ -181,9 +182,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo ma

 // Count similar pods by zone, if zone information is present
 countsByZone := map[string]int{}
-for i := range nodes.Items {
-node := &nodes.Items[i]
-
+for _, node := range nodes {
 count, found := countsByNodeName[node.Name]
 if !found {
 continue
@@ -207,11 +206,10 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo ma
 }
 }

-result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
+result := make(schedulerapi.HostPriorityList, 0, len(nodes))
 //score int - scale of 0-maxPriority
 // 0 being the lowest priority and maxPriority being the highest
-for i := range nodes.Items {
-node := &nodes.Items[i]
+for _, node := range nodes {
 // initializing to the default/max node score of maxPriority
 fScore := float32(maxPriority)
 if maxCountByNodeName > 0 {
@@ -281,7 +279,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, nodeNa
 // separate out the nodes that have the label from the ones that don't
 otherNodes := []string{}
 labeledNodes := map[string]string{}
-for _, node := range nodes.Items {
+for _, node := range nodes {
 if labels.Set(node.Labels).Has(s.label) {
 label := labels.Set(node.Labels).Get(s.label)
 labeledNodes[node.Name] = label
@@ -664,20 +664,18 @@ func TestZoneSpreadPriority(t *testing.T) {
 }
 }

-func makeLabeledNodeList(nodeMap map[string]map[string]string) (result api.NodeList) {
-nodes := []api.Node{}
+func makeLabeledNodeList(nodeMap map[string]map[string]string) []*api.Node {
+nodes := make([]*api.Node, 0, len(nodeMap))
 for nodeName, labels := range nodeMap {
-nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
+nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
 }
-return api.NodeList{Items: nodes}
+return nodes
 }

-func makeNodeList(nodeNames []string) api.NodeList {
-result := api.NodeList{
-Items: make([]api.Node, len(nodeNames)),
+func makeNodeList(nodeNames []string) []*api.Node {
+nodes := make([]*api.Node, 0, len(nodeNames))
+for _, nodeName := range nodeNames {
+nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}})
 }
-for ix := range nodeNames {
-result.Items[ix].Name = nodeNames[ix]
-}
-return result
+return nodes
 }
@@ -61,7 +61,7 @@ func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*sch
 // the max value of counts
 var maxCount float64
 // counts hold the count of intolerable taints of a pod for a given node
-counts := make(map[string]float64, len(nodes.Items))
+counts := make(map[string]float64, len(nodes))

 tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
 if err != nil {
@@ -71,8 +71,7 @@ func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*sch
 tolerationList := getAllTolerationPreferNoSchedule(tolerations)

 // calculate the intolerable taints for all the nodes
-for i := range nodes.Items {
-node := &nodes.Items[i]
+for _, node := range nodes {
 taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
 if err != nil {
 return nil, err
@@ -88,9 +87,8 @@ func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*sch
 // The maximum priority value to give to a node
 // Priority values range from 0 - maxPriority
 const maxPriority = float64(10)
-result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-for i := range nodes.Items {
-node := &nodes.Items[i]
+result := make(schedulerapi.HostPriorityList, 0, len(nodes))
+for _, node := range nodes {
 fScore := maxPriority
 if maxCount > 0 {
 fScore = (1.0 - counts[node.Name]/maxCount) * 10
@@ -27,9 +27,9 @@ import (
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

-func nodeWithTaints(nodeName string, taints []api.Taint) api.Node {
+func nodeWithTaints(nodeName string, taints []api.Taint) *api.Node {
 taintsData, _ := json.Marshal(taints)
-return api.Node{
+return &api.Node{
 ObjectMeta: api.ObjectMeta{
 Name: nodeName,
 Annotations: map[string]string{
@@ -57,7 +57,7 @@ func podWithTolerations(tolerations []api.Toleration) *api.Pod {
 func TestTaintAndToleration(t *testing.T) {
 tests := []struct {
 pod *api.Pod
-nodes []api.Node
+nodes []*api.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
@@ -70,7 +70,7 @@ func TestTaintAndToleration(t *testing.T) {
 Value: "bar",
 Effect: api.TaintEffectPreferNoSchedule,
 }}),
-nodes: []api.Node{
+nodes: []*api.Node{
 nodeWithTaints("nodeA", []api.Taint{{
 Key: "foo",
 Value: "bar",
@@ -103,7 +103,7 @@ func TestTaintAndToleration(t *testing.T) {
 Effect: api.TaintEffectPreferNoSchedule,
 },
 }),
-nodes: []api.Node{
+nodes: []*api.Node{
 nodeWithTaints("nodeA", []api.Taint{}),
 nodeWithTaints("nodeB", []api.Taint{
 {
@@ -139,7 +139,7 @@ func TestTaintAndToleration(t *testing.T) {
 Value: "bar",
 Effect: api.TaintEffectPreferNoSchedule,
 }}),
-nodes: []api.Node{
+nodes: []*api.Node{
 nodeWithTaints("nodeA", []api.Taint{}),
 nodeWithTaints("nodeB", []api.Taint{
 {
@@ -182,7 +182,7 @@ func TestTaintAndToleration(t *testing.T) {
 Effect: api.TaintEffectNoSchedule,
 },
 }),
-nodes: []api.Node{
+nodes: []*api.Node{
 nodeWithTaints("nodeA", []api.Taint{}),
 nodeWithTaints("nodeB", []api.Taint{
 {
@@ -215,7 +215,7 @@ func TestTaintAndToleration(t *testing.T) {
 list, err := ComputeTaintTolerationPriority(
 test.pod,
 nodeNameToInfo,
-algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
+algorithm.FakeNodeLister(test.nodes))
 if err != nil {
 t.Errorf("%s, unexpected error: %v", test.test, err)
 }
@@ -27,12 +27,12 @@ import (
 type SchedulerExtender interface {
 // Filter based on extender-implemented predicate functions. The filtered list is
 // expected to be a subset of the supplied list.
-Filter(pod *api.Pod, nodes *api.NodeList) (filteredNodes *api.NodeList, err error)
+Filter(pod *api.Pod, nodes []*api.Node) (filteredNodes []*api.Node, err error)

 // Prioritize based on extender-implemented priority functions. The returned scores & weight
 // are used to compute the weighted score for an extender. The weighted scores are added to
 // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
-Prioritize(pod *api.Pod, nodes *api.NodeList) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
+Prioritize(pod *api.Pod, nodes []*api.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
 }

 // ScheduleAlgorithm is an interface implemented by things that know how to schedule pods
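The test updates above follow one mechanical pattern: wherever a test previously wrapped its fixture nodes in api.NodeList{Items: ...} before handing them to algorithm.FakeNodeLister, it now builds and passes a []*api.Node directly. Below is a rough, self-contained sketch of that pattern using stand-in types rather than the real scheduler packages; `FakeLister` and `buildNodes` are hypothetical names chosen to mirror FakeNodeLister and the reworked makeNodeList helper.

```go
package main

import "fmt"

// Node is a stand-in for api.Node.
type Node struct{ Name string }

// FakeLister mirrors the updated FakeNodeLister: it is simply a slice of
// pointers, so List can hand the same backing objects straight back.
type FakeLister []*Node

func (f FakeLister) List() ([]*Node, error) { return f, nil }

// buildNodes plays the role of the reworked makeNodeList test helper:
// it allocates each Node once and returns pointers to those allocations.
func buildNodes(names []string) []*Node {
	nodes := make([]*Node, 0, len(names))
	for _, name := range names {
		nodes = append(nodes, &Node{Name: name})
	}
	return nodes
}

func main() {
	lister := FakeLister(buildNodes([]string{"machine1", "machine2"}))
	nodes, err := lister.List()
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Name) // machine1, machine2 — no per-node copies were made
	}
}
```

The real tests do the same thing with api.Node and algorithm.FakeNodeLister, as shown in the hunks above.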