From 3a9e988015aa4107edfded3be244a24b780c994a Mon Sep 17 00:00:00 2001 From: John Calabrese Date: Thu, 10 May 2018 07:12:20 -0400 Subject: [PATCH] use subtest for table units --- .../balanced_resource_allocation_test.go | 50 ++-- .../priorities/image_locality_test.go | 30 +-- .../priorities/interpod_affinity_test.go | 96 ++++---- .../priorities/least_requested_test.go | 36 +-- .../algorithm/priorities/metadata_test.go | 20 +- .../priorities/most_requested_test.go | 30 +-- .../priorities/node_affinity_test.go | 30 +-- .../algorithm/priorities/node_label_test.go | 46 ++-- .../priorities/node_prefer_avoid_pods_test.go | 34 +-- .../priorities/resource_limits_test.go | 31 +-- .../priorities/selector_spreading_test.go | 226 +++++++++--------- .../priorities/taint_toleration_test.go | 33 +-- 12 files changed, 345 insertions(+), 317 deletions(-) diff --git a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go index 0725a4d6609..fae45b1a32c 100644 --- a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go +++ b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go @@ -216,7 +216,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pods []*v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { /* @@ -233,7 +233,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "nothing scheduled, nothing requested", + name: "nothing scheduled, nothing requested", }, { /* @@ -250,7 +250,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "nothing scheduled, resources requested, differently sized machines", + name: "nothing scheduled, resources requested, differently sized machines", }, { /* @@ -267,7 +267,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "no resources requested, pods scheduled", + name: "no resources requested, pods scheduled", pods: []*v1.Pod{ {Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, {Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -290,7 +290,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}}, - test: "no resources requested, pods scheduled with resources", + name: "no resources requested, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, {Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -313,7 +313,7 @@ func TestBalancedResourceAllocation(t 
*testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}}, - test: "resources requested, pods scheduled with resources", + name: "resources requested, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -334,7 +334,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}}, - test: "resources requested, pods scheduled with resources, differently sized machines", + name: "resources requested, pods scheduled with resources, differently sized machines", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -355,7 +355,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pod: &v1.Pod{Spec: cpuOnly}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "requested resources exceed node capacity", + name: "requested resources exceed node capacity", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -365,7 +365,7 @@ func TestBalancedResourceAllocation(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "zero node resources, pods scheduled with resources", + name: "zero node resources, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -389,7 +389,7 @@ func TestBalancedResourceAllocation(t *testing.T) { }, nodes: []*v1.Node{makeNode("machine3", 3500, 40000), makeNode("machine4", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine3", Score: 8}, {Host: "machine4", Score: 9}}, - test: "Include volume count on a node for balanced resource allocation", + name: "Include volume count on a node for balanced resource allocation", pods: []*v1.Pod{ {Spec: cpuAndMemory3}, {Spec: podwithVol1}, @@ -400,20 +400,22 @@ func TestBalancedResourceAllocation(t *testing.T) { } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) - if len(test.pod.Spec.Volumes) > 0 { - maxVolumes := 5 - for _, info := range nodeNameToInfo { - info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes) - info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes) + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + if len(test.pod.Spec.Volumes) > 0 { + maxVolumes := 5 + for _, info := range nodeNameToInfo { + info.TransientInfo.TransNodeInfo.AllocatableVolumesCount = getExistingVolumeCountForNode(info.Pods(), maxVolumes) + info.TransientInfo.TransNodeInfo.RequestedVolumes = len(test.pod.Spec.Volumes) + } } - } - list, err := priorityFunction(BalancedResourceAllocationMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, 
list) - } + list, err := priorityFunction(BalancedResourceAllocationMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/image_locality_test.go b/pkg/scheduler/algorithm/priorities/image_locality_test.go index dd76371845a..d62e1a6036b 100644 --- a/pkg/scheduler/algorithm/priorities/image_locality_test.go +++ b/pkg/scheduler/algorithm/priorities/image_locality_test.go @@ -110,7 +110,7 @@ func TestImageLocalityPriority(t *testing.T) { pods []*v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { // Pod: gcr.io/40 gcr.io/250 @@ -125,7 +125,7 @@ func TestImageLocalityPriority(t *testing.T) { pod: &v1.Pod{Spec: test40250}, nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}}, - test: "two images spread on two nodes, prefer the larger image one", + name: "two images spread on two nodes, prefer the larger image one", }, { // Pod: gcr.io/40 gcr.io/140 @@ -140,7 +140,7 @@ func TestImageLocalityPriority(t *testing.T) { pod: &v1.Pod{Spec: test40140}, nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}}, - test: "two images on one node, prefer this node", + name: "two images on one node, prefer this node", }, { // Pod: gcr.io/2000 gcr.io/10 @@ -155,23 +155,25 @@ func TestImageLocalityPriority(t *testing.T) { pod: &v1.Pod{Spec: testMinMax}, nodes: []*v1.Node{makeImageNode("machine1", node401402000), makeImageNode("machine2", node25010)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, - test: "if exceed limit, use limit", + name: "if exceed limit, use limit", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) - list, err := priorityFunction(ImageLocalityPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + list, err := priorityFunction(ImageLocalityPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } - sort.Sort(test.expectedList) - sort.Sort(list) + sort.Sort(test.expectedList) + sort.Sort(list) - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go index 6987e1d9ef4..64f2792fe1e 100644 --- a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go +++ b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -267,7 +267,7 @@ func TestInterPodAffinityPriority(t *testing.T) { pods []*v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: 
&v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, @@ -277,7 +277,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, - test: "all machines are same priority as Affinity is nil", + name: "all machines are same priority as Affinity is nil", }, // the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score // the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score @@ -295,7 +295,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, - test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" + + name: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" + "which doesn't match either pods in nodes or in topology key", }, // the node1(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score @@ -313,7 +313,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, - test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score", + name: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score", }, // there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference. // But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia. @@ -337,7 +337,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 5}}, - test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", + name: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", }, // Test with the different operators and values for pod affinity scheduling preference, including some match failures. 
{ @@ -353,7 +353,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, - test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", + name: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", }, // Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods, // but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference. @@ -369,7 +369,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, - test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", + name: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", }, { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, @@ -383,7 +383,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, - test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", + name: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", }, // The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity. 
@@ -403,7 +403,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ", + name: "Anti Affinity: pod that doesnot match existing pods in node will get high score ", }, { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, @@ -416,7 +416,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ", + name: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ", }, { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: "", Affinity: awayFromS1InAz}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, @@ -430,7 +430,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score", + name: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score", }, // Test the symmetry cases for anti affinity { @@ -444,7 +444,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score", + name: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score", }, // Test both affinity and anti-affinity { @@ -458,7 +458,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, - test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity", + name: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity", }, // Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service), // the pod prefer to run together with its brother pods in the same region, but wants to stay away from them at node level, @@ -483,7 +483,7 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 
schedulerapi.MaxPriority}, {Host: "machine4", Score: schedulerapi.MaxPriority}, {Host: "machine5", Score: 4}}, - test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", + name: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", }, // Consider Affinity, Anti Affinity and symmetry together. // for Affinity, the weights are: 8, 0, 0, 0 @@ -505,24 +505,26 @@ func TestInterPodAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}}, - test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", + name: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) - interPodAffinity := InterPodAffinity{ - info: FakeNodeListInfo(test.nodes), - nodeLister: schedulertesting.FakeNodeLister(test.nodes), - podLister: schedulertesting.FakePodLister(test.pods), - hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, - } - list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + interPodAffinity := InterPodAffinity{ + info: FakeNodeListInfo(test.nodes), + nodeLister: schedulertesting.FakeNodeLister(test.nodes), + podLister: schedulertesting.FakePodLister(test.pods), + hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, + } + list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected \n\t%#v, \ngot \n\t%#v\n", test.expectedList, list) + } + }) } } @@ -563,7 +565,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { nodes []*v1.Node hardPodAffinityWeight int32 expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}}, @@ -578,7 +580,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { }, hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: 0}}, - test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score", + name: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score", }, { pod: &v1.Pod{Spec: 
v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}}, @@ -593,23 +595,25 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { }, hardPodAffinityWeight: 0, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, - test: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match", + name: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) - ipa := InterPodAffinity{ - info: FakeNodeListInfo(test.nodes), - nodeLister: schedulertesting.FakeNodeLister(test.nodes), - podLister: schedulertesting.FakePodLister(test.pods), - hardPodAffinityWeight: test.hardPodAffinityWeight, - } - list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + ipa := InterPodAffinity{ + info: FakeNodeListInfo(test.nodes), + nodeLister: schedulertesting.FakeNodeLister(test.nodes), + podLister: schedulertesting.FakePodLister(test.pods), + hardPodAffinityWeight: test.hardPodAffinityWeight, + } + list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected \n\t%#v, \ngot \n\t%#v\n", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/least_requested_test.go b/pkg/scheduler/algorithm/priorities/least_requested_test.go index 3b5308d7ba1..c272fa34645 100644 --- a/pkg/scheduler/algorithm/priorities/least_requested_test.go +++ b/pkg/scheduler/algorithm/priorities/least_requested_test.go @@ -94,7 +94,7 @@ func TestLeastRequested(t *testing.T) { pods []*v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { /* @@ -111,7 +111,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "nothing scheduled, nothing requested", + name: "nothing scheduled, nothing requested", }, { /* @@ -128,7 +128,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}}, - test: "nothing scheduled, resources requested, differently sized machines", + name: "nothing scheduled, resources requested, differently sized machines", }, { /* @@ -145,7 +145,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: 
[]schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "no resources requested, pods scheduled", + name: "no resources requested, pods scheduled", pods: []*v1.Pod{ {Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, {Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -168,7 +168,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}}, - test: "no resources requested, pods scheduled with resources", + name: "no resources requested, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, {Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -191,7 +191,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}}, - test: "resources requested, pods scheduled with resources", + name: "resources requested, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -212,7 +212,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}}, - test: "resources requested, pods scheduled with resources, differently sized machines", + name: "resources requested, pods scheduled with resources, differently sized machines", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -233,7 +233,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: cpuOnly}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}}, - test: "requested resources exceed node capacity", + name: "requested resources exceed node capacity", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -243,7 +243,7 @@ func TestLeastRequested(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "zero node resources, pods scheduled with resources", + name: "zero node resources, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -252,13 +252,15 @@ func TestLeastRequested(t *testing.T) { } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) - list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + list, err := priorityFunction(LeastRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) + if err 
!= nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/metadata_test.go b/pkg/scheduler/algorithm/priorities/metadata_test.go index be354217a89..996993702b5 100644 --- a/pkg/scheduler/algorithm/priorities/metadata_test.go +++ b/pkg/scheduler/algorithm/priorities/metadata_test.go @@ -117,13 +117,13 @@ func TestPriorityMetadata(t *testing.T) { } tests := []struct { pod *v1.Pod - test string + name string expected interface{} }{ { pod: nil, expected: nil, - test: "pod is nil , priorityMetadata is nil", + name: "pod is nil , priorityMetadata is nil", }, { pod: podWithTolerationsAndAffinity, @@ -132,7 +132,7 @@ func TestPriorityMetadata(t *testing.T) { podTolerations: tolerations, affinity: podAffinity, }, - test: "Produce a priorityMetadata with default requests", + name: "Produce a priorityMetadata with default requests", }, { pod: podWithTolerationsAndRequests, @@ -141,7 +141,7 @@ func TestPriorityMetadata(t *testing.T) { podTolerations: tolerations, affinity: nil, }, - test: "Produce a priorityMetadata with specified requests", + name: "Produce a priorityMetadata with specified requests", }, { pod: podWithAffinityAndRequests, @@ -150,7 +150,7 @@ func TestPriorityMetadata(t *testing.T) { podTolerations: nil, affinity: podAffinity, }, - test: "Produce a priorityMetadata with specified requests", + name: "Produce a priorityMetadata with specified requests", }, } mataDataProducer := NewPriorityMetadataFactory( @@ -159,9 +159,11 @@ func TestPriorityMetadata(t *testing.T) { schedulertesting.FakeReplicaSetLister([]*extensions.ReplicaSet{}), schedulertesting.FakeStatefulSetLister([]*apps.StatefulSet{})) for _, test := range tests { - ptData := mataDataProducer(test.pod, nil) - if !reflect.DeepEqual(test.expected, ptData) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expected, ptData) - } + t.Run(test.name, func(t *testing.T) { + ptData := mataDataProducer(test.pod, nil) + if !reflect.DeepEqual(test.expected, ptData) { + t.Errorf("expected %#v, got %#v", test.expected, ptData) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/most_requested_test.go b/pkg/scheduler/algorithm/priorities/most_requested_test.go index 427a6674f8b..aa7afb8a2eb 100644 --- a/pkg/scheduler/algorithm/priorities/most_requested_test.go +++ b/pkg/scheduler/algorithm/priorities/most_requested_test.go @@ -109,7 +109,7 @@ func TestMostRequested(t *testing.T) { pods []*v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { /* @@ -126,7 +126,7 @@ func TestMostRequested(t *testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "nothing scheduled, nothing requested", + name: "nothing scheduled, nothing requested", }, { /* @@ -143,7 +143,7 @@ func TestMostRequested(t *testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}}, - test: "nothing scheduled, resources requested, differently sized machines", + name: "nothing scheduled, resources requested, differently sized machines", }, { /* @@ -160,7 +160,7 @@ func TestMostRequested(t 
*testing.T) { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}}, - test: "no resources requested, pods scheduled with resources", + name: "no resources requested, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}, {Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -183,7 +183,7 @@ func TestMostRequested(t *testing.T) { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}}, - test: "resources requested, pods scheduled with resources", + name: "resources requested, pods scheduled with resources", pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, @@ -204,18 +204,20 @@ func TestMostRequested(t *testing.T) { pod: &v1.Pod{Spec: bigCPUAndMemory}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 2}}, - test: "resources requested with more than the node, pods scheduled with resources", + name: "resources requested with more than the node, pods scheduled with resources", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) - list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + list, err := priorityFunction(MostRequestedPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/node_affinity_test.go b/pkg/scheduler/algorithm/priorities/node_affinity_test.go index e7054b1514e..60e49d6fd3e 100644 --- a/pkg/scheduler/algorithm/priorities/node_affinity_test.go +++ b/pkg/scheduler/algorithm/priorities/node_affinity_test.go @@ -105,7 +105,7 @@ func TestNodeAffinityPriority(t *testing.T) { pod *v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: &v1.Pod{ @@ -119,7 +119,7 @@ func TestNodeAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, - test: "all machines are same priority as NodeAffinity is nil", + name: "all machines are same priority as NodeAffinity is nil", }, { pod: &v1.Pod{ @@ -133,7 +133,7 @@ func TestNodeAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, - test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority 
is zero", + name: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero", }, { pod: &v1.Pod{ @@ -147,7 +147,7 @@ func TestNodeAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, - test: "only machine1 matches the preferred scheduling requirements of pod", + name: "only machine1 matches the preferred scheduling requirements of pod", }, { pod: &v1.Pod{ @@ -161,19 +161,21 @@ func TestNodeAffinityPriority(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 3}}, - test: "all machines matches the preferred scheduling requirements of pod but with different priorities ", + name: "all machines matches the preferred scheduling requirements of pod but with different priorities ", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) - nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil) - list, err := nap(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: \nexpected %#v, \ngot %#v", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil) + list, err := nap(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, \ngot %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/node_label_test.go b/pkg/scheduler/algorithm/priorities/node_label_test.go index 416fc9cc092..48fad6d2d3a 100644 --- a/pkg/scheduler/algorithm/priorities/node_label_test.go +++ b/pkg/scheduler/algorithm/priorities/node_label_test.go @@ -36,7 +36,7 @@ func TestNewNodeLabelPriority(t *testing.T) { label string presence bool expectedList schedulerapi.HostPriorityList - test string + name string }{ { nodes: []*v1.Node{ @@ -47,7 +47,7 @@ func TestNewNodeLabelPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "baz", presence: true, - test: "no match found, presence true", + name: "no match found, presence true", }, { nodes: []*v1.Node{ @@ -58,7 +58,7 @@ func TestNewNodeLabelPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, label: "baz", presence: false, - test: "no match found, presence false", + name: "no match found, presence false", }, { nodes: []*v1.Node{ @@ -69,7 +69,7 @@ func TestNewNodeLabelPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "foo", presence: true, - test: "one match found, presence true", 
+ name: "one match found, presence true", }, { nodes: []*v1.Node{ @@ -80,7 +80,7 @@ func TestNewNodeLabelPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, label: "foo", presence: false, - test: "one match found, presence false", + name: "one match found, presence false", }, { nodes: []*v1.Node{ @@ -91,7 +91,7 @@ func TestNewNodeLabelPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, label: "bar", presence: true, - test: "two matches found, presence true", + name: "two matches found, presence true", }, { nodes: []*v1.Node{ @@ -102,25 +102,27 @@ func TestNewNodeLabelPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "bar", presence: false, - test: "two matches found, presence false", + name: "two matches found, presence false", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) - labelPrioritizer := &NodeLabelPrioritizer{ - label: test.label, - presence: test.presence, - } - list, err := priorityFunction(labelPrioritizer.CalculateNodeLabelPriorityMap, nil, nil)(nil, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - // sort the two lists to avoid failures on account of different ordering - sort.Sort(test.expectedList) - sort.Sort(list) - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + labelPrioritizer := &NodeLabelPrioritizer{ + label: test.label, + presence: test.presence, + } + list, err := priorityFunction(labelPrioritizer.CalculateNodeLabelPriorityMap, nil, nil)(nil, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + // sort the two lists to avoid failures on account of different ordering + sort.Sort(test.expectedList) + sort.Sort(list) + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go index 8fb852fc6f5..1586f393ef1 100644 --- a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go +++ b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go @@ -84,7 +84,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { pod *v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: &v1.Pod{ @@ -97,7 +97,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { }, nodes: testNodes, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, - test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score", + name: "pod managed by ReplicationController should avoid a node, this node get lowest priority score", }, { pod: &v1.Pod{ @@ -110,7 +110,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { 
}, nodes: testNodes, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, - test: "ownership by random controller should be ignored", + name: "ownership by random controller should be ignored", }, { pod: &v1.Pod{ @@ -123,7 +123,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { }, nodes: testNodes, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, - test: "owner without Controller field set should be ignored", + name: "owner without Controller field set should be ignored", }, { pod: &v1.Pod{ @@ -136,21 +136,23 @@ func TestNodePreferAvoidPriority(t *testing.T) { }, nodes: testNodes, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}}, - test: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score", + name: "pod managed by ReplicaSet should avoid a node, this node get lowest priority score", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) - list, err := priorityFunction(CalculateNodePreferAvoidPodsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - // sort the two lists to avoid failures on account of different ordering - sort.Sort(test.expectedList) - sort.Sort(list) - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + list, err := priorityFunction(CalculateNodePreferAvoidPodsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + // sort the two lists to avoid failures on account of different ordering + sort.Sort(test.expectedList) + sort.Sort(list) + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/resource_limits_test.go b/pkg/scheduler/algorithm/priorities/resource_limits_test.go index e3056dcc6ba..bccfdd6161b 100644 --- a/pkg/scheduler/algorithm/priorities/resource_limits_test.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits_test.go @@ -103,49 +103,50 @@ func TestResourceLimistPriority(t *testing.T) { pod *v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: &v1.Pod{Spec: noResources}, nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 0), makeNode("machine3", 0, 10000), makeNode("machine4", 0, 0)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}, {Host: "machine4", Score: 0}}, - test: "pod does not specify its resource limits", + name: "pod does not specify its resource limits", }, { pod: &v1.Pod{Spec: cpuOnly}, nodes: []*v1.Node{makeNode("machine1", 3000, 10000), makeNode("machine2", 2000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 0}}, - test: "pod only specifies cpu limits", + name: 
"pod only specifies cpu limits", }, { pod: &v1.Pod{Spec: memOnly}, nodes: []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 1}}, - test: "pod only specifies mem limits", + name: "pod only specifies mem limits", }, { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 1}}, - test: "pod specifies both cpu and mem limits", + name: "pod specifies both cpu and mem limits", }, { pod: &v1.Pod{Spec: cpuAndMemory}, nodes: []*v1.Node{makeNode("machine1", 0, 0)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}}, - test: "node does not advertise its allocatables", + name: "node does not advertise its allocatables", }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) - list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } - } diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index 5abd1736ac9..bf6fda25635 100644 --- a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -64,20 +64,20 @@ func TestSelectorSpreadPriority(t *testing.T) { services []*v1.Service sss []*apps.StatefulSet expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: new(v1.Pod), nodes: []string{"machine1", "machine2"}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "nothing scheduled", + name: "nothing scheduled", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, pods: []*v1.Pod{{Spec: zone1Spec}}, nodes: []string{"machine1", "machine2"}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "no services", + name: "no services", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -85,7 +85,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, - test: "different services", + name: "different services", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -96,7 +96,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: 
v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, - test: "two pods, one service pod", + name: "two pods, one service pod", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -110,7 +110,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, - test: "five pods, one service pod in no namespace", + name: "five pods, one service pod in no namespace", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, @@ -123,7 +123,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, - test: "four pods, one service pod in default namespace", + name: "four pods, one service pod in default namespace", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, @@ -137,7 +137,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}}, - test: "five pods, one service pod in specific namespace", + name: "five pods, one service pod in specific namespace", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -149,7 +149,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "three pods, two service pods on different machines", + name: "three pods, two service pods on different machines", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -162,7 +162,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}}, - test: "four pods, three service pods", + name: "four pods, three service pods", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -174,7 +174,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "service with partial pod label matches", + name: "service with partial pod label matches", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, @@ -189,7 +189,7 @@ func TestSelectorSpreadPriority(t *testing.T) { // "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. 
This means that we assume that we want to // do spreading between all pods. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "service with partial pod label matches with service and replication controller", + name: "service with partial pod label matches with service and replication controller", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, @@ -203,7 +203,7 @@ func TestSelectorSpreadPriority(t *testing.T) { rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "service with partial pod label matches with service and replica set", + name: "service with partial pod label matches with service and replica set", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, @@ -216,7 +216,7 @@ func TestSelectorSpreadPriority(t *testing.T) { services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "service with partial pod label matches with service and replica set", + name: "service with partial pod label matches with service and replica set", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, @@ -230,7 +230,7 @@ func TestSelectorSpreadPriority(t *testing.T) { services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, // Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "disjoined service and replication controller should be treated equally", + name: "disjoined service and replication controller should be treated equally", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, @@ -244,7 +244,7 @@ func TestSelectorSpreadPriority(t *testing.T) { rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. 
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "disjoined service and replica set should be treated equally", + name: "disjoined service and replica set should be treated equally", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, @@ -257,7 +257,7 @@ func TestSelectorSpreadPriority(t *testing.T) { services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "disjoined service and replica set should be treated equally", + name: "disjoined service and replica set should be treated equally", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, @@ -270,7 +270,7 @@ func TestSelectorSpreadPriority(t *testing.T) { rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, // Both Nodes have one pod from the given RC, hence both get 0 score. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "Replication controller with partial pod label matches", + name: "Replication controller with partial pod label matches", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, @@ -283,7 +283,7 @@ func TestSelectorSpreadPriority(t *testing.T) { rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "Replica set with partial pod label matches", + name: "Replica set with partial pod label matches", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, @@ -296,7 +296,7 @@ func TestSelectorSpreadPriority(t *testing.T) { sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use StatefulSet, instead of ReplicationController. The result should be exactly as above. 
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, - test: "StatefulSet with partial pod label matches", + name: "StatefulSet with partial pod label matches", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, @@ -308,7 +308,7 @@ func TestSelectorSpreadPriority(t *testing.T) { nodes: []string{"machine1", "machine2"}, rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "Another replication controller with partial pod label matches", + name: "Another replication controller with partial pod label matches", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, @@ -321,7 +321,7 @@ func TestSelectorSpreadPriority(t *testing.T) { rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "Another replication set with partial pod label matches", + name: "Another replication set with partial pod label matches", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}}, @@ -334,34 +334,36 @@ func TestSelectorSpreadPriority(t *testing.T) { sss: []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, // We use StatefulSet, instead of ReplicationController. The result should be exactly as above. 
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, - test: "Another stateful set with partial pod label matches", + name: "Another stateful set with partial pod label matches", }, } - for i, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes)) - selectorSpread := SelectorSpread{ - serviceLister: schedulertesting.FakeServiceLister(test.services), - controllerLister: schedulertesting.FakeControllerLister(test.rcs), - replicaSetLister: schedulertesting.FakeReplicaSetLister(test.rss), - statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss), - } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes)) + selectorSpread := SelectorSpread{ + serviceLister: schedulertesting.FakeServiceLister(test.services), + controllerLister: schedulertesting.FakeControllerLister(test.rcs), + replicaSetLister: schedulertesting.FakeReplicaSetLister(test.rss), + statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss), + } - mataDataProducer := NewPriorityMetadataFactory( - schedulertesting.FakeServiceLister(test.services), - schedulertesting.FakeControllerLister(test.rcs), - schedulertesting.FakeReplicaSetLister(test.rss), - schedulertesting.FakeStatefulSetLister(test.sss)) - mataData := mataDataProducer(test.pod, nodeNameToInfo) + mataDataProducer := NewPriorityMetadataFactory( + schedulertesting.FakeServiceLister(test.services), + schedulertesting.FakeControllerLister(test.rcs), + schedulertesting.FakeReplicaSetLister(test.rss), + schedulertesting.FakeStatefulSetLister(test.sss)) + mataData := mataDataProducer(test.pod, nodeNameToInfo) - ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData) - list, err := ttp(test.pod, nodeNameToInfo, makeNodeList(test.nodes)) - if err != nil { - t.Errorf("unexpected error: %v index : %d\n", err, i) - } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData) + list, err := ttp(test.pod, nodeNameToInfo, makeNodeList(test.nodes)) + if err != nil { + t.Errorf("unexpected error: %v \n", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } @@ -413,7 +415,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { services []*v1.Service sss []*apps.StatefulSet expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: new(v1.Pod), @@ -425,7 +427,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, - test: "nothing scheduled", + name: "nothing scheduled", }, { pod: buildPod("", labels1, nil), @@ -438,7 +440,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, - test: "no services", + name: "no services", }, { pod: buildPod("", labels1, nil), @@ -452,7 +454,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, {Host: nodeMachine3Zone3, Score: 
schedulerapi.MaxPriority}, }, - test: "different services", + name: "different services", }, { pod: buildPod("", labels1, nil), @@ -469,7 +471,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, - test: "two pods, 0 matching", + name: "two pods, 0 matching", }, { pod: buildPod("", labels1, nil), @@ -486,7 +488,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: schedulerapi.MaxPriority}, {Host: nodeMachine3Zone3, Score: schedulerapi.MaxPriority}, }, - test: "two pods, 1 matching (in z2)", + name: "two pods, 1 matching (in z2)", }, { pod: buildPod("", labels1, nil), @@ -506,7 +508,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: 3}, // Pod on node {Host: nodeMachine3Zone3, Score: 6}, // Pod in zone }, - test: "five pods, 3 matching (z2=2, z3=1)", + name: "five pods, 3 matching (z2=2, z3=1)", }, { pod: buildPod("", labels1, nil), @@ -525,7 +527,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: 3}, // Pod in zone {Host: nodeMachine3Zone3, Score: 3}, // Pod in zone }, - test: "four pods, 3 matching (z1=1, z2=1, z3=1)", + name: "four pods, 3 matching (z1=1, z2=1, z3=1)", }, { pod: buildPod("", labels1, nil), @@ -544,7 +546,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: 3}, // Pod in zone {Host: nodeMachine3Zone3, Score: 3}, // Pod in zone }, - test: "four pods, 3 matching (z1=1, z2=1, z3=1)", + name: "four pods, 3 matching (z1=1, z2=1, z3=1)", }, { pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")), @@ -569,36 +571,38 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { {Host: nodeMachine2Zone3, Score: 3}, // Pod in zone {Host: nodeMachine3Zone3, Score: 3}, // Pod in zone }, - test: "Replication controller spreading (z1=0, z2=1, z3=2)", + name: "Replication controller spreading (z1=0, z2=1, z3=2)", }, } - for i, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes)) - selectorSpread := SelectorSpread{ - serviceLister: schedulertesting.FakeServiceLister(test.services), - controllerLister: schedulertesting.FakeControllerLister(test.rcs), - replicaSetLister: schedulertesting.FakeReplicaSetLister(test.rss), - statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss), - } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes)) + selectorSpread := SelectorSpread{ + serviceLister: schedulertesting.FakeServiceLister(test.services), + controllerLister: schedulertesting.FakeControllerLister(test.rcs), + replicaSetLister: schedulertesting.FakeReplicaSetLister(test.rss), + statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss), + } - mataDataProducer := NewPriorityMetadataFactory( - schedulertesting.FakeServiceLister(test.services), - schedulertesting.FakeControllerLister(test.rcs), - schedulertesting.FakeReplicaSetLister(test.rss), - schedulertesting.FakeStatefulSetLister(test.sss)) - mataData := mataDataProducer(test.pod, nodeNameToInfo) - ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData) - list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(labeledNodes)) - if err != nil { - 
t.Errorf("unexpected error: %v index : %d", err, i) - } - // sort the two lists to avoid failures on account of different ordering - sort.Sort(test.expectedList) - sort.Sort(list) - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list) - } + mataDataProducer := NewPriorityMetadataFactory( + schedulertesting.FakeServiceLister(test.services), + schedulertesting.FakeControllerLister(test.rcs), + schedulertesting.FakeReplicaSetLister(test.rss), + schedulertesting.FakeStatefulSetLister(test.sss)) + mataData := mataDataProducer(test.pod, nodeNameToInfo) + ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, mataData) + list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(labeledNodes)) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + // sort the two lists to avoid failures on account of different ordering + sort.Sort(test.expectedList) + sort.Sort(list) + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } @@ -640,7 +644,7 @@ func TestZoneSpreadPriority(t *testing.T) { nodes map[string]map[string]string services []*v1.Service expectedList schedulerapi.HostPriorityList - test string + name string }{ { pod: new(v1.Pod), @@ -648,7 +652,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "nothing scheduled", + name: "nothing scheduled", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -657,7 +661,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "no services", + name: "no services", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -667,7 +671,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "different services", + name: "different services", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -681,7 +685,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: schedulerapi.MaxPriority}, {Host: "machine12", Score: schedulerapi.MaxPriority}, {Host: "machine21", Score: 0}, {Host: "machine22", Score: 0}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "three pods, one service pod", + name: "three pods, one service pod", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -695,7 +699,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5}, {Host: "machine21", 
Score: 5}, {Host: "machine22", Score: 5}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "three pods, two service pods on different machines", + name: "three pods, two service pods on different machines", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}}, @@ -710,7 +714,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0}, {Host: "machine21", Score: schedulerapi.MaxPriority}, {Host: "machine22", Score: schedulerapi.MaxPriority}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "three service label match pods in different namespaces", + name: "three service label match pods in different namespaces", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -725,7 +729,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6}, {Host: "machine21", Score: 3}, {Host: "machine22", Score: 3}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "four pods, three service pods", + name: "four pods, three service pods", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -739,7 +743,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3}, {Host: "machine21", Score: 6}, {Host: "machine22", Score: 6}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "service with partial pod label matches", + name: "service with partial pod label matches", }, { pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, @@ -754,7 +758,7 @@ func TestZoneSpreadPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7}, {Host: "machine21", Score: 5}, {Host: "machine22", Score: 5}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, - test: "service pod on non-zoned node", + name: "service pod on non-zoned node", }, } // these local variables just make sure controllerLister\replicaSetLister\statefulSetLister not nil @@ -763,28 +767,30 @@ func TestZoneSpreadPriority(t *testing.T) { rcs := []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}} rss := []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}} - for i, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes)) - zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"} + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes)) + zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"} - mataDataProducer := NewPriorityMetadataFactory( - schedulertesting.FakeServiceLister(test.services), - schedulertesting.FakeControllerLister(rcs), - schedulertesting.FakeReplicaSetLister(rss), - schedulertesting.FakeStatefulSetLister(sss)) - mataData := mataDataProducer(test.pod, nodeNameToInfo) - ttp := 
priorityFunction(zoneSpread.CalculateAntiAffinityPriorityMap, zoneSpread.CalculateAntiAffinityPriorityReduce, mataData) - list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(test.nodes)) - if err != nil { - t.Errorf("unexpected error: %v index : %d", err, i) - } + mataDataProducer := NewPriorityMetadataFactory( + schedulertesting.FakeServiceLister(test.services), + schedulertesting.FakeControllerLister(rcs), + schedulertesting.FakeReplicaSetLister(rss), + schedulertesting.FakeStatefulSetLister(sss)) + mataData := mataDataProducer(test.pod, nodeNameToInfo) + ttp := priorityFunction(zoneSpread.CalculateAntiAffinityPriorityMap, zoneSpread.CalculateAntiAffinityPriorityReduce, mataData) + list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(test.nodes)) + if err != nil { + t.Errorf("unexpected error: %v", err) + } - // sort the two lists to avoid failures on account of different ordering - sort.Sort(test.expectedList) - sort.Sort(list) - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("test index %d (%s): expected %#v, got %#v", i, test.test, test.expectedList, list) - } + // sort the two lists to avoid failures on account of different ordering + sort.Sort(test.expectedList) + sort.Sort(list) + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected %#v, got %#v", test.expectedList, list) + } + }) } } diff --git a/pkg/scheduler/algorithm/priorities/taint_toleration_test.go b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go index 3ef61dd987e..83009c3701a 100644 --- a/pkg/scheduler/algorithm/priorities/taint_toleration_test.go +++ b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go @@ -54,11 +54,11 @@ func TestTaintAndToleration(t *testing.T) { pod *v1.Pod nodes []*v1.Node expectedList schedulerapi.HostPriorityList - test string + name string }{ // basic test case { - test: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints", + name: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints", pod: podWithTolerations([]v1.Toleration{{ Key: "foo", Operator: v1.TolerationOpEqual, @@ -84,7 +84,7 @@ func TestTaintAndToleration(t *testing.T) { }, // the count of taints that are tolerated by pod, does not matter. { - test: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has", + name: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has", pod: podWithTolerations([]v1.Toleration{ { Key: "cpu-type", @@ -127,7 +127,7 @@ func TestTaintAndToleration(t *testing.T) { }, // the count of taints on a node that are not tolerated by pod, matters. 
{ - test: "the more intolerable taints a node has, the lower score it gets.", + name: "the more intolerable taints a node has, the lower score it gets.", pod: podWithTolerations([]v1.Toleration{{ Key: "foo", Operator: v1.TolerationOpEqual, @@ -163,7 +163,7 @@ func TestTaintAndToleration(t *testing.T) { }, // taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule { - test: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function", + name: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function", pod: podWithTolerations([]v1.Toleration{ { Key: "cpu-type", @@ -205,7 +205,7 @@ func TestTaintAndToleration(t *testing.T) { }, }, { - test: "Default behaviour No taints and tolerations, lands on node with no taints", + name: "Default behaviour No taints and tolerations, lands on node with no taints", //pod without tolerations pod: podWithTolerations([]v1.Toleration{}), nodes: []*v1.Node{ @@ -226,16 +226,17 @@ func TestTaintAndToleration(t *testing.T) { }, } for _, test := range tests { - nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) - ttp := priorityFunction(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil) - list, err := ttp(test.pod, nodeNameToInfo, test.nodes) - if err != nil { - t.Errorf("%s, unexpected error: %v", test.test, err) - } + t.Run(test.name, func(t *testing.T) { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + ttp := priorityFunction(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil) + list, err := ttp(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } - if !reflect.DeepEqual(test.expectedList, list) { - t.Errorf("%s,\nexpected:\n\t%+v,\ngot:\n\t%+v", test.test, test.expectedList, list) - } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, list) + } + }) } - }