From f01295067ffe4b6c6de95572141664c8939bebc8 Mon Sep 17 00:00:00 2001
From: Monis Khan
Date: Fri, 23 Sep 2022 12:00:14 -0400
Subject: [PATCH] Revert "Enable paralellism in scheduler unit tests"

This reverts commit f37ab167f8bebb761ba0bbd58aede3b0cfd08195.

Signed-off-by: Monis Khan
---
 pkg/scheduler/eventhandlers_test.go | 24 ----------------------
 pkg/scheduler/extender_test.go      | 10 ---------
 pkg/scheduler/schedule_one_test.go  | 32 -----------------------------
 pkg/scheduler/scheduler_test.go     | 13 ------------
 4 files changed, 79 deletions(-)

diff --git a/pkg/scheduler/eventhandlers_test.go b/pkg/scheduler/eventhandlers_test.go
index 4063464a97d..856d2302652 100644
--- a/pkg/scheduler/eventhandlers_test.go
+++ b/pkg/scheduler/eventhandlers_test.go
@@ -48,7 +48,6 @@ import (
 )
 
 func TestNodeAllocatableChanged(t *testing.T) {
-	t.Parallel()
 	newQuantity := func(value int64) resource.Quantity {
 		return *resource.NewQuantity(value, resource.BinarySI)
 	}
@@ -71,9 +70,7 @@ func TestNodeAllocatableChanged(t *testing.T) {
 			NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024), v1.ResourceStorage: newQuantity(1024)},
 		},
 	} {
-		test := test
 		t.Run(test.Name, func(t *testing.T) {
-			t.Parallel()
 			oldNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.OldAllocatable}}
 			newNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.NewAllocatable}}
 			changed := nodeAllocatableChanged(newNode, oldNode)
@@ -85,7 +82,6 @@ func TestNodeAllocatableChanged(t *testing.T) {
 }
 
 func TestNodeLabelsChanged(t *testing.T) {
-	t.Parallel()
 	for _, test := range []struct {
 		Name      string
 		Changed   bool
@@ -106,9 +102,7 @@ func TestNodeLabelsChanged(t *testing.T) {
 			NewLabels: map[string]string{"foo": "bar", "test": "value"},
 		},
 	} {
-		test := test
 		t.Run(test.Name, func(t *testing.T) {
-			t.Parallel()
 			oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.OldLabels}}
 			newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.NewLabels}}
 			changed := nodeLabelsChanged(newNode, oldNode)
@@ -120,7 +114,6 @@ func TestNodeLabelsChanged(t *testing.T) {
 }
 
 func TestNodeTaintsChanged(t *testing.T) {
-	t.Parallel()
 	for _, test := range []struct {
 		Name      string
 		Changed   bool
@@ -140,9 +133,7 @@ func TestNodeTaintsChanged(t *testing.T) {
 			NewTaints: []v1.Taint{{Key: "key", Value: "value2"}},
 		},
 	} {
-		test := test
 		t.Run(test.Name, func(t *testing.T) {
-			t.Parallel()
 			oldNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.OldTaints}}
 			newNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.NewTaints}}
 			changed := nodeTaintsChanged(newNode, oldNode)
@@ -154,7 +145,6 @@ func TestNodeTaintsChanged(t *testing.T) {
 }
 
 func TestNodeConditionsChanged(t *testing.T) {
-	t.Parallel()
 	nodeConditionType := reflect.TypeOf(v1.NodeCondition{})
 	if nodeConditionType.NumField() != 6 {
 		t.Errorf("NodeCondition type has changed. The nodeConditionsChanged() function must be reevaluated.")
@@ -197,9 +187,7 @@ func TestNodeConditionsChanged(t *testing.T) {
 			NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
 		},
 	} {
-		test := test
 		t.Run(test.Name, func(t *testing.T) {
-			t.Parallel()
 			oldNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.OldConditions}}
 			newNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.NewConditions}}
 			changed := nodeConditionsChanged(newNode, oldNode)
@@ -211,7 +199,6 @@ func TestNodeConditionsChanged(t *testing.T) {
 }
 
 func TestUpdatePodInCache(t *testing.T) {
-	t.Parallel()
 	ttl := 10 * time.Second
 	nodeName := "node"
 
@@ -232,9 +219,7 @@
 		},
 	}
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 			sched := &Scheduler{
@@ -266,7 +251,6 @@ func withPodName(pod *v1.Pod, name string) *v1.Pod {
 }
 
 func TestPreCheckForNode(t *testing.T) {
-	t.Parallel()
 	cpu4 := map[v1.ResourceName]string{v1.ResourceCPU: "4"}
 	cpu8 := map[v1.ResourceName]string{v1.ResourceCPU: "8"}
 	cpu16 := map[v1.ResourceName]string{v1.ResourceCPU: "16"}
@@ -353,9 +337,7 @@
 	}
 
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			nodeInfo := framework.NewNodeInfo(tt.existingPods...)
 			nodeInfo.SetNode(tt.nodeFn())
 			preCheckFn := preCheckForNode(nodeInfo)
@@ -374,7 +356,6 @@
 
 // test for informers of resources we care about is registered
 func TestAddAllEventHandlers(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name   string
 		gvkMap map[framework.GVK]framework.ActionType
@@ -448,9 +429,7 @@
 	localSchemeBuilder.AddToScheme(scheme)
 
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
@@ -482,7 +461,6 @@
 }
 
 func TestAdmissionCheck(t *testing.T) {
-	t.Parallel()
 	nodeaffinityError := AdmissionResult{Name: nodeaffinity.Name, Reason: nodeaffinity.ErrReasonPod}
 	nodenameError := AdmissionResult{Name: nodename.Name, Reason: nodename.ErrReason}
 	nodeportsError := AdmissionResult{Name: nodeports.Name, Reason: nodeports.ErrReason}
@@ -524,9 +502,7 @@
 		},
 	}
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			nodeInfo := framework.NewNodeInfo(tt.existingPods...)
 			nodeInfo.SetNode(tt.node)
 
diff --git a/pkg/scheduler/extender_test.go b/pkg/scheduler/extender_test.go
index ecdecebed28..64c1cd5b53c 100644
--- a/pkg/scheduler/extender_test.go
+++ b/pkg/scheduler/extender_test.go
@@ -41,7 +41,6 @@ import (
 )
 
 func TestSchedulerWithExtenders(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name            string
 		registerPlugins []st.RegisterPluginFunc
@@ -331,7 +330,6 @@ func createNode(name string) *v1.Node {
 }
 
 func TestIsInterested(t *testing.T) {
-	t.Parallel()
 	mem := &HTTPExtender{
 		managedResources: sets.NewString(),
 	}
@@ -374,9 +372,7 @@ func TestIsInterested(t *testing.T) {
 			want: true,
 		},
 	} {
-		tc := tc
 		t.Run(tc.label, func(t *testing.T) {
-			t.Parallel()
 			if got := tc.extender.IsInterested(tc.pod); got != tc.want {
 				t.Fatalf("IsInterested(%v) = %v, wanted %v", tc.pod, got, tc.want)
 			}
@@ -385,7 +381,6 @@ func TestIsInterested(t *testing.T) {
 }
 
 func TestConvertToMetaVictims(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name              string
 		nodeNameToVictims map[string]*extenderv1.Victims
@@ -428,9 +423,7 @@ func TestConvertToMetaVictims(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			if got := convertToMetaVictims(tt.nodeNameToVictims); !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("convertToMetaVictims() = %v, want %v", got, tt.want)
 			}
@@ -439,7 +432,6 @@ func TestConvertToMetaVictims(t *testing.T) {
 }
 
 func TestConvertToVictims(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name         string
 		httpExtender *HTTPExtender
@@ -496,9 +488,7 @@ func TestConvertToVictims(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			// nodeInfos instantiations
 			nodeInfoList := make([]*framework.NodeInfo, 0, len(tt.nodeNames))
 			for i, nm := range tt.nodeNames {
diff --git a/pkg/scheduler/schedule_one_test.go b/pkg/scheduler/schedule_one_test.go
index 3c1c5c5f357..dc11fd75c35 100644
--- a/pkg/scheduler/schedule_one_test.go
+++ b/pkg/scheduler/schedule_one_test.go
@@ -319,7 +319,6 @@ func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, po
 }
 
 func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
-	t.Parallel()
 	nodes := []runtime.Object{
 		st.MakeNode().Name("node1").UID("node1").Obj(),
 		st.MakeNode().Name("node2").UID("node2").Obj(),
@@ -446,7 +445,6 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
 }
 
 func TestSchedulerScheduleOne(t *testing.T) {
-	t.Parallel()
 	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
 	client := clientsetfake.NewSimpleClientset(&testNode)
 	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
@@ -641,7 +639,6 @@ func TestSchedulerScheduleOne(t *testing.T) {
 }
 
 func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
-	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
@@ -707,7 +704,6 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 }
 
 func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
-	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
@@ -777,7 +773,6 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 }
 
 func TestSchedulerFailedSchedulingReasons(t *testing.T) {
-	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
@@ -860,7 +855,6 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 }
 
 func TestSchedulerWithVolumeBinding(t *testing.T) {
-	t.Parallel()
 	findErr := fmt.Errorf("find err")
 	assumeErr := fmt.Errorf("assume err")
 	bindErr := fmt.Errorf("bind err")
@@ -1007,7 +1001,6 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 }
 
 func TestSchedulerBinding(t *testing.T) {
-	t.Parallel()
 	table := []struct {
 		podName   string
 		extenders []framework.Extender
@@ -1043,9 +1036,7 @@
 	}
 
 	for _, test := range table {
-		test := test
 		t.Run(test.name, func(t *testing.T) {
-			t.Parallel()
 			pod := st.MakePod().Name(test.podName).Obj()
 			defaultBound := false
 			client := clientsetfake.NewSimpleClientset(pod)
@@ -1093,7 +1084,6 @@ func TestSchedulerBinding(t *testing.T) {
 }
 
 func TestUpdatePod(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name                 string
 		currentPodConditions []v1.PodCondition
@@ -1235,9 +1225,7 @@ func TestUpdatePod(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		test := test
 		t.Run(test.name, func(t *testing.T) {
-			t.Parallel()
 			actualPatchRequests := 0
 			var actualPatchData string
 			cs := &clientsetfake.Clientset{}
@@ -1275,7 +1263,6 @@ func TestUpdatePod(t *testing.T) {
 }
 
 func TestSelectHost(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name string
 		list framework.NodeScoreList
@@ -1323,9 +1310,7 @@ func TestSelectHost(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		test := test
 		t.Run(test.name, func(t *testing.T) {
-			t.Parallel()
 			// increase the randomness
 			for i := 0; i < 10; i++ {
 				got, err := selectHost(test.list)
@@ -1347,7 +1332,6 @@ func TestSelectHost(t *testing.T) {
 }
 
 func TestFindNodesThatPassExtenders(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name      string
 		extenders []st.FakeExtender
@@ -1499,9 +1483,7 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
 	}
 
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			var extenders []framework.Extender
 			for ii := range tt.extenders {
 				extenders = append(extenders, &tt.extenders[ii])
@@ -1529,7 +1511,6 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
 }
 
 func TestSchedulerSchedulePod(t *testing.T) {
-	t.Parallel()
 	fts := feature.Features{}
 	tests := []struct {
 		name string
@@ -1995,9 +1976,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		test := test
 		t.Run(test.name, func(t *testing.T) {
-			t.Parallel()
 			cache := internalcache.New(time.Duration(0), wait.NeverStop)
 			for _, pod := range test.pods {
 				cache.AddPod(pod)
@@ -2146,12 +2125,10 @@
 	}
 
 	for _, node := range nodes {
-		node := node
 		if node.Name == pod.Name {
 			continue
 		}
 		t.Run(node.Name, func(t *testing.T) {
-			t.Parallel()
 			status, found := diagnosis.NodeToStatusMap[node.Name]
 			if !found {
 				t.Errorf("failed to find node %v in %v", node.Name, diagnosis.NodeToStatusMap)
@@ -2232,7 +2209,6 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 // is the one being scheduled.
 // - don't get the same score no matter what we schedule.
 func TestZeroRequest(t *testing.T) {
-	t.Parallel()
 	// A pod with no resources. We expect spreading to count it as having the default resources.
 	noResources := v1.PodSpec{
 		Containers: []v1.Container{
@@ -2321,9 +2297,7 @@
 	}
 
 	for _, test := range tests {
-		test := test
 		t.Run(test.name, func(t *testing.T) {
-			t.Parallel()
 			client := clientsetfake.NewSimpleClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
@@ -2383,7 +2357,6 @@
 var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
 
 func TestNumFeasibleNodesToFind(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name                     string
 		percentageOfNodesToScore int32
@@ -2425,9 +2398,7 @@
 		},
 	}
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			sched := &Scheduler{
 				percentageOfNodesToScore: tt.percentageOfNodesToScore,
 			}
@@ -2482,7 +2453,6 @@ func TestFairEvaluationForNodes(t *testing.T) {
 }
 
 func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name string
 		pod  *v1.Pod
@@ -2509,9 +2479,7 @@
 	}
 
 	for _, test := range tests {
-		test := test
 		t.Run(test.name, func(t *testing.T) {
-			t.Parallel()
 			// create three nodes in the cluster.
 			nodes := makeNodeList([]string{"node1", "node2", "node3"})
 			client := clientsetfake.NewSimpleClientset(test.pod)
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index a046cc30229..6dfb240833c 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -49,7 +49,6 @@ import (
 )
 
 func TestSchedulerCreation(t *testing.T) {
-	t.Parallel()
 	invalidRegistry := map[string]frameworkruntime.PluginFactory{
 		defaultbinder.Name: defaultbinder.New,
 	}
@@ -167,9 +166,7 @@
 	}
 
 	for _, tc := range cases {
-		tc := tc
 		t.Run(tc.name, func(t *testing.T) {
-			t.Parallel()
 			client := fake.NewSimpleClientset()
 			informerFactory := informers.NewSharedInformerFactory(client, 0)
 
@@ -234,7 +231,6 @@
 }
 
 func TestFailureHandler(t *testing.T) {
-	t.Parallel()
 	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
 	testPodUpdated := testPod.DeepCopy()
 	testPodUpdated.Labels = map[string]string{"foo": ""}
@@ -266,9 +262,7 @@
 	}
 
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
@@ -320,7 +314,6 @@
 }
 
 func TestFailureHandler_NodeNotFound(t *testing.T) {
-	t.Parallel()
 	nodeFoo := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
 	nodeBar := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}
 	testPod := st.MakePod().Name("test-pod").Namespace(v1.NamespaceDefault).Obj()
@@ -347,9 +340,7 @@
 	}
 
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
 
@@ -392,7 +383,6 @@
 }
 
 func TestFailureHandler_PodAlreadyBound(t *testing.T) {
-	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
@@ -486,7 +476,6 @@ func initScheduler(stop <-chan struct{}, cache internalcache.Cache, queue intern
 }
 
 func TestInitPluginsWithIndexers(t *testing.T) {
-	t.Parallel()
 	tests := []struct {
 		name string
 		// the plugin registration ordering must not matter, being map traversal random
@@ -549,9 +538,7 @@
 	}
 
 	for _, tt := range tests {
-		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			t.Parallel()
 			fakeInformerFactory := NewInformerFactory(&fake.Clientset{}, 0*time.Second)
 
			var registerPluginFuncs []st.RegisterPluginFunc
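
Note on the reverted pattern: before Go 1.22, a `for ... range` loop reuses a single loop variable across iterations. A table-driven test that calls t.Parallel() inside t.Run therefore had to shadow the loop variable (`tt := tt`), because the parallel subtest closures only execute after the loop has finished; without the copy, every subtest could observe the final test case. Since this revert removes the t.Parallel() calls, the now-redundant captures are removed alongside them. Below is a minimal sketch of the pattern being reverted, not part of the patch; TestExample and its test data are invented for illustration.

package example

import (
	"strings"
	"testing"
)

func TestExample(t *testing.T) {
	tests := []struct {
		name, in, want string
	}{
		{name: "lower", in: "a", want: "a"},
		{name: "upper", in: "B", want: "b"},
	}
	for _, tt := range tests {
		tt := tt // copy the loop variable; required before Go 1.22 when subtests run in parallel
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel() // subtest runs after the loop finishes; without the copy, tt would be the last case
			if got := strings.ToLower(tt.in); got != tt.want {
				t.Errorf("ToLower(%q) = %q, want %q", tt.in, got, tt.want)
			}
		})
	}
}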