[scheduler] [test]: Fix PreferNominatedNode test

Once the node gets deleted, the nodelifecycle controller
races to update pod status, and the pod deletion logic
fails, causing the test to flake. This commit moves
the testContext creation into the test loop and deletes the nodes
and namespace within the test loop. We don't explicitly call node
deletion inside the loop; the `testutils.CleanupTest(t, testCtx)`
call ensures that the namespace and the nodes get deleted.
ravisantoshgudimetla 2021-10-05 12:34:29 -04:00
parent 9cb0c403df
commit 9476237923
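
The diff below shows the actual change. As a rough, minimal sketch of the per-subtest pattern it adopts (standard library only; `testContext`, `newTestContext`, and `cleanup` are hypothetical stand-ins for the scheduler test utilities `initTestPreferNominatedNode` and `testutils.CleanupTest`):

	package example

	import "testing"

	type testContext struct {
		namespace string
	}

	// newTestContext provisions a fresh namespace (and, in the real test,
	// nodes) for exactly one subtest.
	func newTestContext(t *testing.T, nsPrefix string) *testContext {
		t.Helper()
		return &testContext{namespace: nsPrefix + "-ns"}
	}

	// cleanup tears down everything the subtest created, in one place, after
	// the subtest has fully finished, so no controller races a mid-test
	// deletion.
	func (tc *testContext) cleanup(t *testing.T) {
		t.Helper()
		// Delete the namespace and any nodes created under this context.
	}

	func TestPerSubtestContext(t *testing.T) {
		for _, name := range []string{"case-1", "case-2"} {
			t.Run(name, func(t *testing.T) {
				// Fresh context per case: nodes, pods, and namespace cannot
				// leak from one case into the next.
				testCtx := newTestContext(t, "example")
				t.Cleanup(func() { testCtx.cleanup(t) })
				// ... create nodes and pods scoped to testCtx ...
			})
		}
	}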


@@ -1341,52 +1341,37 @@ func initTestPreferNominatedNode(t *testing.T, nsPrefix string, opts ...schedule
 // enabled.
 func TestPreferNominatedNode(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PreferNominatedNode, true)()
-	testCtx := initTestPreferNominatedNode(t, "perfer-nominated-node")
-	t.Cleanup(func() {
-		testutils.CleanupTest(t, testCtx)
-	})
-	cs := testCtx.ClientSet
-	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
-		v1.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
-	}
 	defaultNodeRes := map[v1.ResourceName]string{
 		v1.ResourcePods:   "32",
 		v1.ResourceCPU:    "500m",
 		v1.ResourceMemory: "500",
 	}
-	type nodeConfig struct {
-		name string
-		res  map[v1.ResourceName]string
+	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
+		v1.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
+		v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
 	}
 	tests := []struct {
 		name         string
-		nodes        []*nodeConfig
+		nodeNames    []string
 		existingPods []*v1.Pod
 		pod          *v1.Pod
 		runnningNode string
 	}{
 		{
-			name: "nominated node released all resource, preemptor is scheduled to the nominated node",
-			nodes: []*nodeConfig{
-				{name: "node-1", res: defaultNodeRes},
-				{name: "node-2", res: defaultNodeRes},
-			},
+			name:      "nominated node released all resource, preemptor is scheduled to the nominated node",
+			nodeNames: []string{"node-1", "node-2"},
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name:      "low-pod1",
-					Namespace: testCtx.NS.Name,
 					Priority:  &lowPriority,
 					NodeName:  "node-2",
 					Resources: defaultPodRes,
 				}),
 			},
 			pod: initPausePod(&pausePodConfig{
-				Name:      "preemptor-pod",
-				Namespace: testCtx.NS.Name,
-				Priority:  &highPriority,
+				Name:     "preemptor-pod",
+				Priority: &highPriority,
 				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
 					v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
 					v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
@@ -1395,24 +1380,19 @@ func TestPreferNominatedNode(t *testing.T) {
 			runnningNode: "node-1",
 		},
 		{
-			name: "nominated node cannot pass all the filters, preemptor should find a different node",
-			nodes: []*nodeConfig{
-				{name: "node-1", res: defaultNodeRes},
-				{name: "node-2", res: defaultNodeRes},
-			},
+			name:      "nominated node cannot pass all the filters, preemptor should find a different node",
+			nodeNames: []string{"node-1", "node-2"},
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
-					Name:      "low-pod1",
-					Namespace: testCtx.NS.Name,
+					Name:      "low-pod",
 					Priority:  &lowPriority,
 					Resources: defaultPodRes,
 					NodeName:  "node-1",
 				}),
 			},
 			pod: initPausePod(&pausePodConfig{
-				Name:      "preemptor-pod",
-				Namespace: testCtx.NS.Name,
-				Priority:  &highPriority,
+				Name:     "preemptor-pod1",
+				Priority: &highPriority,
 				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
 					v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
 					v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
@@ -1424,22 +1404,31 @@ func TestPreferNominatedNode(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
+			testCtx := initTestPreferNominatedNode(t, "perfer-nominated-node")
+			t.Cleanup(func() {
+				testutils.CleanupTest(t, testCtx)
+			})
+			cs := testCtx.ClientSet
+			nsName := testCtx.NS.Name
 			var err error
 			var preemptor *v1.Pod
-			for _, nodeConf := range test.nodes {
-				_, err := createNode(cs, st.MakeNode().Name(nodeConf.name).Capacity(nodeConf.res).Obj())
+			for _, nodeName := range test.nodeNames {
+				_, err := createNode(cs, st.MakeNode().Name(nodeName).Capacity(defaultNodeRes).Obj())
 				if err != nil {
-					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
+					t.Fatalf("Error creating node %v: %v", nodeName, err)
 				}
 			}
 			pods := make([]*v1.Pod, len(test.existingPods))
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
+				p.Namespace = nsName
 				pods[i], err = runPausePod(cs, p)
 				if err != nil {
 					t.Fatalf("Error running pause pod: %v", err)
 				}
 			}
+			test.pod.Namespace = nsName
 			preemptor, err = createPausePod(cs, test.pod)
 			if err != nil {
 				t.Errorf("Error while creating high priority pod: %v", err)
@@ -1461,10 +1450,6 @@ func TestPreferNominatedNode(t *testing.T) {
 			if preemptor.Spec.NodeName != test.runnningNode {
 				t.Errorf("Expect pod running on %v, got %v.", test.runnningNode, preemptor.Spec.NodeName)
 			}
-			pods = append(pods, preemptor)
-			// cleanup
-			defer testutils.CleanupPods(cs, t, pods)
-			cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 		})
 	}
 }