From b7d90ca99165b56495adcd0a52cc4bd7c9a821fa Mon Sep 17 00:00:00 2001
From: Wei Huang
Date: Tue, 5 Oct 2021 16:46:59 -0700
Subject: [PATCH] sched: adjust events to register for VolumeBinding plugin

---
 .../plugins/volumebinding/volume_binding.go |  2 +-
 pkg/scheduler/testing/wrappers.go           | 11 +++++++++++
 test/integration/scheduler/queue_test.go    | 19 +++++++++++--------
 3 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
index 349294aae71..dfda81a0e50 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
@@ -105,7 +105,7 @@ func (pl *VolumeBinding) EventsToRegister() []framework.ClusterEvent {
 		// Pods may fail to find available PVs because the node labels do not
 		// match the storage class's allowed topologies or PV's node affinity.
 		// A new or updated node may make pods schedulable.
-		{Resource: framework.Node, ActionType: framework.Add | framework.Update},
+		{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel},
 		// We rely on CSI node to translate in-tree PV to CSI.
 		{Resource: framework.CSINode, ActionType: framework.Add | framework.Update},
 	}
diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go
index 8e0c392b43b..7e30d5c0f69 100644
--- a/pkg/scheduler/testing/wrappers.go
+++ b/pkg/scheduler/testing/wrappers.go
@@ -285,6 +285,17 @@ func (p *PodWrapper) HostPort(port int32) *PodWrapper {
 	return p
 }
 
+// PVC creates a Volume with a PVC and injects it into the inner pod.
+func (p *PodWrapper) PVC(name string) *PodWrapper {
+	p.Spec.Volumes = append(p.Spec.Volumes, v1.Volume{
+		Name: name,
+		VolumeSource: v1.VolumeSource{
+			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: name},
+		},
+	})
+	return p
+}
+
 // PodAffinityKind represents different kinds of PodAffinity.
 type PodAffinityKind int
 
diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go
index 91b6f667557..da0cc3d5aa3 100644
--- a/test/integration/scheduler/queue_test.go
+++ b/test/integration/scheduler/queue_test.go
@@ -76,23 +76,25 @@ func TestCoreResourceEnqueue(t *testing.T) {
 	// - Pod1 is a best-effort Pod, but doesn't have the required toleration.
 	// - Pod2 requests a large amount of CPU resource that the node cannot fit.
 	//   Note: Pod2 will fail the tainttoleration plugin b/c that's ordered prior to noderesources.
+	// - Pod3 has the required toleration, but requests a non-existing PVC.
 	pod1 := st.MakePod().Namespace(ns).Name("pod1").Container("image").Obj()
 	pod2 := st.MakePod().Namespace(ns).Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj()
-	for _, pod := range []*v1.Pod{pod1, pod2} {
+	pod3 := st.MakePod().Namespace(ns).Name("pod3").Toleration("foo").PVC("pvc").Container("image").Obj()
+	for _, pod := range []*v1.Pod{pod1, pod2, pod3} {
 		if _, err := cs.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
 			t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
 		}
 	}
 
-	// Wait for the two pods to be present in the scheduling queue.
+	// Wait for the three pods to be present in the scheduling queue.
 	if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
-		return len(testCtx.Scheduler.SchedulingQueue.PendingPods()) == 2, nil
+		return len(testCtx.Scheduler.SchedulingQueue.PendingPods()) == 3, nil
 	}); err != nil {
 		t.Fatal(err)
 	}
 
-	// Pop the two pods out. They should be unschedulable.
-	for i := 0; i < 2; i++ {
+	// Pop the three pods out. They should be unschedulable.
+	for i := 0; i < 3; i++ {
 		podInfo := nextPodOrDie(t, testCtx)
 		fwk, ok := testCtx.Scheduler.Profiles[podInfo.Pod.Spec.SchedulerName]
 		if !ok {
@@ -126,9 +128,10 @@ func TestCoreResourceEnqueue(t *testing.T) {
 		t.Fatalf("Exepcted pod1 to be popped, but got %v", got)
 	}
 
-	// Pod2 is not expected to be popped out.
-	// Although the failure reason has been lifted, it still won't be moved to active due to
-	// the node event's preCheckForNode().
+	// Pod2 and Pod3 are not expected to be popped out.
+	// - Although the failure reason has been lifted, Pod2 still won't be moved to active due to
+	//   the node event's preCheckForNode().
+	// - Regarding Pod3, the NodeTaintChange event is irrelevant to its scheduling failure.
 	podInfo = nextPod(t, testCtx)
 	if podInfo != nil {
 		t.Fatalf("Unexpected pod %v get popped out", podInfo.Pod.Name)
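
Reviewer note: outside of the integration test above, the new PVC() wrapper can be sanity-checked with a small standalone unit test along the lines of the sketch below. The package name, file placement, and TestPVCWrapper name are made up for illustration; it only relies on the MakePod, Namespace, Name, Toleration, PVC, Container, and Obj helpers from pkg/scheduler/testing, all of which either already exist or are added by this patch.

// Illustrative only: a minimal check of the PVC() wrapper added in this patch.
package example

import (
	"testing"

	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestPVCWrapper(t *testing.T) {
	// Build a pod the same way pod3 is built in queue_test.go above.
	pod := st.MakePod().Namespace("ns").Name("pod3").Toleration("foo").PVC("pvc").Container("image").Obj()

	// PVC() should have injected exactly one volume backed by a claim of the same name.
	if len(pod.Spec.Volumes) != 1 {
		t.Fatalf("expected 1 volume, got %d", len(pod.Spec.Volumes))
	}
	claim := pod.Spec.Volumes[0].PersistentVolumeClaim
	if claim == nil || claim.ClaimName != "pvc" {
		t.Errorf("expected a PVC volume with claim name %q, got %+v", "pvc", pod.Spec.Volumes[0].VolumeSource)
	}
}

Because the wrapper reuses the same name for both the Volume and its ClaimName, queue_test.go can refer to the non-existing claim simply as "pvc" when asserting that Pod3 stays unschedulable.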