register unschedulable plugin when prefilter with NodeNames

This commit is contained in:
olderTaoist
2024-07-02 09:26:58 +08:00
committed by mengxiangyong
parent d236a9127f
commit b478621596
9 changed files with 80 additions and 38 deletions

View File

@@ -188,7 +188,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
tests := []struct {
name string
// initialNode is the Node to be created at first.
initialNode *v1.Node
initialNodes []*v1.Node
// initialPod is the Pod to be created at first if it's not empty.
initialPod *v1.Pod
// pods are the list of Pods to be created.
@@ -202,8 +202,8 @@ func TestCoreResourceEnqueue(t *testing.T) {
enableSchedulingQueueHint []bool
}{
{
name: "Pod without a required toleration to a node isn't requeued to activeQ",
initialNode: st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj(),
name: "Pod without a required toleration to a node isn't requeued to activeQ",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj()},
pods: []*v1.Pod{
// - Pod1 doesn't have the required toleration and will be rejected by the TaintToleration plugin.
// (TaintToleration plugin is evaluated before NodeResourcesFit plugin.)
@@ -224,9 +224,9 @@ func TestCoreResourceEnqueue(t *testing.T) {
enableSchedulingQueueHint: []bool{false, true},
},
{
name: "Pod rejected by the PodAffinity plugin is requeued when a new Node is created and turned to ready",
initialNode: st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
initialPod: st.MakePod().Label("anti", "anti").Name("pod1").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Node("fake-node").Obj(),
name: "Pod rejected by the PodAffinity plugin is requeued when a new Node is created and turned to ready",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
initialPod: st.MakePod().Label("anti", "anti").Name("pod1").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Node("fake-node").Obj(),
pods: []*v1.Pod{
// - Pod2 will be rejected by the PodAffinity plugin.
st.MakePod().Label("anti", "anti").Name("pod2").PodAntiAffinityExists("anti", "node", st.PodAntiAffinityWithRequiredReq).Container("image").Obj(),
@@ -254,8 +254,8 @@ func TestCoreResourceEnqueue(t *testing.T) {
enableSchedulingQueueHint: []bool{false, true},
},
{
name: "Pod updated with toleration requeued to activeQ",
initialNode: st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj(),
name: "Pod updated with toleration requeued to activeQ",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj()},
pods: []*v1.Pod{
// - Pod1 doesn't have the required toleration and will be rejected by the TaintToleration plugin.
st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Container("image").Obj(),
@@ -272,8 +272,8 @@ func TestCoreResourceEnqueue(t *testing.T) {
enableSchedulingQueueHint: []bool{false, true},
},
{
name: "Pod got resource scaled down requeued to activeQ",
initialNode: st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
name: "Pod got resource scaled down requeued to activeQ",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()},
pods: []*v1.Pod{
// - Pod1 requests a large amount of CPU and will be rejected by the NodeResourcesFit plugin.
st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),
@@ -290,8 +290,8 @@ func TestCoreResourceEnqueue(t *testing.T) {
enableSchedulingQueueHint: []bool{false, true},
},
{
name: "Updating pod condition doesn't retry scheduling if the Pod was rejected by TaintToleration",
initialNode: st.MakeNode().Name("fake-node").Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj(),
name: "Updating pod condition doesn't retry scheduling if the Pod was rejected by TaintToleration",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Taints([]v1.Taint{{Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule}}).Obj()},
pods: []*v1.Pod{
// - Pod1 doesn't have the required toleration and will be rejected by the TaintToleration plugin.
st.MakePod().Name("pod1").Container("image").Obj(),
@@ -327,6 +327,41 @@ func TestCoreResourceEnqueue(t *testing.T) {
// because QHint of TaintToleration would decide to ignore a Pod update.
enableSchedulingQueueHint: []bool{true},
},
{
// The test case makes sure that PreFilter plugins returning PreFilterResult are also inserted into pInfo.UnschedulablePlugins
// meaning, they're taken into consideration during requeuing flow of the queue.
// https://github.com/kubernetes/kubernetes/issues/122018
name: "Pod rejected by the PreFilter of NodeAffinity plugin and Filter of NodeResourcesFit is requeued based on both plugins",
initialNodes: []*v1.Node{
st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
st.MakeNode().Name("fake-node2").Label("node", "fake-node2").Label("zone", "zone1").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
},
pods: []*v1.Pod{
// - Pod1 will be rejected by the NodeAffinity plugin and NodeResourcesFit plugin.
st.MakePod().Label("unscheduled", "plugins").Name("pod1").NodeAffinityIn("metadata.name", []string{"fake-node"}).Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) error {
// Trigger a NodeDeleted event.
// It will not requeue pod1 to activeQ,
// because both NodeAffinity and NodeResourceFit don't register Node/delete event.
err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node", metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to delete fake-node node")
}
// Trigger a NodeCreated event.
// It will requeue pod1 to activeQ, because QHint of NodeAffinity return Queue.
// It makes sure PreFilter plugins returned PreFilterResult takes an effect for sure,
// because NodeResourceFit QHint returns QueueSkip for this event actually.
node := st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("failed to create a new node: %w", err)
}
return nil
},
wantRequeuedPods: sets.New("pod1"),
},
}
for _, tt := range tests {
@@ -350,9 +385,11 @@ func TestCoreResourceEnqueue(t *testing.T) {
defer testCtx.Scheduler.SchedulingQueue.Close()
cs, ns, ctx := testCtx.ClientSet, testCtx.NS.Name, testCtx.Ctx
// Create initialNode.
if _, err := cs.CoreV1().Nodes().Create(ctx, tt.initialNode, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create an initial Node %q: %v", tt.initialNode.Name, err)
// Create one Node with a taint.
for _, node := range tt.initialNodes {
if _, err := cs.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create an initial Node %q: %v", node.Name, err)
}
}
if tt.initialPod != nil {