mirror of
https://github.com/k3s-io/kubernetes.git
Fine-grained QueueHints for the nodeaffinity plugin.
* Skip queue on unrelated change that keeps the pod schedulable when QueueHints are enabled
* Split the add case from the QHints-disabled case
* Remove the case when QHints are disabled
* Remove two QHint alternatives in unit tests
* more fine-grained Node QHint for NodeResourceFit plugin
* Return early when the updated Node causes an unmatch
* Revert "more fine-grained Node QHint for NodeResourceFit plugin" (this reverts commit dfbceb60e0c1c4e47748c12722d9ed6dba1a8366)
* Add integration test for requeue of a pod previously rejected by the NodeAffinity plugin when a suitable Node is added
* Add integration test for a Node update operation that does not trigger requeue in the NodeAffinity plugin
* Remove inaccurate comment
* Apply review comments
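The intent behind the fine-grained hint is that a pod rejected by NodeAffinity should only be requeued when a Node event can actually flip that plugin's verdict: a Node update requeues the pod only if the node did not match before the change but matches after it, and a Node add requeues it only if the new node matches. Below is a minimal standalone sketch of that decision logic, not the plugin's actual code: the Hint type, the function names, and the reduction of node affinity to a single required label are illustrative assumptions. The diff that follows adds the corresponding integration-test cases to TestCoreResourceEnqueue.

package main

import "fmt"

// Hint mirrors the scheduler's queueing-hint outcome in a simplified form:
// either the event may make the pod schedulable (Queue) or it cannot (QueueSkip).
type Hint string

const (
	Queue     Hint = "Queue"     // requeue the pod for another scheduling attempt
	QueueSkip Hint = "QueueSkip" // ignore the event for this pod
)

// nodeMatches stands in for the NodeAffinity check, reduced here to a single
// required label (key=value) for illustration.
func nodeMatches(nodeLabels map[string]string, key, value string) bool {
	return nodeLabels[key] == value
}

// hintOnNodeUpdate returns Queue only when the update changes the verdict from
// "no match" to "match"; any other update is irrelevant to a pod previously
// rejected by NodeAffinity.
func hintOnNodeUpdate(oldLabels, newLabels map[string]string, key, value string) Hint {
	if nodeMatches(newLabels, key, value) && !nodeMatches(oldLabels, key, value) {
		return Queue
	}
	return QueueSkip
}

// hintOnNodeAdd returns Queue only when the newly added node matches the
// pod's requirement.
func hintOnNodeAdd(newLabels map[string]string, key, value string) Hint {
	if nodeMatches(newLabels, key, value) {
		return Queue
	}
	return QueueSkip
}

func main() {
	// Like the first test case: the node's "group" label is updated to "b",
	// the value the pod requires, so the verdict flips and the pod is requeued.
	// (The node's previous label value here is an illustrative assumption.)
	fmt.Println(hintOnNodeUpdate(
		map[string]string{"group": "a"},
		map[string]string{"group": "b"},
		"group", "b")) // Queue

	// Like the second test case: an unrelated label is added to a node that
	// already matched group=a, so the pod is not requeued.
	fmt.Println(hintOnNodeUpdate(
		map[string]string{"group": "a"},
		map[string]string{"group": "a", "node": "fake-node"},
		"group", "a")) // QueueSkip

	// Like the third test case: a node with group=b is added, which requeues
	// the pod requiring group=b but not the one requiring group=c.
	fmt.Println(hintOnNodeAdd(map[string]string{"group": "b"}, "group", "b")) // Queue
	fmt.Println(hintOnNodeAdd(map[string]string{"group": "b"}, "group", "c")) // QueueSkip
}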
@@ -271,7 +271,48 @@ func TestCoreResourceEnqueue(t *testing.T) {
				// It causes pod1 to be requeued.
				// It causes pod2 not to be requeued.
				if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("fake-node1").Label("group", "b").Obj(), metav1.UpdateOptions{}); err != nil {
					return fmt.Errorf("failed to update the node: %w", err)
				}
				return nil
			},
			wantRequeuedPods: sets.New("pod1"),
		},
		{
			name: "Pod rejected by the NodeAffinity plugin is not requeued when an updated Node hasn't changed the 'match' verdict",
			initialNodes: []*v1.Node{
				st.MakeNode().Name("node1").Label("group", "a").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
				st.MakeNode().Name("node2").Label("group", "b").Obj()},
			pods: []*v1.Pod{
				// - The initial pod would be accepted by the NodeAffinity plugin for node1, but will be blocked by the NodeResources plugin.
				// - The pod will be blocked by the NodeAffinity plugin for node2, therefore we know NodeAffinity will be queried for a QHint for both test nodes.
				st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"a"}, st.NodeSelectorTypeMatchExpressions).Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Container("image").Obj(),
			},
			triggerFn: func(testCtx *testutils.TestContext) error {
				// Trigger a NodeUpdate event to add a new label.
				// It won't cause the pod to be requeued, because there was already a match before the update, meaning this plugin wasn't blocking the scheduling.
				if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, st.MakeNode().Name("node1").Label("group", "a").Label("node", "fake-node").Obj(), metav1.UpdateOptions{}); err != nil {
					return fmt.Errorf("failed to update the node: %w", err)
				}
				return nil
			},
			wantRequeuedPods:          sets.Set[string]{},
			enableSchedulingQueueHint: []bool{true},
		},
		{
			name:         "Pod rejected by the NodeAffinity plugin is requeued when a Node is added",
			initialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Label("group", "a").Obj()},
			pods: []*v1.Pod{
				// - Pod1 will be rejected by the NodeAffinity plugin.
				st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
				// - Pod2 will be rejected by the NodeAffinity plugin.
				st.MakePod().Name("pod2").NodeAffinityIn("group", []string{"c"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
			},
			triggerFn: func(testCtx *testutils.TestContext) error {
				// Trigger a NodeAdd event with the awaited label.
				// It causes pod1 to be requeued.
				// It causes pod2 not to be requeued.
				if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, st.MakeNode().Name("fake-node2").Label("group", "b").Obj(), metav1.CreateOptions{}); err != nil {
					return fmt.Errorf("failed to create the node: %w", err)
				}
				return nil
			},
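The enableSchedulingQueueHint: []bool{true} field in the second case above restricts that scenario to runs with queueing hints enabled; per the commit message, the equivalent QHints-disabled case was removed, presumably because without hints the coarse Node update event requeues the pod regardless of whether the change is relevant. A minimal, illustrative sketch of that per-case gating pattern follows; the defaulting-to-both-settings behavior and all names here are assumptions, not the test harness's actual code.

package main

import "fmt"

// testCase models just the gating aspect of the integration-test table.
type testCase struct {
	name                      string
	enableSchedulingQueueHint []bool // empty is assumed to mean: run with the feature both off and on
}

// gateSettings returns the feature-gate values a case should run under.
func gateSettings(tc testCase) []bool {
	if len(tc.enableSchedulingQueueHint) != 0 {
		return tc.enableSchedulingQueueHint
	}
	return []bool{false, true}
}

func main() {
	cases := []testCase{
		{name: "requeued when a suitable Node is added"},
		{name: "not requeued when the 'match' verdict is unchanged", enableSchedulingQueueHint: []bool{true}},
	}
	for _, tc := range cases {
		for _, enabled := range gateSettings(tc) {
			// In the real test, the scenario runs here with the
			// SchedulerQueueingHints feature gate set to 'enabled'
			// and the requeued pods compared against wantRequeuedPods.
			fmt.Printf("%s (QueueingHints=%v)\n", tc.name, enabled)
		}
	}
}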