Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-20 10:20:51 +00:00)

Merge pull request #109442 from ahg-g/ahg-fix
Correct event registration for multiple scheduler plugins

Commit: 65178fec72
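
For context, here is a minimal standalone sketch of the matching semantics these registrations rely on (assumed behavior, using made-up local types rather than the real scheduler framework package): ActionType values act as bit flags, so registering Add | Update lets either a node add or any node update re-queue pods previously rejected by that plugin.

package main

import "fmt"

// Illustrative stand-ins for the framework's event types; the names and values
// here are assumptions for this sketch, not the scheduler framework API.
type ActionType uint64

const (
	Add ActionType = 1 << iota
	Delete
	Update
)

type ClusterEvent struct {
	Resource   string
	ActionType ActionType
}

// covers reports whether a registered event mask includes an observed action
// on the same resource kind.
func covers(registered ClusterEvent, resource string, observed ActionType) bool {
	return registered.Resource == resource && registered.ActionType&observed != 0
}

func main() {
	// Registration analogous in spirit to {Resource: Node, ActionType: Add | Update}.
	reg := ClusterEvent{Resource: "Node", ActionType: Add | Update}

	fmt.Println(covers(reg, "Node", Update)) // true: any node update can re-queue the pod
	fmt.Println(covers(reg, "Node", Delete)) // false: node deletions are not registered
}
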
@@ -83,7 +83,7 @@ func (s *preFilterState) Clone() framework.StateData {
 // failed by this plugin schedulable.
 func (pl *NodeAffinity) EventsToRegister() []framework.ClusterEvent {
     return []framework.ClusterEvent{
-        {Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel},
+        {Resource: framework.Node, ActionType: framework.Add | framework.Update},
     }
 }

@@ -43,7 +43,7 @@ const (
 // failed by this plugin schedulable.
 func (pl *NodeName) EventsToRegister() []framework.ClusterEvent {
     return []framework.ClusterEvent{
-        {Resource: framework.Node, ActionType: framework.Add},
+        {Resource: framework.Node, ActionType: framework.Add | framework.Update},
     }
 }

@@ -105,7 +105,7 @@ func (pl *NodePorts) EventsToRegister() []framework.ClusterEvent {
     return []framework.ClusterEvent{
         // Due to immutable fields `spec.containers[*].ports`, pod update events are ignored.
         {Resource: framework.Pod, ActionType: framework.Delete},
-        {Resource: framework.Node, ActionType: framework.Add},
+        {Resource: framework.Node, ActionType: framework.Add | framework.Update},
     }
 }

@@ -207,7 +207,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
 func (f *Fit) EventsToRegister() []framework.ClusterEvent {
     return []framework.ClusterEvent{
         {Resource: framework.Pod, ActionType: framework.Delete},
-        {Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeAllocatable},
+        {Resource: framework.Node, ActionType: framework.Add | framework.Update},
     }
 }

@@ -56,7 +56,7 @@ func (pl *TaintToleration) Name() string {
 // failed by this plugin schedulable.
 func (pl *TaintToleration) EventsToRegister() []framework.ClusterEvent {
     return []framework.ClusterEvent{
-        {Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeTaint},
+        {Resource: framework.Node, ActionType: framework.Add | framework.Update},
     }
 }

@@ -567,3 +567,100 @@ func TestSchedulerInformers(t *testing.T) {
         })
     }
 }
+
+func TestNodeEvents(t *testing.T) {
+    // The test verifies that unschedulable pods are re-queued
+    // on node update events. The scenario we are testing is the following:
+    // 1. Create pod1 and node1 that is small enough to only fit pod1; pod1 schedules on node1
+    // 2. Create pod2, it should be unschedulable due to insufficient cpu
+    // 3. Create node2 with a taint, pod2 should still not schedule
+    // 4. Remove the taint from node2; pod2 should now schedule on node2
+
+    testCtx := initTest(t, "node-events")
+    defer testutils.CleanupTest(t, testCtx)
+    defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+
+    // 1.1 create pod1
+    pod1, err := createPausePodWithResource(testCtx.ClientSet, "pod1", testCtx.NS.Name, &v1.ResourceList{
+        v1.ResourceCPU: *resource.NewMilliQuantity(80, resource.DecimalSI),
+    })
+    if err != nil {
+        t.Fatalf("Failed to create pod: %v", err)
+    }
+
+    // 1.2 Create node1
+    node1, err := createNode(testCtx.ClientSet, st.MakeNode().
+        Name("node-events-test-node1").
+        Capacity(map[v1.ResourceName]string{
+            v1.ResourcePods:   "32",
+            v1.ResourceCPU:    "100m",
+            v1.ResourceMemory: "30",
+        }).Obj())
+    if err != nil {
+        t.Fatalf("Failed to create %s: %v", node1.Name, err)
+    }
+
+    // 1.3 verify pod1 is scheduled
+    err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, pod1, time.Second*5)
+    if err != nil {
+        t.Errorf("Pod %s didn't schedule: %v", pod1.Name, err)
+    }
+
+    // 2. create pod2
+    pod2, err := createPausePodWithResource(testCtx.ClientSet, "pod2", testCtx.NS.Name, &v1.ResourceList{
+        v1.ResourceCPU: *resource.NewMilliQuantity(40, resource.DecimalSI),
+    })
+    if err != nil {
+        t.Fatalf("Failed to create pod %v: %v", pod2.Name, err)
+    }
+
+    if err := waitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
+        t.Errorf("Pod %v got scheduled: %v", pod2.Name, err)
+    }
+
+    // 3.1 Create node2 with a taint
+    node2 := st.MakeNode().
+        Name("node-events-test-node2").
+        Capacity(map[v1.ResourceName]string{
+            v1.ResourcePods:   "32",
+            v1.ResourceCPU:    "100m",
+            v1.ResourceMemory: "30",
+        }).
+        Label("affinity-key", "affinity-value").
+        Taints([]v1.Taint{{Key: "taint-key", Effect: v1.TaintEffectNoSchedule}}).Obj()
+    node2, err = createNode(testCtx.ClientSet, node2)
+    if err != nil {
+        t.Fatalf("Failed to create %s: %v", node2.Name, err)
+    }
+    // make sure the scheduler received the node add event by creating a pod that only fits node2
+    plugPod := st.MakePod().Name("plug-pod").Namespace(testCtx.NS.Name).Container("pause").
+        Req(map[v1.ResourceName]string{v1.ResourceCPU: "40m"}).
+        NodeAffinityIn("affinity-key", []string{"affinity-value"}).
+        Toleration("taint-key").Obj()
+    plugPod, err = testCtx.ClientSet.CoreV1().Pods(plugPod.Namespace).Create(context.TODO(), plugPod, metav1.CreateOptions{})
+    if err != nil {
+        t.Fatalf("Failed to create pod %v: %v", plugPod.Name, err)
+    }
+    err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, plugPod, time.Second*5)
+    if err != nil {
+        t.Errorf("Pod %s didn't schedule: %v", plugPod.Name, err)
+    }
+
+    // 3.2 pod2 still unschedulable
+    if err := waitForPodUnschedulable(testCtx.ClientSet, pod2); err != nil {
+        t.Errorf("Pod %v got scheduled: %v", pod2.Name, err)
+    }
+
+    // 4. Remove node taint, pod2 should schedule
+    node2.Spec.Taints = nil
+    node2, err = updateNode(testCtx.ClientSet, node2)
+    if err != nil {
+        t.Fatalf("Failed to update %s: %v", node2.Name, err)
+    }
+
+    err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, pod2, time.Second*5)
+    if err != nil {
+        t.Errorf("Pod %s didn't schedule: %v", pod2.Name, err)
+    }
+
+}
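
As a follow-up sketch (not part of this commit), one could additionally assert which node a pod ended up on once it schedules. The helper below is hypothetical: the name and placement are invented for illustration, and it assumes the imports already present in this test file (testing, context, clientset, metav1) plus only standard client-go calls.

// expectPodOnNode is a hypothetical helper, not part of the patch: it fetches
// the pod and fails the test if it was not bound to the expected node.
func expectPodOnNode(t *testing.T, cs clientset.Interface, ns, podName, nodeName string) {
    t.Helper()
    pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Failed to get pod %s: %v", podName, err)
    }
    if pod.Spec.NodeName != nodeName {
        t.Errorf("Pod %s is bound to %q, expected node %q", podName, pod.Spec.NodeName, nodeName)
    }
}

// Example usage after step 4 of TestNodeEvents (hypothetical):
//   expectPodOnNode(t, testCtx.ClientSet, testCtx.NS.Name, pod2.Name, node2.Name)
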
@@ -143,6 +143,10 @@ func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key stri
     return err
 }
 
+func updateNode(cs clientset.Interface, node *v1.Node) (*v1.Node, error) {
+    return cs.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
+}
+
 func createNode(cs clientset.Interface, node *v1.Node) (*v1.Node, error) {
     return cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
 }

@@ -362,7 +366,7 @@ func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
         }
         _, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
         return cond != nil && cond.Status == v1.ConditionFalse &&
-            cond.Reason == v1.PodReasonUnschedulable, nil
+            cond.Reason == v1.PodReasonUnschedulable && pod.Spec.NodeName == "", nil
     }
 }
