diff --git a/pkg/scheduler/framework/plugins/noderesources/fit.go b/pkg/scheduler/framework/plugins/noderesources/fit.go index a6905387539..eac24577a37 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -268,14 +268,14 @@ func (f *Fit) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithH } return []framework.ClusterEventWithHint{ - {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodChange}, + {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodEvent}, {Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}, QueueingHintFn: f.isSchedulableAfterNodeChange}, }, nil } -// isSchedulableAfterPodChange is invoked whenever a pod deleted or updated. It checks whether +// isSchedulableAfterPodEvent is invoked whenever a pod is deleted or scaled down. It checks whether // that change made a previously unschedulable pod schedulable. -func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { +func (f *Fit) isSchedulableAfterPodEvent(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { originalPod, modifiedPod, err := schedutil.As[*v1.Pod](oldObj, newObj) if err != nil { return framework.Queue, err @@ -286,25 +286,24 @@ func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldOb logger.V(5).Info("the deleted pod was unscheduled and it wouldn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod)) return framework.QueueSkip, nil } + + // any deletion event to a scheduled pod could make the unscheduled pod schedulable. 
logger.V(5).Info("another scheduled pod was deleted, and it may make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod)) return framework.Queue, nil } if !f.enableInPlacePodVerticalScaling { - // If InPlacePodVerticalScaling (KEP 1287) is disabled, it cannot free up resources. + // If InPlacePodVerticalScaling (KEP 1287) is disabled, the pod scale down event cannot free up any resources. logger.V(5).Info("another pod was modified, but InPlacePodVerticalScaling is disabled, so it doesn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) return framework.QueueSkip, nil } - // Modifications may or may not be relevant. We only care about modifications that - // change the other pod's resource request and the resource is also requested by the - // pod we are trying to schedule. - if !f.isResourceScaleDown(pod, originalPod, modifiedPod) { + if !f.isSchedulableAfterPodScaleDown(pod, originalPod, modifiedPod) { if loggerV := logger.V(10); loggerV.Enabled() { // Log more information. 
- loggerV.Info("another Pod got modified, but the modification isn't related to the resource request", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", cmp.Diff(originalPod, modifiedPod)) + loggerV.Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", cmp.Diff(originalPod, modifiedPod)) } else { - logger.V(5).Info("another Pod got modified, but the modification isn't related to the resource request", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) + logger.V(5).Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod)) } return framework.QueueSkip, nil } @@ -313,12 +312,17 @@ func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldOb return framework.Queue, nil } -// isResourceScaleDown checks whether an update event may make the pod schedulable. Specifically: -// - Returns true when an update event shows a scheduled pod's resource request got reduced. -// - Returns true when an update event is for the unscheduled pod itself, and it shows the pod's resource request got reduced. -func (f *Fit) isResourceScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool { - if modifiedPod.UID != targetPod.UID && modifiedPod.Spec.NodeName == "" { - // If the update event is not for targetPod and a scheduled Pod, +// isSchedulableAfterPodScaleDown checks whether the scale down event may make the target pod schedulable. Specifically: +// - Returns true when the update event is for the target pod itself. +// - Returns true when the update event shows a scheduled pod's resource request that the target pod also requests got reduced. 
+func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool { + if modifiedPod.UID == targetPod.UID { + // If the scaling down event is for targetPod, it would make targetPod schedulable. + return true + } + + if modifiedPod.Spec.NodeName == "" { + // If the update event is for an unscheduled Pod, // it wouldn't make targetPod schedulable. return false } diff --git a/pkg/scheduler/framework/plugins/noderesources/fit_test.go b/pkg/scheduler/framework/plugins/noderesources/fit_test.go index 0f5ed1f097a..810cb0f6d76 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit_test.go @@ -1171,51 +1171,38 @@ func Test_isSchedulableAfterPodChange(t *testing.T) { expectedHint: framework.QueueSkip, }, "skip-queue-on-disable-inplace-pod-vertical-scaling": { - pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), - oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(), + pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), + oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(), + // (Actually, this scale down cannot happen when InPlacePodVerticalScaling is disabled.) 
newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(), enableInPlacePodVerticalScaling: false, expectedHint: framework.QueueSkip, }, "skip-queue-on-other-unscheduled-pod": { - pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid0").Obj(), - oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).UID("uid1").Obj(), - newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid1").Obj(), + pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid0").Obj(), + oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).UID("uid1").Obj(), + newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid1").Obj(), enableInPlacePodVerticalScaling: true, expectedHint: framework.QueueSkip, }, - "skip-queue-on-other-pod-non-resource-changes": { - pod: &v1.Pod{}, - oldObj: st.MakePod().Name("pod2").Label("k", "v").Node("fake").Obj(), - newObj: st.MakePod().Name("pod2").Label("foo", "bar").Node("fake").Obj(), - enableInPlacePodVerticalScaling: true, - expectedHint: framework.QueueSkip, - }, - "skip-queue-on-other-pod-unrelated-resource-changes": { - pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), - oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(), - newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(), - enableInPlacePodVerticalScaling: true, - expectedHint: framework.QueueSkip, - }, - "skip-queue-on-other-pod-resource-scale-up": { - pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), - oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(), - 
newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(), + "skip-queue-on-other-pod-unrelated-resource-scaled-down": { + pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), + oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(), + newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(), enableInPlacePodVerticalScaling: true, expectedHint: framework.QueueSkip, }, "queue-on-other-pod-some-resource-scale-down": { - pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), - oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(), - newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(), + pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), + oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(), + newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(), enableInPlacePodVerticalScaling: true, expectedHint: framework.Queue, }, "queue-on-target-pod-some-resource-scale-down": { - pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), - oldObj: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(), - newObj: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), + pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), + oldObj: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(), + newObj: 
st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(), enableInPlacePodVerticalScaling: true, expectedHint: framework.Queue, }, @@ -1230,7 +1217,7 @@ func Test_isSchedulableAfterPodChange(t *testing.T) { if err != nil { t.Fatal(err) } - actualHint, err := p.(*Fit).isSchedulableAfterPodChange(logger, tc.pod, tc.oldObj, tc.newObj) + actualHint, err := p.(*Fit).isSchedulableAfterPodEvent(logger, tc.pod, tc.oldObj, tc.newObj) if tc.expectedErr { require.Error(t, err) return