chore(noderesourcefit): correct tests and clean up comments

Kensei Nakada 2024-09-17 13:25:26 +09:00
parent 048c8536d6
commit 4a81a85449
2 changed files with 37 additions and 46 deletions

@@ -268,14 +268,14 @@ func (f *Fit) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithH
 	}
 	return []framework.ClusterEventWithHint{
-		{Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodChange},
+		{Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodEvent},
 		{Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}, QueueingHintFn: f.isSchedulableAfterNodeChange},
 	}, nil
 }
 
-// isSchedulableAfterPodChange is invoked whenever a pod deleted or updated. It checks whether
+// isSchedulableAfterPodEvent is invoked whenever a pod deleted or scaled down. It checks whether
 // that change made a previously unschedulable pod schedulable.
-func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
+func (f *Fit) isSchedulableAfterPodEvent(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
 	originalPod, modifiedPod, err := schedutil.As[*v1.Pod](oldObj, newObj)
 	if err != nil {
 		return framework.Queue, err
@@ -286,25 +286,24 @@ func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldOb
 			logger.V(5).Info("the deleted pod was unscheduled and it wouldn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
 			return framework.QueueSkip, nil
 		}
+		// any deletion event to a scheduled pod could make the unscheduled pod schedulable.
 		logger.V(5).Info("another scheduled pod was deleted, and it may make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
 		return framework.Queue, nil
 	}
 
 	if !f.enableInPlacePodVerticalScaling {
-		// If InPlacePodVerticalScaling (KEP 1287) is disabled, it cannot free up resources.
+		// If InPlacePodVerticalScaling (KEP 1287) is disabled, the pod scale down event cannot free up any resources.
 		logger.V(5).Info("another pod was modified, but InPlacePodVerticalScaling is disabled, so it doesn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
 		return framework.QueueSkip, nil
 	}
 
-	// Modifications may or may not be relevant. We only care about modifications that
-	// change the other pod's resource request and the resource is also requested by the
-	// pod we are trying to schedule.
-	if !f.isResourceScaleDown(pod, originalPod, modifiedPod) {
+	if !f.isSchedulableAfterPodScaleDown(pod, originalPod, modifiedPod) {
 		if loggerV := logger.V(10); loggerV.Enabled() {
 			// Log more information.
-			loggerV.Info("another Pod got modified, but the modification isn't related to the resource request", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", cmp.Diff(originalPod, modifiedPod))
+			loggerV.Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", cmp.Diff(originalPod, modifiedPod))
 		} else {
-			logger.V(5).Info("another Pod got modified, but the modification isn't related to the resource request", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
+			logger.V(5).Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
 		}
 		return framework.QueueSkip, nil
 	}
@@ -313,12 +312,17 @@ func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldOb
 	return framework.Queue, nil
 }
 
-// isResourceScaleDown checks whether an update event may make the pod schedulable. Specifically:
-// - Returns true when an update event shows a scheduled pod's resource request got reduced.
-// - Returns true when an update event is for the unscheduled pod itself, and it shows the pod's resource request got reduced.
-func (f *Fit) isResourceScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool {
-	if modifiedPod.UID != targetPod.UID && modifiedPod.Spec.NodeName == "" {
-		// If the update event is not for targetPod and a scheduled Pod,
+// isSchedulableAfterPodScaleDown checks whether the scale down event may make the target pod schedulable. Specifically:
+// - Returns true when the update event is for the target pod itself.
+// - Returns true when the update event shows a scheduled pod's resource request that the target pod also requests got reduced.
+func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool {
+	if modifiedPod.UID == targetPod.UID {
+		// If the scaling down event is for targetPod, it would make targetPod schedulable.
+		return true
+	}
+
+	if modifiedPod.Spec.NodeName == "" {
+		// If the update event is for a unscheduled Pod,
 		// it wouldn't make targetPod schedulable.
 		return false
 	}
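
The hunk above shows only the early-exit checks of isSchedulableAfterPodScaleDown; the per-resource comparison that the new comment describes ("a scheduled pod's resource request that the target pod also requests got reduced") is not part of this diff. Below is a minimal, self-contained sketch of that idea, assuming summed container requests and a simple Quantity comparison; the helper names (podRequests, scaleDownForTarget) are illustrative only and are not the plugin's actual code.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podRequests sums the container requests of a pod into a single ResourceList.
// (Illustrative helper; the real plugin uses its own resource accounting.)
func podRequests(pod *v1.Pod) v1.ResourceList {
	total := v1.ResourceList{}
	for _, c := range pod.Spec.Containers {
		for name, q := range c.Resources.Requests {
			cur := total[name]
			cur.Add(q)
			total[name] = cur
		}
	}
	return total
}

// scaleDownForTarget reports whether modifiedPod's requests shrank, compared to
// originalPod, for any resource that targetPod also requests.
func scaleDownForTarget(targetPod, originalPod, modifiedPod *v1.Pod) bool {
	orig, mod := podRequests(originalPod), podRequests(modifiedPod)
	for name := range podRequests(targetPod) {
		origQ, modQ := orig[name], mod[name]
		if modQ.Cmp(origQ) < 0 { // this resource got scaled down
			return true
		}
	}
	return false
}

func main() {
	mkPod := func(cpu string) *v1.Pod {
		return &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
			Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse(cpu),
			}},
		}}}}
	}
	// Mirrors the "queue-on-other-pod-some-resource-scale-down" case below: CPU 2 -> 1.
	fmt.Println(scaleDownForTarget(mkPod("1"), mkPod("2"), mkPod("1"))) // true
}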

@@ -1171,51 +1171,38 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
 			expectedHint: framework.QueueSkip,
 		},
 		"skip-queue-on-disable-inplace-pod-vertical-scaling": {
 			pod:    st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
 			oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
+			// (Actually, this scale down cannot happen when InPlacePodVerticalScaling is disabled.)
 			newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
 			enableInPlacePodVerticalScaling: false,
 			expectedHint: framework.QueueSkip,
 		},
 		"skip-queue-on-other-unscheduled-pod": {
-			pod:    st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid0").Obj(),
-			oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).UID("uid1").Obj(),
-			newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid1").Obj(),
+			pod:    st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid0").Obj(),
+			oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).UID("uid1").Obj(),
+			newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid1").Obj(),
 			enableInPlacePodVerticalScaling: true,
 			expectedHint: framework.QueueSkip,
 		},
-		"skip-queue-on-other-pod-non-resource-changes": {
-			pod:    &v1.Pod{},
-			oldObj: st.MakePod().Name("pod2").Label("k", "v").Node("fake").Obj(),
-			newObj: st.MakePod().Name("pod2").Label("foo", "bar").Node("fake").Obj(),
-			enableInPlacePodVerticalScaling: true,
-			expectedHint: framework.QueueSkip,
-		},
-		"skip-queue-on-other-pod-unrelated-resource-changes": {
-			pod:    st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
-			oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
-			newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
-			enableInPlacePodVerticalScaling: true,
-			expectedHint: framework.QueueSkip,
-		},
-		"skip-queue-on-other-pod-resource-scale-up": {
-			pod:    st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
-			oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
-			newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
+		"skip-queue-on-other-pod-unrelated-resource-scaled-down": {
+			pod:    st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+			oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
+			newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
 			enableInPlacePodVerticalScaling: true,
 			expectedHint: framework.QueueSkip,
 		},
 		"queue-on-other-pod-some-resource-scale-down": {
-			pod:    st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
-			oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
-			newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
+			pod:    st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+			oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
+			newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
 			enableInPlacePodVerticalScaling: true,
 			expectedHint: framework.Queue,
 		},
 		"queue-on-target-pod-some-resource-scale-down": {
-			pod:    st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
-			oldObj: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
-			newObj: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+			pod:    st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
+			oldObj: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
+			newObj: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
 			enableInPlacePodVerticalScaling: true,
 			expectedHint: framework.Queue,
 		},
@@ -1230,7 +1217,7 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
 			if err != nil {
 				t.Fatal(err)
 			}
-			actualHint, err := p.(*Fit).isSchedulableAfterPodChange(logger, tc.pod, tc.oldObj, tc.newObj)
+			actualHint, err := p.(*Fit).isSchedulableAfterPodEvent(logger, tc.pod, tc.oldObj, tc.newObj)
 			if tc.expectedErr {
 				require.Error(t, err)
 				return