From 24a28766d4dd3a653680bb5e4b0be7f21a8c4f52 Mon Sep 17 00:00:00 2001
From: googs1025
Date: Sat, 21 Sep 2024 23:34:05 +0800
Subject: [PATCH] chore(scheduler dra): improve dra queue hint unit test

---
 .../dynamicresources/dynamicresources_test.go | 128 ++++++++++++++++--
 pkg/scheduler/testing/wrappers.go             |  15 +-
 2 files changed, 133 insertions(+), 10 deletions(-)

diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go
index 0710bfb1121..2b18ed1b88e 100644
--- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go
+++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go
@@ -125,6 +125,9 @@ var (
 		Namespace(namespace).
 		Request(className).
 		Obj()
+	deletingClaim = st.FromResourceClaim(claim).
+			OwnerReference(podName, podUID, podKind).
+			Deleting(metav1.Now()).Obj()
 	pendingClaim = st.FromResourceClaim(claim).
 			OwnerReference(podName, podUID, podKind).
 			Obj()
@@ -188,6 +191,8 @@ var (
 			ResourceClaims(resourceapi.ResourceClaimSchedulingStatus{Name: resourceName},
 				resourceapi.ResourceClaimSchedulingStatus{Name: resourceName2}).
 			Obj()
+
+	resourceSlice        = st.MakeResourceSlice(nodeName, driver).Device("instance-1", nil).Obj()
+	resourceSliceUpdated = st.FromResourceSlice(resourceSlice).Device("instance-1", map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{attrName: {BoolValue: ptr.To(true)}}).Obj()
 )
 
 func reserve(claim *resourceapi.ResourceClaim, pod *v1.Pod) *resourceapi.ResourceClaim {
@@ -1499,16 +1504,16 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) {
 			gotHint, err := testCtx.p.isSchedulableAfterClaimChange(logger, tc.pod, oldObj, newObj)
 			if tc.wantErr {
 				if err == nil {
-					t.Fatal("expected an error, got none")
+					t.Fatal("want an error, got none")
 				}
 				return
 			}
 
 			if err != nil {
-				t.Fatalf("expected no error, got: %v", err)
+				t.Fatalf("want no error, got: %v", err)
 			}
 			if tc.wantHint != gotHint {
-				t.Fatalf("expected hint %#v, got %#v", tc.wantHint, gotHint)
+				t.Fatalf("want %v, got %v", tc.wantHint.String(), gotHint.String())
 			}
 		})
 	}
 }
@@ -1577,16 +1582,16 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
 			gotHint, err := testCtx.p.isSchedulableAfterPodChange(logger, tc.pod, nil, tc.obj)
 			if tc.wantErr {
 				if err == nil {
-					t.Fatal("expected an error, got none")
+					t.Fatal("want an error, got none")
 				}
 				return
 			}
 
 			if err != nil {
-				t.Fatalf("expected no error, got: %v", err)
+				t.Fatalf("want no error, got: %v", err)
 			}
 			if tc.wantHint != gotHint {
-				t.Fatalf("expected hint %#v, got %#v", tc.wantHint, gotHint)
+				t.Fatalf("want %v, got %v", tc.wantHint.String(), gotHint.String())
 			}
 		})
 	}
 }
@@ -1716,16 +1721,121 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) {
 			gotHint, err := testCtx.p.isSchedulableAfterPodSchedulingContextChange(logger, tc.pod, tc.oldObj, tc.newObj)
 			if tc.wantErr {
 				if err == nil {
-					t.Fatal("expected an error, got none")
+					t.Fatal("want an error, got none")
 				}
 				return
 			}
 
 			if err != nil {
-				t.Fatalf("expected no error, got: %v", err)
+				t.Fatalf("want no error, got: %v", err)
 			}
 			if tc.wantHint != gotHint {
-				t.Fatalf("expected hint %#v, got %#v", tc.wantHint, gotHint)
+				t.Fatalf("want %v, got %v", tc.wantHint.String(), gotHint.String())
 			}
 		})
 	}
 }
+
+func Test_isSchedulableAfterResourceSliceChange(t *testing.T) {
+	testcases := map[string]struct {
+		pod            *v1.Pod
+		claims         []*resourceapi.ResourceClaim
+		oldObj, newObj interface{}
+		wantHint       framework.QueueingHint
+		wantErr        bool
+	}{
+		"queue-new-resource-slice": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim},
+			newObj:   resourceSlice,
+			wantHint: framework.Queue,
+		},
+		"queue-update-resource-slice-with-allocated-claim": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{allocatedClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.Queue,
+		},
+		"skip-update-resource-slice-with-deleting-claim": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{deletingClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.QueueSkip,
+		},
+		"queue-update-resource-slice-with-two-claims": {
+			pod:      podWithTwoClaimNames,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim, pendingClaim2},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.Queue,
+		},
+		"skip-update-resource-slice-when-one-of-two-claims-is-missing": {
+			pod:      podWithTwoClaimNames,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.QueueSkip,
+		},
+		"queue-update-resource-slice": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.Queue,
+		},
+		"skip-when-resource-claim-not-found": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.QueueSkip,
+		},
+		"backoff-unexpected-object-with-oldObj-newObj": {
+			pod:     podWithClaimName,
+			claims:  []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:  scheduling,
+			newObj:  scheduling,
+			wantErr: true,
+		},
+		"backoff-unexpected-object-with-oldObj": {
+			pod:     podWithClaimName,
+			claims:  []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:  scheduling,
+			newObj:  resourceSlice,
+			wantErr: true,
+		},
+		"backoff-unexpected-object-with-newObj": {
+			pod:     podWithClaimName,
+			claims:  []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:  resourceSlice,
+			newObj:  scheduling,
+			wantErr: true,
+		},
+	}
+
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			logger, _ := ktesting.NewTestContext(t)
+			features := feature.Features{
+				EnableDynamicResourceAllocation: true,
+			}
+			testCtx := setup(t, nil, tc.claims, nil, nil, nil, features)
+			gotHint, err := testCtx.p.isSchedulableAfterResourceSliceChange(logger, tc.pod, tc.oldObj, tc.newObj)
+			if tc.wantErr {
+				if err == nil {
+					t.Fatal("want an error, got none")
+				}
+				return
+			}
+
+			if err != nil {
+				t.Fatalf("want no error, got: %v", err)
+			}
+			if tc.wantHint != gotHint {
+				t.Fatalf("want %v, got %v", tc.wantHint.String(), gotHint.String())
+			}
+		})
+	}
+}
diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go
index 570e37c5710..6ec85003535 100644
--- a/pkg/scheduler/testing/wrappers.go
+++ b/pkg/scheduler/testing/wrappers.go
@@ -983,6 +983,12 @@ func (wrapper *ResourceClaimWrapper) Allocation(allocation *resourceapi.Allocati
 	return wrapper
 }
 
+// Deleting sets the deletion timestamp of the inner object.
+func (wrapper *ResourceClaimWrapper) Deleting(time metav1.Time) *ResourceClaimWrapper {
+	wrapper.ResourceClaim.DeletionTimestamp = &time
+	return wrapper
+}
+
 // Structured turns a "normal" claim into one which was allocated via structured parameters.
 // The only difference is that there is no controller name and the special finalizer
 // gets added.
@@ -1007,7 +1013,7 @@ func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourceapi.Resour
 	return wrapper
 }
 
-// ReservedFor sets that field of the inner object given information about one pod.
+// ReservedForPod sets that field of the inner object given information about one pod.
 func (wrapper *ResourceClaimWrapper) ReservedForPod(podName string, podUID types.UID) *ResourceClaimWrapper {
 	return wrapper.ReservedFor(resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: podUID})
 }
@@ -1105,10 +1111,16 @@ func MakeResourceSlice(nodeName, driverName string) *ResourceSliceWrapper {
 	return wrapper
 }
 
+// FromResourceSlice creates a ResourceSlice wrapper from some existing object.
+func FromResourceSlice(other *resourceapi.ResourceSlice) *ResourceSliceWrapper {
+	return &ResourceSliceWrapper{*other.DeepCopy()}
+}
+
 func (wrapper *ResourceSliceWrapper) Obj() *resourceapi.ResourceSlice {
 	return &wrapper.ResourceSlice
 }
 
+// Devices appends devices with the given names to the inner object.
 func (wrapper *ResourceSliceWrapper) Devices(names ...string) *ResourceSliceWrapper {
 	for _, name := range names {
 		wrapper.Spec.Devices = append(wrapper.Spec.Devices, resourceapi.Device{Name: name})
@@ -1116,6 +1128,7 @@ func (wrapper *ResourceSliceWrapper) Devices(names ...string) *ResourceSliceWrap
 	return wrapper
 }
 
+// Device appends one device with the given name and attributes to the inner object.
 func (wrapper *ResourceSliceWrapper) Device(name string, attrs map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) *ResourceSliceWrapper {
 	wrapper.Spec.Devices = append(wrapper.Spec.Devices, resourceapi.Device{Name: name, Basic: &resourceapi.BasicDevice{Attributes: attrs}})
 	return wrapper
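
Reviewer note, not part of the patch: a minimal sketch of how the new wrapper helpers compose. The resource API import path (v1alpha3 here), the MakeResourceClaim/Name/Namespace helpers, and the claim/node/driver names are assumptions inferred from the surrounding test code; only FromResourceClaim, Deleting, MakeResourceSlice, FromResourceSlice, and Device are what this change adds or exercises.

```go
package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1alpha3" // assumed API version for this branch
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	"k8s.io/utils/ptr"
)

func main() {
	// Clone an existing claim and mark the copy as being deleted,
	// mirroring the deletingClaim fixture in the test above.
	// MakeResourceClaim/Name/Namespace are assumed to exist like the
	// other Make* wrappers in this package.
	claim := st.MakeResourceClaim().Name("claim-1").Namespace("default").Obj()
	deletingClaim := st.FromResourceClaim(claim).Deleting(metav1.Now()).Obj()
	fmt.Println(deletingClaim.DeletionTimestamp.IsZero()) // false

	// Build a slice with one device, then derive an "updated" copy whose
	// device carries a boolean attribute, as the fixtures above do.
	slice := st.MakeResourceSlice("node-1", "driver.example.com").
		Device("instance-1", nil).Obj()
	updated := st.FromResourceSlice(slice).
		Device("instance-1", map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
			"healthy": {BoolValue: ptr.To(true)},
		}).Obj()

	// Device appends rather than replaces, so the updated slice carries
	// two entries named "instance-1".
	fmt.Println(len(slice.Spec.Devices), len(updated.Spec.Devices)) // 1 2
}
```

Because Device appends, resourceSliceUpdated in the fixtures ends up with a duplicate "instance-1" entry. That appears harmless for the queueing-hint tests, which only need the old and new objects to differ, but building the updated slice from MakeResourceSlice directly would avoid the duplicate if it ever matters.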