Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-01 07:47:56 +00:00)

chore(scheduler dra): improve dra queue hint unit test

parent 22a30e7cbb
commit 24a28766d4
@@ -125,6 +125,9 @@ var (
 			Namespace(namespace).
 			Request(className).
 			Obj()
+	deleteClaim = st.FromResourceClaim(claim).
+			OwnerReference(podName, podUID, podKind).
+			Deleting(metav1.Now()).Obj()
 	pendingClaim = st.FromResourceClaim(claim).
 			OwnerReference(podName, podUID, podKind).
 			Obj()
@@ -188,6 +191,8 @@ var (
 		ResourceClaims(resourceapi.ResourceClaimSchedulingStatus{Name: resourceName},
 			resourceapi.ResourceClaimSchedulingStatus{Name: resourceName2}).
 		Obj()
+	resourceSlice        = st.MakeResourceSlice(nodeName, driver).Device("instance-1", nil).Obj()
+	resourceSliceUpdated = st.FromResourceSlice(resourceSlice).Device("instance-1", map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{attrName: {BoolValue: ptr.To(true)}}).Obj()
 )

 func reserve(claim *resourceapi.ResourceClaim, pod *v1.Pod) *resourceapi.ResourceClaim {
@@ -1499,16 +1504,16 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) {
 			gotHint, err := testCtx.p.isSchedulableAfterClaimChange(logger, tc.pod, oldObj, newObj)
 			if tc.wantErr {
 				if err == nil {
-					t.Fatal("expected an error, got none")
+					t.Fatal("want an error, got none")
 				}
 				return
 			}

 			if err != nil {
-				t.Fatalf("expected no error, got: %v", err)
+				t.Fatalf("want no error, got: %v", err)
 			}
 			if tc.wantHint != gotHint {
-				t.Fatalf("expected hint %#v, got %#v", tc.wantHint, gotHint)
+				t.Fatalf("want %#v, got %#v", tc.wantHint.String(), gotHint.String())
 			}
 		})
 	}
@@ -1577,16 +1582,16 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
 			gotHint, err := testCtx.p.isSchedulableAfterPodChange(logger, tc.pod, nil, tc.obj)
 			if tc.wantErr {
 				if err == nil {
-					t.Fatal("expected an error, got none")
+					t.Fatal("want an error, got none")
 				}
 				return
 			}

 			if err != nil {
-				t.Fatalf("expected no error, got: %v", err)
+				t.Fatalf("want no error, got: %v", err)
 			}
 			if tc.wantHint != gotHint {
-				t.Fatalf("expected hint %#v, got %#v", tc.wantHint, gotHint)
+				t.Fatalf("want %#v, got %#v", tc.wantHint.String(), gotHint.String())
 			}
 		})
 	}
@@ -1716,16 +1721,121 @@ func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) {
 			gotHint, err := testCtx.p.isSchedulableAfterPodSchedulingContextChange(logger, tc.pod, tc.oldObj, tc.newObj)
 			if tc.wantErr {
 				if err == nil {
-					t.Fatal("expected an error, got none")
+					t.Fatal("want an error, got none")
 				}
 				return
 			}

 			if err != nil {
-				t.Fatalf("expected no error, got: %v", err)
+				t.Fatalf("want no error, got: %v", err)
 			}
 			if tc.wantHint != gotHint {
-				t.Fatalf("expected hint %#v, got %#v", tc.wantHint, gotHint)
+				t.Fatalf("want %#v, got %#v", tc.wantHint.String(), gotHint.String())
 			}
 		})
 	}
 }
+
+func Test_isSchedulableAfterResourceSliceChange(t *testing.T) {
+	testcases := map[string]struct {
+		pod            *v1.Pod
+		claims         []*resourceapi.ResourceClaim
+		oldObj, newObj interface{}
+		wantHint       framework.QueueingHint
+		wantErr        bool
+	}{
+		"queue-new-resource-slice": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim},
+			newObj:   resourceSlice,
+			wantHint: framework.Queue,
+		},
+		"queue-update-resource-slice-with-claim-is-allocated": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{allocatedClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.Queue,
+		},
+		"skip-update-resource-slice-with-claim-is-deleting": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{deleteClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.QueueSkip,
+		},
+		"queue-update-resource-slice-with-two-claims": {
+			pod:      podWithTwoClaimNames,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim, pendingClaim2},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.Queue,
+		},
+		"skip-update-resource-slice-with-two-claims-but-one-hasn't-been-created": {
+			pod:      podWithTwoClaimNames,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.QueueSkip,
+		},
+		"queue-update-resource-slice": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.Queue,
+		},
+		"skip-resource-claim-not-found": {
+			pod:      podWithClaimName,
+			claims:   []*resourceapi.ResourceClaim{},
+			oldObj:   resourceSlice,
+			newObj:   resourceSliceUpdated,
+			wantHint: framework.QueueSkip,
+		},
+		"backoff-unexpected-object-with-oldObj-newObj": {
+			pod:     podWithClaimName,
+			claims:  []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:  scheduling,
+			newObj:  scheduling,
+			wantErr: true,
+		},
+		"backoff-unexpected-object-with-oldObj": {
+			pod:     podWithClaimName,
+			claims:  []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:  scheduling,
+			newObj:  resourceSlice,
+			wantErr: true,
+		},
+		"backoff-unexpected-object-with-newObj": {
+			pod:     podWithClaimName,
+			claims:  []*resourceapi.ResourceClaim{pendingClaim},
+			oldObj:  resourceSlice,
+			newObj:  scheduling,
+			wantErr: true,
+		},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			logger, _ := ktesting.NewTestContext(t)
+			features := feature.Features{
+				EnableDynamicResourceAllocation: true,
+			}
+			testCtx := setup(t, nil, tc.claims, nil, nil, nil, features)
+			gotHint, err := testCtx.p.isSchedulableAfterResourceSliceChange(logger, tc.pod, tc.oldObj, tc.newObj)
+			if tc.wantErr {
+				if err == nil {
+					t.Fatal("want an error, got none")
+				}
+				return
+			}

+			if err != nil {
+				t.Fatalf("want no error, got: %v", err)
+			}
+			if tc.wantHint != gotHint {
+				t.Fatalf("want %#v, got %#v", tc.wantHint.String(), gotHint.String())
+			}
+		})
+	}
+}
@@ -983,6 +983,12 @@ func (wrapper *ResourceClaimWrapper) Allocation(allocation *resourceapi.Allocati
 	return wrapper
 }

+// Deleting sets the deletion timestamp of the inner object.
+func (wrapper *ResourceClaimWrapper) Deleting(time metav1.Time) *ResourceClaimWrapper {
+	wrapper.ResourceClaim.DeletionTimestamp = &time
+	return wrapper
+}
+
 // Structured turns a "normal" claim into one which was allocated via structured parameters.
 // The only difference is that there is no controller name and the special finalizer
 // gets added.
@@ -1007,7 +1013,7 @@ func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourceapi.Resour
 	return wrapper
 }

-// ReservedFor sets that field of the inner object given information about one pod.
+// ReservedForPod sets that field of the inner object given information about one pod.
 func (wrapper *ResourceClaimWrapper) ReservedForPod(podName string, podUID types.UID) *ResourceClaimWrapper {
 	return wrapper.ReservedFor(resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: podUID})
 }
@@ -1105,10 +1111,16 @@ func MakeResourceSlice(nodeName, driverName string) *ResourceSliceWrapper {
 	return wrapper
 }

+// FromResourceSlice creates a ResourceSlice wrapper from some existing object.
+func FromResourceSlice(other *resourceapi.ResourceSlice) *ResourceSliceWrapper {
+	return &ResourceSliceWrapper{*other.DeepCopy()}
+}
+
 func (wrapper *ResourceSliceWrapper) Obj() *resourceapi.ResourceSlice {
 	return &wrapper.ResourceSlice
 }

 // Devices sets the devices field of the inner object.
 func (wrapper *ResourceSliceWrapper) Devices(names ...string) *ResourceSliceWrapper {
 	for _, name := range names {
 		wrapper.Spec.Devices = append(wrapper.Spec.Devices, resourceapi.Device{Name: name})
@@ -1116,6 +1128,7 @@ func (wrapper *ResourceSliceWrapper) Devices(names ...string) *ResourceSliceWrap
 	return wrapper
 }

+// Device appends a single device with the given attributes to the devices field of the inner object.
 func (wrapper *ResourceSliceWrapper) Device(name string, attrs map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) *ResourceSliceWrapper {
 	wrapper.Spec.Devices = append(wrapper.Spec.Devices, resourceapi.Device{Name: name, Basic: &resourceapi.BasicDevice{Attributes: attrs}})
 	return wrapper
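A note on the reworded failure messages: `%#v` does not invoke a type's String method, so formatting framework.QueueingHint values directly prints bare integers rather than names like "Queue". Passing the result of `.String()` (which the framework type provides, as the diff's calls imply) makes failures readable. A minimal self-contained sketch of the effect; the local QueueingHint type, its constants, and its String method are stand-ins assumed to approximate the scheduler framework's definitions:

	package main

	import "fmt"

	// QueueingHint stands in for the scheduler framework's type; the real
	// definition lives in k8s.io/kubernetes/pkg/scheduler/framework.
	type QueueingHint int

	const (
		QueueSkip QueueingHint = iota
		Queue
	)

	// String returns a readable name instead of a bare integer.
	func (h QueueingHint) String() string {
		if h == Queue {
			return "Queue"
		}
		return "QueueSkip"
	}

	func main() {
		want, got := Queue, QueueSkip
		// %#v on the raw values: the failure reads "want 1, got 0".
		fmt.Printf("want %#v, got %#v\n", want, got)
		// %#v on the String() results: `want "Queue", got "QueueSkip"`.
		fmt.Printf("want %#v, got %#v\n", want.String(), got.String())
	}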