dra scheduler plugin test: fix loopvar bug and "reserve" expected data
The `listAll` function returned a slice in which all pointers referred to the same instance, and that instance held the value of the last list entry. As a result, the unit tests effectively compared only that element. During the reserve phase, the first claim gets reserved in two test cases, so those two tests must expect that change. This hadn't been noticed before because that first claim never got compared.
This commit is contained in:
parent e11c5284ad
commit 7a6b4a9215
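The underlying problem is the classic Go loop-variable aliasing pitfall: taking the address of the range variable yields the same pointer on every iteration (for toolchains before Go 1.22, where loop variables became per-iteration). Below is a minimal, self-contained sketch of the failure and of the `claim := claim` style fix; the Claim type and collect helpers are invented for illustration and are not part of the test code.

// Minimal sketch (not from this commit) of the loop-variable aliasing bug
// that listAll had. All names here are illustrative.
package main

import "fmt"

type Claim struct{ Name string }

// collectBuggy mirrors the old listAll: &item points at the single loop
// variable, so every slice element aliases the last entry (pre-Go 1.22).
func collectBuggy(items []Claim) (out []*Claim) {
	for _, item := range items {
		out = append(out, &item)
	}
	return
}

// collectFixed mirrors the fix: re-declare the variable inside the loop body,
// exactly like the added `claim := claim` and `scheduling := scheduling` lines.
func collectFixed(items []Claim) (out []*Claim) {
	for _, item := range items {
		item := item // per-iteration copy
		out = append(out, &item)
	}
	return
}

func main() {
	claims := []Claim{{Name: "claim-a"}, {Name: "claim-b"}}
	for _, c := range collectBuggy(claims) {
		fmt.Println("buggy:", c.Name) // prints "claim-b" twice when compiled before Go 1.22
	}
	for _, c := range collectFixed(claims) {
		fmt.Println("fixed:", c.Name) // prints "claim-a", then "claim-b"
	}
}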
@@ -104,7 +104,7 @@ var (
 			Obj()
 	inUseClaim = st.FromResourceClaim(pendingImmediateClaim).
 			Allocation(&resourcev1alpha2.AllocationResult{}).
-			ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{UID: types.UID(podUID)}).
+			ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
 			Obj()
 	allocatedClaim = st.FromResourceClaim(pendingDelayedClaim).
 			OwnerReference(podName, podUID, podKind).
@@ -214,10 +214,36 @@ func TestPlugin(t *testing.T) {
 		"claim-reference": {
 			pod: podWithClaimName,
 			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim},
+			want: want{
+				reserve: result{
+					changes: change{
+						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
+							if claim.Name == claimName {
+								claim = claim.DeepCopy()
+								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
+							}
+							return claim
+						},
+					},
+				},
+			},
 		},
 		"claim-template": {
 			pod: podWithClaimTemplate,
 			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim},
+			want: want{
+				reserve: result{
+					changes: change{
+						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
+							if claim.Name == claimName {
+								claim = claim.DeepCopy()
+								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
+							}
+							return claim
+						},
+					},
+				},
+			},
 		},
 		"missing-claim": {
 			pod: podWithClaimTemplate,
@@ -589,11 +615,13 @@ func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) {
 	claims, err := tc.client.ResourceV1alpha2().ResourceClaims("").List(tc.ctx, metav1.ListOptions{})
 	require.NoError(t, err, "list claims")
 	for _, claim := range claims.Items {
+		claim := claim
 		objects = append(objects, &claim)
 	}
 	schedulings, err := tc.client.ResourceV1alpha2().PodSchedulingContexts("").List(tc.ctx, metav1.ListOptions{})
 	require.NoError(t, err, "list pod scheduling")
 	for _, scheduling := range schedulings.Items {
+		scheduling := scheduling
 		objects = append(objects, &scheduling)
 	}