Merge pull request #118257 from pohly/dra-scheduler-plugin-loopvar-fix

dra scheduler plugin test: fix loopvar bug and "reserve" expected data

Commit: f7cfb5f02f
@@ -104,7 +104,7 @@ var (
 			Obj()
 	inUseClaim = st.FromResourceClaim(pendingImmediateClaim).
 			Allocation(&resourcev1alpha2.AllocationResult{}).
-			ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{UID: types.UID(podUID)}).
+			ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
 			Obj()
 	allocatedClaim = st.FromResourceClaim(pendingDelayedClaim).
 			OwnerReference(podName, podUID, podKind).
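The fixture change above matters because a ResourceClaimConsumerReference identifies a consumer by resource, name, and UID together; carrying only the UID left the test data inconsistent with what a real reservation carries. A minimal sketch of the fully populated reference, reusing the fixture identifiers from this file (podName, podUID); illustrative only, not the plugin's reservation code:

// Sketch: consumer reference as the updated fixture builds it.
// "pods" is the plural resource of the consuming object; the UID
// guards against a pod being deleted and recreated under the same name.
ref := resourcev1alpha2.ResourceClaimConsumerReference{
	Resource: "pods",
	Name:     podName,
	UID:      types.UID(podUID),
}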
@@ -214,10 +214,36 @@ func TestPlugin(t *testing.T) {
 		"claim-reference": {
 			pod:    podWithClaimName,
 			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim},
+			want: want{
+				reserve: result{
+					changes: change{
+						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
+							if claim.Name == claimName {
+								claim = claim.DeepCopy()
+								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
+							}
+							return claim
+						},
+					},
+				},
+			},
 		},
 		"claim-template": {
 			pod:    podWithClaimTemplate,
 			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim},
+			want: want{
+				reserve: result{
+					changes: change{
+						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
+							if claim.Name == claimName {
+								claim = claim.DeepCopy()
+								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
+							}
+							return claim
+						},
+					},
+				},
+			},
 		},
 		"missing-claim": {
 			pod: podWithClaimTemplate,
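The new want.reserve.changes entries are the "reserve" expected data from the commit subject: after the plugin's Reserve phase, the claim matching claimName should list the pod as a consumer, identical to the inUseClaim fixture's status. A hedged sketch of the status mutation those expectations model, again reusing fixture identifiers from this file rather than the plugin's actual Reserve code:

// Assumed illustration: Reserve records the pod as a consumer
// in the claim's status, which the claim mutator above reproduces
// when building the expected object.
expected := allocatedClaim.DeepCopy()
expected.Status.ReservedFor = append(expected.Status.ReservedFor,
	resourcev1alpha2.ResourceClaimConsumerReference{
		Resource: "pods",
		Name:     podName,
		UID:      types.UID(podUID),
	})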
@@ -589,11 +615,13 @@ func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) {
 	claims, err := tc.client.ResourceV1alpha2().ResourceClaims("").List(tc.ctx, metav1.ListOptions{})
 	require.NoError(t, err, "list claims")
 	for _, claim := range claims.Items {
+		claim := claim
 		objects = append(objects, &claim)
 	}
 	schedulings, err := tc.client.ResourceV1alpha2().PodSchedulingContexts("").List(tc.ctx, metav1.ListOptions{})
 	require.NoError(t, err, "list pod scheduling")
 	for _, scheduling := range schedulings.Items {
+		scheduling := scheduling
 		objects = append(objects, &scheduling)
 	}
 
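The claim := claim and scheduling := scheduling copies are the loopvar fix from the commit subject. Before Go 1.22, a range loop reused a single variable across iterations, so taking &claim appended the same address every time and listAll returned a slice whose entries all aliased the last item. A self-contained demonstration; the buggy behavior shown applies under pre-Go 1.22 semantics, and since Go 1.22 both loops print the same:

package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}

	var bad []*string
	for _, s := range items { // pre-Go 1.22: one variable reused each iteration
		bad = append(bad, &s) // every pointer aliases that single variable
	}

	var good []*string
	for _, s := range items {
		s := s // per-iteration copy, the same idiom as the patch
		good = append(good, &s)
	}

	fmt.Println(*bad[0], *bad[1], *bad[2])    // "c c c" before Go 1.22
	fmt.Println(*good[0], *good[1], *good[2]) // "a b c"
}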