diff --git a/pkg/quota/v1/evaluator/core/resource_claims.go b/pkg/quota/v1/evaluator/core/resource_claims.go
index f649640884f..587bf647e85 100644
--- a/pkg/quota/v1/evaluator/core/resource_claims.go
+++ b/pkg/quota/v1/evaluator/core/resource_claims.go
@@ -114,6 +114,38 @@ func (p *claimEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error)
 	// charge for claim
 	result[ClaimObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
 	for _, request := range claim.Spec.Devices.Requests {
+		if len(request.FirstAvailable) > 0 {
+			// If there are subrequests, we want to use the worst case per device class
+			// to quota. So for each device class, we need to find the max number of
+			// devices that might be allocated.
+			maxQuantityByDeviceClassClaim := make(map[corev1.ResourceName]resource.Quantity)
+			for _, subrequest := range request.FirstAvailable {
+				deviceClassClaim := V1ResourceByDeviceClass(subrequest.DeviceClassName)
+				var numDevices int64
+				switch subrequest.AllocationMode {
+				case resourceapi.DeviceAllocationModeExactCount:
+					numDevices = subrequest.Count
+				case resourceapi.DeviceAllocationModeAll:
+					// Worst case...
+					numDevices = resourceapi.AllocationResultsMaxSize
+				default:
+					// Could happen after a downgrade. Unknown modes
+					// don't count towards the quota and users shouldn't
+					// expect that when downgrading.
+				}
+
+				q := resource.NewQuantity(numDevices, resource.DecimalSI)
+				if q.Cmp(maxQuantityByDeviceClassClaim[deviceClassClaim]) > 0 {
+					maxQuantityByDeviceClassClaim[deviceClassClaim] = *q
+				}
+			}
+			for deviceClassClaim, q := range maxQuantityByDeviceClassClaim {
+				quantity := result[deviceClassClaim]
+				quantity.Add(q)
+				result[deviceClassClaim] = quantity
+			}
+			continue
+		}
 		deviceClassClaim := V1ResourceByDeviceClass(request.DeviceClassName)
 		var numDevices int64
 		switch request.AllocationMode {
diff --git a/pkg/quota/v1/evaluator/core/resource_claims_test.go b/pkg/quota/v1/evaluator/core/resource_claims_test.go
index 33bbbbbb314..2f137e49edc 100644
--- a/pkg/quota/v1/evaluator/core/resource_claims_test.go
+++ b/pkg/quota/v1/evaluator/core/resource_claims_test.go
@@ -39,7 +39,25 @@ func testResourceClaim(name string, namespace string, spec api.ResourceClaimSpec
 
 func TestResourceClaimEvaluatorUsage(t *testing.T) {
 	classGpu := "gpu"
+	classTpu := "tpu"
 	validClaim := testResourceClaim("foo", "ns", api.ResourceClaimSpec{Devices: api.DeviceClaim{Requests: []api.DeviceRequest{{Name: "req-0", DeviceClassName: classGpu, AllocationMode: api.DeviceAllocationModeExactCount, Count: 1}}}})
+	validClaimWithPrioritizedList := testResourceClaim("foo", "ns", api.ResourceClaimSpec{
+		Devices: api.DeviceClaim{
+			Requests: []api.DeviceRequest{
+				{
+					Name: "req-0",
+					FirstAvailable: []api.DeviceSubRequest{
+						{
+							Name:            "subreq-0",
+							DeviceClassName: classGpu,
+							AllocationMode:  api.DeviceAllocationModeExactCount,
+							Count:           1,
+						},
+					},
+				},
+			},
+		},
+	})
 
 	evaluator := NewResourceClaimEvaluator(nil)
 	testCases := map[string]struct {
@@ -112,6 +130,61 @@ func TestResourceClaimEvaluatorUsage(t *testing.T) {
 				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
 			},
 		},
+		"prioritized-list": {
+			claim: validClaimWithPrioritizedList,
+			usage: corev1.ResourceList{
+				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
+				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
+			},
+		},
+		"prioritized-list-multiple-subrequests": {
+			claim: func() *api.ResourceClaim {
+				claim := validClaimWithPrioritizedList.DeepCopy()
+				claim.Spec.Devices.Requests[0].FirstAvailable[0].Count = 2
+				claim.Spec.Devices.Requests[0].FirstAvailable = append(claim.Spec.Devices.Requests[0].FirstAvailable, api.DeviceSubRequest{
+					Name:            "subreq-1",
+					DeviceClassName: classGpu,
+					AllocationMode:  api.DeviceAllocationModeExactCount,
+					Count:           1,
+				})
+				return claim
+			}(),
+			usage: corev1.ResourceList{
+				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
+				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("2"),
+			},
+		},
+		"prioritized-list-multiple-subrequests-allocation-mode-all": {
+			claim: func() *api.ResourceClaim {
+				claim := validClaimWithPrioritizedList.DeepCopy()
+				claim.Spec.Devices.Requests[0].FirstAvailable = append(claim.Spec.Devices.Requests[0].FirstAvailable, api.DeviceSubRequest{
+					Name:            "subreq-1",
+					DeviceClassName: classGpu,
+					AllocationMode:  api.DeviceAllocationModeAll,
+				})
+				return claim
+			}(),
+			usage: corev1.ResourceList{
+				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
+				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("32"),
+			},
+		},
+		"prioritized-list-multiple-subrequests-different-device-classes": {
+			claim: func() *api.ResourceClaim {
+				claim := validClaimWithPrioritizedList.DeepCopy()
+				claim.Spec.Devices.Requests[0].FirstAvailable = append(claim.Spec.Devices.Requests[0].FirstAvailable, api.DeviceSubRequest{
+					Name:            "subreq-1",
+					DeviceClassName: classTpu,
+					AllocationMode:  api.DeviceAllocationModeAll,
+				})
+				return claim
+			}(),
+			usage: corev1.ResourceList{
+				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
+				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
+				"tpu.deviceclass.resource.k8s.io/devices": resource.MustParse("32"),
+			},
+		},
 	}
 	for testName, testCase := range testCases {
 		t.Run(testName, func(t *testing.T) {
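Note on the quota arithmetic above: a request with subrequests is charged at the worst case across those subrequests, tracked per device class, which is why the mixed-class test case expects gpu=1 and tpu=32. Below is a minimal standalone sketch of that rule, not part of the patch: it uses plain Go with illustrative type and field names (not the real resource.k8s.io API) and assumes the "All" allocation mode is capped at 32 devices, mirroring resourceapi.AllocationResultsMaxSize.

package main

import "fmt"

// subRequest is an illustrative stand-in for api.DeviceSubRequest, reduced to
// the fields the quota rule cares about.
type subRequest struct {
	deviceClass string
	modeAll     bool  // true ~ DeviceAllocationModeAll
	count       int64 // used when modeAll is false (ExactCount)
}

// worstCaseUsage charges one request at the maximum number of devices any of
// its subrequests could consume, tracked separately per device class.
func worstCaseUsage(subrequests []subRequest) map[string]int64 {
	// Assumed cap for mode "All", matching resourceapi.AllocationResultsMaxSize.
	const allocationResultsMaxSize = 32
	usage := map[string]int64{}
	for _, sr := range subrequests {
		n := sr.count
		if sr.modeAll {
			n = allocationResultsMaxSize
		}
		if n > usage[sr.deviceClass] {
			usage[sr.deviceClass] = n
		}
	}
	return usage
}

func main() {
	// Mirrors the "different-device-classes" test case: one GPU subrequest
	// with ExactCount=1 and one TPU subrequest with mode All.
	fmt.Println(worstCaseUsage([]subRequest{
		{deviceClass: "gpu", count: 1},
		{deviceClass: "tpu", modeAll: true},
	})) // map[gpu:1 tpu:32]
}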