DRA: Update quota calculations for Prioritized Alternatives in Device Requests
Parent: a716095a8a
Commit: cc35f9b8e8
@@ -114,6 +114,38 @@ func (p *claimEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error)
    // charge for claim
    result[ClaimObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
    for _, request := range claim.Spec.Devices.Requests {
        if len(request.FirstAvailable) > 0 {
            // If there are subrequests, we want to use the worst case per device class
            // to quota. So for each device class, we need to find the max number of
            // devices that might be allocated.
            maxQuantityByDeviceClassClaim := make(map[corev1.ResourceName]resource.Quantity)
            for _, subrequest := range request.FirstAvailable {
                deviceClassClaim := V1ResourceByDeviceClass(subrequest.DeviceClassName)
                var numDevices int64
                switch subrequest.AllocationMode {
                case resourceapi.DeviceAllocationModeExactCount:
                    numDevices = subrequest.Count
                case resourceapi.DeviceAllocationModeAll:
                    // Worst case...
                    numDevices = resourceapi.AllocationResultsMaxSize
                default:
                    // Could happen after a downgrade. Unknown modes
                    // don't count towards the quota and users shouldn't
                    // expect that when downgrading.
                }

                q := resource.NewQuantity(numDevices, resource.DecimalSI)
                if q.Cmp(maxQuantityByDeviceClassClaim[deviceClassClaim]) > 0 {
                    maxQuantityByDeviceClassClaim[deviceClassClaim] = *q
                }
            }
            for deviceClassClaim, q := range maxQuantityByDeviceClassClaim {
                quantity := result[deviceClassClaim]
                quantity.Add(q)
                result[deviceClassClaim] = quantity
            }
            continue
        }
        deviceClassClaim := V1ResourceByDeviceClass(request.DeviceClassName)
        var numDevices int64
        switch request.AllocationMode {
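For illustration only, here is a minimal standalone sketch (not part of the commit) of the worst-case rule the hunk above adds: for each device class referenced by a subrequest, the claim is charged the largest number of devices that any single alternative could allocate. The subRequest type, the maxDevicesPerClass helper, and the hard-coded limit of 32 are simplifications invented for this example (32 mirrors resourceapi.AllocationResultsMaxSize used above); the real evaluator works on the DRA API types and resource.Quantity values.

package main

import "fmt"

// subRequest is a pared-down stand-in for a DRA subrequest (hypothetical type,
// not the real API struct).
type subRequest struct {
    deviceClassName string
    allocationMode  string // "ExactCount" or "All"
    count           int64
}

// maxDevicesPerClass mirrors the worst-case rule above: per device class,
// keep the largest number of devices any single alternative could allocate.
func maxDevicesPerClass(subs []subRequest) map[string]int64 {
    const allocationResultsMaxSize = 32 // stands in for resourceapi.AllocationResultsMaxSize
    worst := map[string]int64{}
    for _, s := range subs {
        var n int64
        switch s.allocationMode {
        case "ExactCount":
            n = s.count
        case "All":
            n = allocationResultsMaxSize // worst case for "all devices"
        }
        if n > worst[s.deviceClassName] {
            worst[s.deviceClassName] = n
        }
    }
    return worst
}

func main() {
    // Two alternatives in the same class: charged max(2, 1) = 2 gpu devices, not 3.
    fmt.Println(maxDevicesPerClass([]subRequest{
        {deviceClassName: "gpu", allocationMode: "ExactCount", count: 2},
        {deviceClassName: "gpu", allocationMode: "ExactCount", count: 1},
    })) // map[gpu:2]

    // Alternatives in different classes: each class is charged its own worst case.
    fmt.Println(maxDevicesPerClass([]subRequest{
        {deviceClassName: "gpu", allocationMode: "ExactCount", count: 1},
        {deviceClassName: "tpu", allocationMode: "All"},
    })) // map[gpu:1 tpu:32]
}

Charging the worst case keeps the quota check conservative: whichever alternative the scheduler ultimately picks cannot consume more per class than what was already counted against the quota. The test cases added below exercise exactly these combinations.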
@@ -39,7 +39,25 @@ func testResourceClaim(name string, namespace string, spec api.ResourceClaimSpec

func TestResourceClaimEvaluatorUsage(t *testing.T) {
    classGpu := "gpu"
    classTpu := "tpu"
    validClaim := testResourceClaim("foo", "ns", api.ResourceClaimSpec{Devices: api.DeviceClaim{Requests: []api.DeviceRequest{{Name: "req-0", DeviceClassName: classGpu, AllocationMode: api.DeviceAllocationModeExactCount, Count: 1}}}})
    validClaimWithPrioritizedList := testResourceClaim("foo", "ns", api.ResourceClaimSpec{
        Devices: api.DeviceClaim{
            Requests: []api.DeviceRequest{
                {
                    Name: "req-0",
                    FirstAvailable: []api.DeviceSubRequest{
                        {
                            Name:            "subreq-0",
                            DeviceClassName: classGpu,
                            AllocationMode:  api.DeviceAllocationModeExactCount,
                            Count:           1,
                        },
                    },
                },
            },
        },
    })

    evaluator := NewResourceClaimEvaluator(nil)
    testCases := map[string]struct {
@@ -112,6 +130,61 @@ func TestResourceClaimEvaluatorUsage(t *testing.T) {
                "gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
            },
        },
        "prioritized-list": {
            claim: validClaimWithPrioritizedList,
            usage: corev1.ResourceList{
                "count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
                "gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
            },
        },
        "prioritized-list-multiple-subrequests": {
            claim: func() *api.ResourceClaim {
                claim := validClaimWithPrioritizedList.DeepCopy()
                claim.Spec.Devices.Requests[0].FirstAvailable[0].Count = 2
                claim.Spec.Devices.Requests[0].FirstAvailable = append(claim.Spec.Devices.Requests[0].FirstAvailable, api.DeviceSubRequest{
                    Name:            "subreq-1",
                    DeviceClassName: classGpu,
                    AllocationMode:  api.DeviceAllocationModeExactCount,
                    Count:           1,
                })
                return claim
            }(),
            usage: corev1.ResourceList{
                "count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
                "gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("2"),
            },
        },
        "prioritized-list-multiple-subrequests-allocation-mode-all": {
            claim: func() *api.ResourceClaim {
                claim := validClaimWithPrioritizedList.DeepCopy()
                claim.Spec.Devices.Requests[0].FirstAvailable = append(claim.Spec.Devices.Requests[0].FirstAvailable, api.DeviceSubRequest{
                    Name:            "subreq-1",
                    DeviceClassName: classGpu,
                    AllocationMode:  api.DeviceAllocationModeAll,
                })
                return claim
            }(),
            usage: corev1.ResourceList{
                "count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
                "gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("32"),
            },
        },
        "prioritized-list-multiple-subrequests-different-device-classes": {
            claim: func() *api.ResourceClaim {
                claim := validClaimWithPrioritizedList.DeepCopy()
                claim.Spec.Devices.Requests[0].FirstAvailable = append(claim.Spec.Devices.Requests[0].FirstAvailable, api.DeviceSubRequest{
                    Name:            "subreq-1",
                    DeviceClassName: classTpu,
                    AllocationMode:  api.DeviceAllocationModeAll,
                })
                return claim
            }(),
            usage: corev1.ResourceList{
                "count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
                "gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
                "tpu.deviceclass.resource.k8s.io/devices": resource.MustParse("32"),
            },
        },
    }
    for testName, testCase := range testCases {
        t.Run(testName, func(t *testing.T) {
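Outside the table-driven test above, a quick way to see these quota keys in action would be a snippet like the following inside the same test file. This is only an illustrative sketch reusing the helpers the file already defines (testResourceClaim and NewResourceClaimEvaluator); the expected values follow the worst-case accounting shown in the first hunk.

    claim := testResourceClaim("foo", "ns", api.ResourceClaimSpec{
        Devices: api.DeviceClaim{
            Requests: []api.DeviceRequest{{
                Name:            "req-0",
                DeviceClassName: "gpu",
                AllocationMode:  api.DeviceAllocationModeExactCount,
                Count:           2,
            }},
        },
    })
    evaluator := NewResourceClaimEvaluator(nil)
    usage, err := evaluator.Usage(claim)
    if err != nil {
        t.Fatal(err)
    }
    // Expected usage:
    //   count/resourceclaims.resource.k8s.io    = 1 (the claim object itself)
    //   gpu.deviceclass.resource.k8s.io/devices = 2 (worst case for the request)
    _ = usage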