Merge pull request #131679 from mortent/automated-cherry-pick-of-#131662-upstream-release-1.33
Automated cherry pick of #131662: DRA: Fix failure to allocate large number of devices
Commit 3a09aeb4fa
@@ -745,9 +745,10 @@ func (alloc *allocator) allocateOne(r deviceIndices, allocateSubRequest bool) (b
 		return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex + 1}, false)
 	}
 
 	// Before trying to allocate devices, check if allocating the devices
 	// in the current request will put us over the threshold.
-	numDevicesAfterAlloc := len(alloc.result[r.claimIndex].devices) + requestData.numDevices
+	// We can calculate this by adding the number of already allocated devices with the number
+	// of devices in the current request, and then finally subtract the deviceIndex since we
+	// don't want to double count any devices already allocated for the current request.
+	numDevicesAfterAlloc := len(alloc.result[r.claimIndex].devices) + requestData.numDevices - r.deviceIndex
 	if numDevicesAfterAlloc > resourceapi.AllocationResultsMaxSize {
 		// Don't return an error here since we want to keep searching for
 		// a solution that works.
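Why this fixes allocation of large requests: the allocator places the devices of a request one at a time, so partway through a request the devices already placed are counted both in len(alloc.result[r.claimIndex].devices) and in requestData.numDevices. For a large request the old sum overshot resourceapi.AllocationResultsMaxSize even when the finished allocation would have landed exactly at the limit; subtracting r.deviceIndex removes the double count. A minimal sketch of the arithmetic, using illustrative numbers and a stand-in constant rather than the real resourceapi value:

package main

import "fmt"

// Stand-in for resourceapi.AllocationResultsMaxSize; the value here is an assumption
// for illustration only.
const allocationResultsMaxSize = 32

func main() {
	// Hypothetical mid-allocation state: some devices of the current request
	// have already been placed into the claim's result.
	alreadyAllocated := 10 // len(alloc.result[claimIndex].devices), all from this request
	numDevices := 32       // devices the current request asks for in total
	deviceIndex := 10      // devices of this request allocated so far

	oldCount := alreadyAllocated + numDevices               // 42: the 10 in-flight devices counted twice
	newCount := alreadyAllocated + numDevices - deviceIndex // 32: each device counted once

	fmt.Println(oldCount > allocationResultsMaxSize) // true  -> request wrongly rejected
	fmt.Println(newCount > allocationResultsMaxSize) // false -> allocation can proceed
}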
@@ -395,6 +395,14 @@ func deviceAllocationResult(request, driver, pool, device string, adminAccess bo
 	return r
 }
 
+func multipleDeviceAllocationResults(request, driver, pool string, count, startIndex int) []resourceapi.DeviceRequestAllocationResult {
+	var results []resourceapi.DeviceRequestAllocationResult
+	for i := startIndex; i < startIndex+count; i++ {
+		results = append(results, deviceAllocationResult(request, driver, pool, fmt.Sprintf("device-%d", i), false))
+	}
+	return results
+}
+
 // nodeLabelSelector creates a node selector with a label match for "key" in "values".
 func nodeLabelSelector(key string, values ...string) *v1.NodeSelector {
 	requirements := []v1.NodeSelectorRequirement{{
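For context, the new test helper simply fans the existing deviceAllocationResult constructor out over generated device names: a call like multipleDeviceAllocationResults(req0, driverA, pool1, 3, 0) would produce results for device-0, device-1, and device-2. This keeps the expectation for an AllocationResultsMaxSize-device allocation in the test case below down to a single line.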
@@ -3024,6 +3032,21 @@ func TestAllocator(t *testing.T) {
 				deviceAllocationResult(req0, driverA, pool1, device1, false),
 			)},
 		},
+		"max-number-devices": {
+			claimsToAllocate: objects(
+				claimWithRequests(
+					claim0, nil, request(req0, classA, resourceapi.AllocationResultsMaxSize),
+				),
+			),
+			classes: objects(class(classA, driverA)),
+			slices:  objects(sliceWithMultipleDevices(slice1, node1, pool1, driverA, resourceapi.AllocationResultsMaxSize)),
+			node:    node(node1, region1),
+
+			expectResults: []any{allocationResult(
+				localNodeSelector(node1),
+				multipleDeviceAllocationResults(req0, driverA, pool1, resourceapi.AllocationResultsMaxSize, 0)...,
+			)},
+		},
 	}
 
 	for name, tc := range testcases {
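The new max-number-devices case pins the boundary the fix targets: one request for exactly resourceapi.AllocationResultsMaxSize devices, backed by a slice exposing the same number, must now allocate every device, whereas the previous double counting would have rejected the claim partway through.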