diff --git a/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator.go b/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator.go
index e12f29aeb3a..51f950fb132 100644
--- a/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator.go
+++ b/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator.go
@@ -43,7 +43,7 @@ import (
 type Allocator struct {
 	adminAccessEnabled bool
 	claimsToAllocate   []*resourceapi.ResourceClaim
-	allocatedDevices   []DeviceID
+	allocatedDevices   sets.Set[DeviceID]
 	classLister        resourcelisters.DeviceClassLister
 	slices             []*resourceapi.ResourceSlice
 	celCache           *CELCache
@@ -66,11 +66,12 @@ func NewAllocator(ctx context.Context,
 	return &Allocator{
 		adminAccessEnabled: adminAccessEnabled,
 		claimsToAllocate:   claimsToAllocate,
-		allocatedDevices:   allocatedDevices,
-		classLister:        classLister,
-		slices:             slices,
-		celCache:           celCache,
-		celMutex:           keymutex.NewHashed(0),
+		// This won't change, so build this set only once.
+		allocatedDevices: sets.New(allocatedDevices...),
+		classLister:      classLister,
+		slices:           slices,
+		celCache:         celCache,
+		celMutex:         keymutex.NewHashed(0),
 	}, nil
 }
 
@@ -295,18 +296,10 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 	// the Allocate call and can be compared in Go.
 	alloc.deviceMatchesRequest = make(map[matchKey]bool)
 
-	// Some of the existing devices are probably already allocated by
-	// claims...
-	numAllocated := len(alloc.allocatedDevices)
+	// We can estimate the size based on what we need to allocate.
+	alloc.allocatingDevices = make(map[DeviceID]bool, numDevicesTotal)
 
-	// Pre-allocating the map avoids growing it later. We can estimate
-	// the size based on what is already allocated and what we need to
-	// allocate.
-	alloc.allocated = make(map[DeviceID]bool, numAllocated+numDevicesTotal)
-	for _, deviceID := range alloc.allocatedDevices {
-		alloc.allocated[deviceID] = true
-	}
-	alloc.logger.V(6).Info("Gathered information about allocated devices", "numAllocated", numAllocated)
+	alloc.logger.V(6).Info("Gathered information about devices", "numAllocated", len(alloc.allocatedDevices), "toBeAllocated", numDevicesTotal)
 
 	// In practice, there aren't going to be many different CEL
 	// expressions. Most likely, there is going to be handful of different
@@ -393,7 +386,7 @@ type allocator struct {
 	deviceMatchesRequest map[matchKey]bool
 	constraints          [][]constraint                 // one list of constraints per claim
 	requestData          map[requestIndices]requestData // one entry per request
-	allocated            map[DeviceID]bool
+	allocatingDevices    map[DeviceID]bool
 	result               []internalAllocationResult
 }
 
@@ -620,7 +613,7 @@ func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
 			deviceID := DeviceID{Driver: pool.Driver, Pool: pool.Pool, Device: slice.Spec.Devices[deviceIndex].Name}
 
 			// Checking for "in use" is cheap and thus gets done first.
-			if !ptr.Deref(request.AdminAccess, false) && alloc.allocated[deviceID] {
+			if !ptr.Deref(request.AdminAccess, false) && (alloc.allocatedDevices.Has(deviceID) || alloc.allocatingDevices[deviceID]) {
 				alloc.logger.V(7).Info("Device in use", "device", deviceID)
 				continue
 			}
@@ -775,7 +768,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 	claim := alloc.claimsToAllocate[r.claimIndex]
 	request := &claim.Spec.Devices.Requests[r.requestIndex]
 	adminAccess := ptr.Deref(request.AdminAccess, false)
-	if !adminAccess && alloc.allocated[device.id] {
+	if !adminAccess && (alloc.allocatedDevices.Has(device.id) || alloc.allocatingDevices[device.id]) {
 		alloc.logger.V(7).Info("Device in use", "device", device.id)
 		return false, nil, nil
 	}
@@ -802,7 +795,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 	// and record the result.
 	alloc.logger.V(7).Info("Device allocated", "device", device.id)
 	if !adminAccess {
-		alloc.allocated[device.id] = true
+		alloc.allocatingDevices[device.id] = true
 	}
 	result := internalDeviceResult{
 		request: request.Name,
@@ -820,7 +813,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 		constraint.remove(request.Name, device.basic, device.id)
 	}
 	if !adminAccess {
-		alloc.allocated[device.id] = false
+		alloc.allocatingDevices[device.id] = false
 	}
 	// Truncate, but keep the underlying slice.
 	alloc.result[r.claimIndex].devices = alloc.result[r.claimIndex].devices[:previousNumResults]
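A minimal sketch (not part of the patch) of the bookkeeping split this diff introduces: devices already allocated by existing claims live in an immutable `sets.Set[DeviceID]` built once in `NewAllocator`, while devices picked during the current `Allocate` call are tracked in a separate mutable map sized only for the devices being requested. The `DeviceID` type below is a simplified stand-in for the real type in this package, and the driver/pool/device names are made up for illustration.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// DeviceID is a simplified stand-in for the package's DeviceID type;
// any comparable struct works as a sets.Set element.
type DeviceID struct {
	Driver, Pool, Device string
}

func main() {
	// Built once from the devices already allocated by existing claims;
	// never mutated afterwards, so no per-call copy is needed.
	allocatedDevices := sets.New(
		DeviceID{Driver: "gpu.example.com", Pool: "pool-a", Device: "dev-0"},
	)

	// Sized only for the devices requested in this Allocate call,
	// instead of allocated + requested as before the patch.
	allocatingDevices := make(map[DeviceID]bool, 1)

	candidate := DeviceID{Driver: "gpu.example.com", Pool: "pool-a", Device: "dev-1"}

	// "In use" now means: allocated by a prior claim OR tentatively
	// allocated earlier in the current pass.
	inUse := allocatedDevices.Has(candidate) || allocatingDevices[candidate]
	fmt.Println("in use:", inUse) // false: dev-1 is free

	// Tentatively allocate; on backtracking the entry is reset to false,
	// mirroring the cleanup path in allocateDevice.
	allocatingDevices[candidate] = true
	allocatingDevices[candidate] = false
}
```

The design win, as the diff's comments note, is that the already-allocated set no longer gets copied into a fresh map on every `Allocate` call; only the small per-call `allocatingDevices` map is mutated, and backtracking touches just that map.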