Allocator updates

This commit is contained in:
Morten Torkildsen 2025-03-12 16:57:39 -07:00 committed by Cici Huang
parent ece1d76e80
commit ecba6cde1d
4 changed files with 816 additions and 131 deletions

View File

@@ -454,7 +454,12 @@ func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
	if err != nil {
		return nil, statusError(logger, err)
	}
-	allocator, err := structured.NewAllocator(ctx, pl.enableAdminAccess, pl.enablePrioritizedList, pl.enableDeviceTaints, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache)
+	features := structured.Features{
+		AdminAccess:     pl.enableAdminAccess,
+		PrioritizedList: pl.enablePrioritizedList,
+		DeviceTaints:    pl.enableDeviceTaints,
+	}
+	allocator, err := structured.NewAllocator(ctx, features, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache)
	if err != nil {
		return nil, statusError(logger, err)
	}
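Collapsing the three positional booleans into a single Features struct means future gates can be threaded through without changing the NewAllocator signature again. A minimal sketch of a caller after this change (the variable names are assumptions, not taken from the diff):

	// Unset fields are zero-valued, so structured.Features{} leaves every gate disabled.
	features := structured.Features{
		DeviceTaints: true, // enable only the gates this caller needs
	}
	allocator, err := structured.NewAllocator(ctx, features,
		claims, allocatedDevices, classLister, resourceSlices, celCache)
	if err != nil {
		return nil, statusError(logger, err)
	}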

View File

@@ -27,6 +27,7 @@ import (
	v1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/dynamic-resource-allocation/api"
	draapi "k8s.io/dynamic-resource-allocation/api"
	"k8s.io/dynamic-resource-allocation/cel"
	"k8s.io/dynamic-resource-allocation/resourceclaim"
@@ -48,9 +49,7 @@ type deviceClassLister interface {
// available and the current state of the cluster (claims, classes, resource
// slices).
type Allocator struct {
-	adminAccessEnabled     bool
-	prioritizedListEnabled bool
-	deviceTaintsEnabled    bool
+	features         Features
	claimsToAllocate []*resourceapi.ResourceClaim
	allocatedDevices sets.Set[DeviceID]
	classLister      deviceClassLister
@@ -58,14 +57,18 @@ type Allocator struct {
	celCache *cel.Cache
}

+type Features struct {
+	AdminAccess          bool
+	PrioritizedList      bool
+	DeviceTaints         bool
+	PartitionableDevices bool
+}
+
// NewAllocator returns an allocator for a certain set of claims or an error if
// some problem was detected which makes it impossible to allocate claims.
//
// The returned Allocator can be used multiple times and is thread-safe.
func NewAllocator(ctx context.Context,
-	adminAccessEnabled bool,
-	prioritizedListEnabled bool,
-	deviceTaintsEnabled bool,
+	features Features,
	claimsToAllocate []*resourceapi.ResourceClaim,
	allocatedDevices sets.Set[DeviceID],
	classLister deviceClassLister,
@@ -73,9 +76,7 @@ func NewAllocator(ctx context.Context,
	celCache *cel.Cache,
) (*Allocator, error) {
	return &Allocator{
-		adminAccessEnabled:     adminAccessEnabled,
-		prioritizedListEnabled: prioritizedListEnabled,
-		deviceTaintsEnabled:    deviceTaintsEnabled,
+		features:         features,
		claimsToAllocate: claimsToAllocate,
		allocatedDevices: allocatedDevices,
		classLister:      classLister,
@@ -126,7 +127,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
	defer alloc.logger.V(5).Info("Done with allocation", "success", len(finalResult) == len(alloc.claimsToAllocate), "err", finalErr)

	// First determine all eligible pools.
-	pools, err := GatherPools(ctx, alloc.slices, node)
+	pools, err := GatherPools(ctx, alloc.slices, node, a.features.PartitionableDevices)
	if err != nil {
		return nil, fmt.Errorf("gather pool information: %w", err)
	}
@@ -514,7 +515,7 @@ type requestData struct {

type deviceWithID struct {
	id     DeviceID
-	basic  *draapi.BasicDevice
+	device *draapi.Device
	slice  *draapi.ResourceSlice
}
@@ -526,6 +527,7 @@ type internalDeviceResult struct {
	request       string // name of the request (if no subrequests) or the subrequest
	parentRequest string // name of the request which contains the subrequest, empty otherwise
	id            DeviceID
+	device        *draapi.Device
	slice         *draapi.ResourceSlice
	adminAccess   *bool
}
@@ -621,7 +623,7 @@ func (m *matchAttributeConstraint) add(requestName, subRequestName string, devic
	return true
}

-func (m *matchAttributeConstraint) remove(requestName, subRequestName string, device *draapi.BasicDevice, deviceID DeviceID) {
+func (m *matchAttributeConstraint) remove(requestName, subRequestName string, device *draapi.Device, deviceID DeviceID) {
	if m.requestNames.Len() > 0 && !m.matches(requestName, subRequestName) {
		// Device not affected by constraint.
		return
@@ -640,7 +642,7 @@ func (m *matchAttributeConstraint) matches(requestName, subRequestName string) b
	}
}

-func lookupAttribute(device *draapi.BasicDevice, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
+func lookupAttribute(device *draapi.Device, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
	// Fully-qualified match?
	if attr, ok := device.Attributes[draapi.QualifiedName(attributeName)]; ok {
		return &attr
@@ -808,7 +810,7 @@ func (alloc *allocator) allocateOne(r deviceIndices, allocateSubRequest bool) (b
	// Finally treat as allocated and move on to the next device.
	device := deviceWithID{
		id:     deviceID,
-		basic:  slice.Spec.Devices[deviceIndex].Basic,
+		device: &slice.Spec.Devices[deviceIndex],
		slice:  slice,
	}
	allocated, deallocate, err := alloc.allocateDevice(r, device, false)
@@ -888,7 +890,7 @@ func (alloc *allocator) isSelectable(r requestIndices, requestData requestData,
}

-func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDevice, deviceID DeviceID, class *resourceapi.DeviceClass, selectors []resourceapi.DeviceSelector) (bool, error) {
+func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.Device, deviceID DeviceID, class *resourceapi.DeviceClass, selectors []resourceapi.DeviceSelector) (bool, error) {
	for i, selector := range selectors {
		expr := alloc.celCache.GetOrCompile(selector.CEL.Expression)
		if expr.Error != nil {
@@ -903,13 +905,15 @@ func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDev
			return false, fmt.Errorf("claim %s: selector #%d: CEL compile error: %w", klog.KObj(alloc.claimsToAllocate[r.claimIndex]), i, expr.Error)
		}

-		// If this conversion turns out to be expensive, the CEL package could be converted
-		// to use unique strings.
-		var d resourceapi.BasicDevice
-		if err := draapi.Convert_api_BasicDevice_To_v1beta1_BasicDevice(device, &d, nil); err != nil {
-			return false, fmt.Errorf("convert BasicDevice: %w", err)
+		attributes := make(map[resourceapi.QualifiedName]resourceapi.DeviceAttribute)
+		if err := draapi.Convert_api_Attributes_To_v1beta1_Attributes(device.Attributes, attributes); err != nil {
+			return false, fmt.Errorf("convert attributes: %w", err)
		}
-		matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: d.Attributes, Capacity: d.Capacity})
+		capacity := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity)
+		if err := draapi.Convert_api_Capacity_To_v1beta1_Capacity(device.Capacity, capacity); err != nil {
+			return false, fmt.Errorf("convert capacity: %w", err)
+		}
+		matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: attributes, Capacity: capacity})
		if class != nil {
			alloc.logger.V(7).Info("CEL result", "device", deviceID, "class", klog.KObj(class), "selector", i, "expression", selector.CEL.Expression, "matches", matches, "actualCost", ptr.Deref(details.ActualCost(), 0), "err", err)
		} else {
@@ -949,6 +953,17 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
		return false, nil, nil
	}

+	// If a device consumes capacity from a capacity pool, verify that
+	// there is sufficient capacity available.
+	ok, err := alloc.checkAvailableCapacity(device)
+	if err != nil {
+		return false, nil, err
+	}
+	if !ok {
+		alloc.logger.V(7).Info("Insufficient capacity", "device", device.id)
+		return false, nil, nil
+	}
+
	var parentRequestName string
	var baseRequestName string
	var subRequestName string
@@ -968,7 +983,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus

	// It's available. Now check constraints.
	for i, constraint := range alloc.constraints[r.claimIndex] {
-		added := constraint.add(baseRequestName, subRequestName, device.basic, device.id)
+		added := constraint.add(baseRequestName, subRequestName, device.device, device.id)
		if !added {
			if must {
				// It does not make sense to declare a claim where a constraint prevents getting
@@ -978,7 +993,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
			// Roll back for all previous constraints before we return.
			for e := 0; e < i; e++ {
-				alloc.constraints[r.claimIndex][e].remove(baseRequestName, subRequestName, device.basic, device.id)
+				alloc.constraints[r.claimIndex][e].remove(baseRequestName, subRequestName, device.device, device.id)
			}
			return false, nil, nil
		}
@@ -994,6 +1009,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
		request:       request.name(),
		parentRequest: parentRequestName,
		id:            device.id,
+		device:        device.device,
		slice:         device.slice,
	}
	if request.adminAccess() {
@@ -1004,7 +1020,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
	return true, func() {
		for _, constraint := range alloc.constraints[r.claimIndex] {
-			constraint.remove(baseRequestName, subRequestName, device.basic, device.id)
+			constraint.remove(baseRequestName, subRequestName, device.device, device.id)
		}
		if !request.adminAccess() {
			alloc.allocatingDevices[device.id] = false
@@ -1033,18 +1049,96 @@ func taintTolerated(taint resourceapi.DeviceTaint, request requestAccessor) bool
	return false
}
func (alloc *allocator) checkAvailableCapacity(device deviceWithID) (bool, error) {
slice := device.slice
referencedCapacityPools := sets.New[api.UniqueString]()
for _, consumedCapacity := range device.device.ConsumesCapacity {
referencedCapacityPools.Insert(consumedCapacity.CapacityPool)
}
// Create a structure that captures the initial capacity for all pools
// referenced by the device.
availableCapacities := make(map[api.UniqueString]map[api.QualifiedName]api.DeviceCapacity)
for _, capacityPool := range slice.Spec.CapacityPools {
if !referencedCapacityPools.Has(capacityPool.Name) {
continue
}
poolCapacity := make(map[api.QualifiedName]api.DeviceCapacity)
for name, cap := range capacityPool.Capacity {
poolCapacity[name] = cap
}
availableCapacities[capacityPool.Name] = poolCapacity
}
// Update the data structure to reflect capacity already in use.
for _, device := range slice.Spec.Devices {
deviceID := DeviceID{
Driver: slice.Spec.Driver,
Pool: slice.Spec.Pool.Name,
Device: device.Name,
}
if !(alloc.allocatedDevices.Has(deviceID) || alloc.allocatingDevices[deviceID]) {
continue
}
for _, consumedCapacity := range device.ConsumesCapacity {
poolCapacity := availableCapacities[consumedCapacity.CapacityPool]
for name, cap := range consumedCapacity.Capacity {
existingCap, ok := poolCapacity[name]
if !ok {
// Just continue for now, but this probably should be an error.
continue
}
// This can potentially result in negative available capacity. That is fine,
// we just treat it as no capacity available.
existingCap.Value.Sub(cap.Value)
poolCapacity[name] = existingCap
}
}
}
// Check if all consumed capacities for the device can be satisfied.
for _, deviceConsumedCapacity := range device.device.ConsumesCapacity {
poolCapacity := availableCapacities[deviceConsumedCapacity.CapacityPool]
for name, cap := range deviceConsumedCapacity.Capacity {
availableCap, found := poolCapacity[name]
// If the device requests a capacity that doesn't exist in
// the pool, it can not be allocated.
if !found {
return false, nil
}
// If the device requests more capacity than is available, it
// can not be allocated.
if availableCap.Value.Cmp(cap.Value) < 0 {
return false, nil
}
}
}
return true, nil
}
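The accounting above relies on resource.Quantity semantics: Sub mutates the receiver and may go negative, and Cmp returns a negative value when the receiver is smaller than the argument. A self-contained illustration of the same arithmetic (the values are made up):

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/api/resource"
	)

	func main() {
		// Pool starts with 8Gi; an already-allocated device consumed 6Gi.
		available := resource.MustParse("8Gi")
		available.Sub(resource.MustParse("6Gi"))

		// A new device wants 4Gi, but only 2Gi is left, so it does not fit.
		requested := resource.MustParse("4Gi")
		fmt.Println(available.Cmp(requested) >= 0) // false
	}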
// createNodeSelector constructs a node selector for the allocation, if needed,
// otherwise it returns nil.
func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.NodeSelector, error) {
	// Selector with one term. That term gets extended with additional
	// requirements from the different devices.
-	nodeSelector := &v1.NodeSelector{
+	ns := &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{{}},
	}

	for i := range result {
		slice := result[i].slice
-		if slice.Spec.NodeName != draapi.NullUniqueString {
+		var nodeName draapi.UniqueString
+		var nodeSelector *v1.NodeSelector
+		if slice.Spec.PerDeviceNodeSelection {
+			nodeName = result[i].device.NodeName
+			nodeSelector = result[i].device.NodeSelector
+		} else {
+			nodeName = slice.Spec.NodeName
+			nodeSelector = slice.Spec.NodeSelector
+		}
+		if nodeName != draapi.NullUniqueString {
			// At least one device is local to one node. This
			// restricts the allocation to that node.
			return &v1.NodeSelector{
@@ -1052,29 +1146,29 @@ func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.N
				MatchFields: []v1.NodeSelectorRequirement{{
					Key:      "metadata.name",
					Operator: v1.NodeSelectorOpIn,
-					Values:   []string{slice.Spec.NodeName.String()},
+					Values:   []string{nodeName.String()},
				}},
			}},
		}, nil
	}
-		if slice.Spec.NodeSelector != nil {
-			switch len(slice.Spec.NodeSelector.NodeSelectorTerms) {
+		if nodeSelector != nil {
+			switch len(nodeSelector.NodeSelectorTerms) {
			case 0:
				// Nothing?
			case 1:
				// Add all terms if they are not present already.
-				addNewNodeSelectorRequirements(slice.Spec.NodeSelector.NodeSelectorTerms[0].MatchFields, &nodeSelector.NodeSelectorTerms[0].MatchFields)
-				addNewNodeSelectorRequirements(slice.Spec.NodeSelector.NodeSelectorTerms[0].MatchExpressions, &nodeSelector.NodeSelectorTerms[0].MatchExpressions)
+				addNewNodeSelectorRequirements(nodeSelector.NodeSelectorTerms[0].MatchFields, &ns.NodeSelectorTerms[0].MatchFields)
+				addNewNodeSelectorRequirements(nodeSelector.NodeSelectorTerms[0].MatchExpressions, &ns.NodeSelectorTerms[0].MatchExpressions)
			default:
				// This shouldn't occur, validation must prevent creation of such slices.
-				return nil, fmt.Errorf("unsupported ResourceSlice.NodeSelector with %d terms", len(slice.Spec.NodeSelector.NodeSelectorTerms))
+				return nil, fmt.Errorf("unsupported ResourceSlice.NodeSelector with %d terms", len(nodeSelector.NodeSelectorTerms))
			}
		}
	}
-	if len(nodeSelector.NodeSelectorTerms[0].MatchFields) > 0 || len(nodeSelector.NodeSelectorTerms[0].MatchExpressions) > 0 {
+	if len(ns.NodeSelectorTerms[0].MatchFields) > 0 || len(ns.NodeSelectorTerms[0].MatchExpressions) > 0 {
		// We have a valid node selector.
-		return nodeSelector, nil
+		return ns, nil
	}
	// Available everywhere.
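The loop now has to pick the placement source per device: slices that set PerDeviceNodeSelection carry NodeName/NodeSelector on each device rather than on the slice. A minimal sketch of that decision in isolation (the helper name is hypothetical):

	// placementSource returns where this device's allocation is pinned: the
	// device itself when the slice uses per-device node selection, otherwise
	// the slice.
	func placementSource(slice *draapi.ResourceSlice, dev *draapi.Device) (draapi.UniqueString, *v1.NodeSelector) {
		if slice.Spec.PerDeviceNodeSelection {
			return dev.NodeName, dev.NodeSelector
		}
		return slice.Spec.NodeName, slice.Spec.NodeSelector
	}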

View File

@@ -71,6 +71,8 @@ const (
	device2       = "device-2"
	device3       = "device-3"
	device4       = "device-4"
+	capacityPool1 = "capacity-pool-1"
+	capacityPool2 = "capacity-pool-2"
)

func init() {
@@ -241,11 +243,71 @@ func device(name string, capacity map[resourceapi.QualifiedName]resource.Quantit
		Name: name,
		Basic: &resourceapi.BasicDevice{
			Attributes: attributes,
+			Capacity:   toDeviceCapacity(capacity),
		},
	}
-	device.Basic.Capacity = make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity, len(capacity))
-	for name, quantity := range capacity {
-		device.Basic.Capacity[name] = resourceapi.DeviceCapacity{Value: quantity}
-	}
	return wrapDevice(device)
}

const (
	fromDeviceCapacityConsumption = "fromDeviceCapacityConsumption"
)
func compositeDevice(name string, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
consumesCapacity ...resourceapi.DeviceCapacityConsumption) resourceapi.Device {
device := resourceapi.Device{
Name: name,
Composite: &resourceapi.CompositeDevice{
Attributes: attributes,
},
}
switch capacity := capacity.(type) {
case map[resourceapi.QualifiedName]resource.Quantity:
device.Composite.Capacity = toDeviceCapacity(capacity)
case string:
if capacity == fromDeviceCapacityConsumption {
c := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity)
for _, dcc := range consumesCapacity {
for name, cap := range dcc.Capacity {
if _, found := c[name]; found {
panic(fmt.Sprintf("same capacity found in multiple device capacity consumptions %q", name))
}
c[name] = cap
}
}
device.Composite.Capacity = c
} else {
panic(fmt.Sprintf("unexpected capacity value %q", capacity))
}
case nil:
// nothing to do
default:
panic(fmt.Sprintf("unexpected capacity type %T: %+v", capacity, capacity))
}
device.Composite.ConsumesCapacity = consumesCapacity
return device
}
func compositeDeviceWithNodeSelector(name string, nodeSelection any, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
consumesCapacity ...resourceapi.DeviceCapacityConsumption) resourceapi.Device {
device := compositeDevice(name, capacity, attributes, consumesCapacity...)
switch nodeSelection := nodeSelection.(type) {
case *v1.NodeSelector:
device.Composite.NodeSelector = nodeSelection
case string:
if nodeSelection == nodeSelectionAll {
device.Composite.AllNodes = true
} else if nodeSelection == nodeSelectionPerDevice {
panic("nodeSelectionPerDevice is not supported for devices")
} else {
device.Composite.NodeName = nodeSelection
}
default:
panic(fmt.Sprintf("unexpected nodeSelection type %T: %+v", nodeSelection, nodeSelection))
	}
	return wrapDevice(device)
}
@@ -263,10 +325,24 @@ func (in wrapDevice) withTaints(taints ...resourceapi.DeviceTaint) wrapDevice {
	return wrapDevice(*device)
}

func deviceCapacityConsumption(capacityPool string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.DeviceCapacityConsumption {
	return resourceapi.DeviceCapacityConsumption{
		CapacityPool: capacityPool,
		Capacity:     toDeviceCapacity(capacity),
	}
}

const (
	nodeSelectionAll       = "nodeSelectionAll"
	nodeSelectionPerDevice = "nodeSelectionPerDevice"
)

// generate a ResourceSlice object with the given name, node,
// driver and pool names, generation and a list of devices.
-// The nodeSelection parameter may be a string (= node name),
-// true (= all nodes), or a node selector (= specific nodes).
+// The nodeSelection parameter may be a string with the value
+// nodeSelectionAll for all nodes, the value nodeSelectionPerDevice
+// for per-device node selection, or any other value to set the
+// node name. Providing a node selector sets the NodeSelector field.
func slice(name string, nodeSelection any, pool, driver string, devices ...wrapDevice) *resourceapi.ResourceSlice {
	slice := &resourceapi.ResourceSlice{
		ObjectMeta: metav1.ObjectMeta{
@@ -287,13 +363,14 @@ func slice(name string, nodeSelection any, pool, driver string, devices ...wrapD
	switch nodeSelection := nodeSelection.(type) {
	case *v1.NodeSelector:
		slice.Spec.NodeSelector = nodeSelection
-	case bool:
-		if !nodeSelection {
-			panic("nodeSelection == false is not valid")
-		}
-		slice.Spec.AllNodes = true
	case string:
-		slice.Spec.NodeName = nodeSelection
+		if nodeSelection == nodeSelectionAll {
+			slice.Spec.AllNodes = true
+		} else if nodeSelection == nodeSelectionPerDevice {
+			slice.Spec.PerDeviceNodeSelection = true
+		} else {
+			slice.Spec.NodeName = nodeSelection
+		}
	default:
		panic(fmt.Sprintf("unexpected nodeSelection type %T: %+v", nodeSelection, nodeSelection))
	}
@@ -449,6 +526,28 @@ func sliceWithMultipleDevices(name string, nodeSelection any, pool, driver strin
	return slice(name, nodeSelection, pool, driver, devices...)
}
func sliceWithCapacityPools(name string, nodeSelection any, pool, driver string, capacityPools []resourceapi.CapacityPool, devices ...resourceapi.Device) *resourceapi.ResourceSlice {
slice := slice(name, nodeSelection, pool, driver)
slice.Spec.CapacityPools = capacityPools
slice.Spec.Devices = devices
return slice
}
func capacityPool(name string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.CapacityPool {
return resourceapi.CapacityPool{
Name: name,
Capacity: toDeviceCapacity(capacity),
}
}
func toDeviceCapacity(capacity map[resourceapi.QualifiedName]resource.Quantity) map[resourceapi.QualifiedName]resourceapi.DeviceCapacity {
out := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity, len(capacity))
for name, quantity := range capacity {
out[name] = resourceapi.DeviceCapacity{Value: quantity}
}
return out
}
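Put together, these helpers express a slice whose devices draw from a shared pool. A hypothetical example mirroring the first partitionable-devices test below: one 8Gi pool backing a composite device that consumes half of it:

	s := sliceWithCapacityPools(slice1, node1, pool1, driverA,
		[]resourceapi.CapacityPool{
			capacityPool(capacityPool1, map[resourceapi.QualifiedName]resource.Quantity{
				"memory": resource.MustParse("8Gi"),
			}),
		},
		compositeDevice(device1, nil, nil,
			deviceCapacityConsumption(capacityPool1, map[resourceapi.QualifiedName]resource.Quantity{
				"memory": resource.MustParse("4Gi"),
			}),
		),
	)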
func TestAllocator(t *testing.T) {
	nonExistentAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "NonExistentAttribute")
	boolAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "boolAttribute")
@@ -481,9 +580,7 @@ func TestAllocator(t *testing.T) {
	}

	testcases := map[string]struct {
-		adminAccess     bool
-		prioritizedList bool
-		deviceTaints    bool
+		features         Features
		claimsToAllocate []wrapResourceClaim
		allocatedDevices []DeviceID
		classes          []*resourceapi.DeviceClass
@@ -1020,7 +1117,9 @@ func TestAllocator(t *testing.T) {
			expectResults: nil,
		},
		"all-devices-some-allocated-admin-access": {
-			adminAccess: true,
+			features: Features{
+				AdminAccess: true,
+			},
			claimsToAllocate: func() []wrapResourceClaim {
				c := claim(claim0, req0, classA)
				c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
@@ -1146,7 +1245,7 @@ func TestAllocator(t *testing.T) {
			classes: objects(class(classA, driverA)),
			slices: objects(
				sliceWithOneDevice(slice1, nodeLabelSelector(regionKey, region1), pool1, driverA),
-				sliceWithOneDevice(slice1, true, pool2, driverA),
+				sliceWithOneDevice(slice1, nodeSelectionAll, pool2, driverA),
				sliceWithOneDevice(slice1, nodeLabelSelector(planetKey, planetValueEarth), pool3, driverA),
				sliceWithOneDevice(slice1, localNodeSelector(node1), pool4, driverA),
			),
@@ -1217,7 +1316,9 @@ func TestAllocator(t *testing.T) {
			expectResults: nil,
		},
		"admin-access-disabled": {
-			adminAccess: false,
+			features: Features{
+				AdminAccess: false,
+			},
			claimsToAllocate: func() []wrapResourceClaim {
				c := claim(claim0, req0, classA)
				c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
@@ -1231,7 +1332,9 @@ func TestAllocator(t *testing.T) {
			expectError: gomega.MatchError(gomega.ContainSubstring("claim claim-0, request req-0: admin access is requested, but the feature is disabled")),
		},
		"admin-access-enabled": {
-			adminAccess: true,
+			features: Features{
+				AdminAccess: true,
+			},
			claimsToAllocate: func() []wrapResourceClaim {
				c := claim(claim0, req0, classA)
				c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
@@ -1544,18 +1647,6 @@ func TestAllocator(t *testing.T) {
			expectError: gomega.MatchError(gomega.ContainSubstring("empty constraint (unsupported constraint type?)")),
		},
-		"unknown-device": {
-			claimsToAllocate: objects(claim(claim0, req0, classA)),
-			classes:          objects(class(classA, driverA)),
-			slices: objects(
-				func() *resourceapi.ResourceSlice {
-					slice := sliceWithOneDevice(slice1, node1, pool1, driverA)
-					slice.Spec.Devices[0].Basic = nil /* empty = unknown future extension */
-					return slice
-				}(),
-			),
-			node: node(node1, region1),
-		},
		"invalid-CEL-one-device": {
			claimsToAllocate: objects(
				func() wrapResourceClaim {
@@ -2104,8 +2195,469 @@ func TestAllocator(t *testing.T) {
				deviceAllocationResult(req0SubReq1, driverA, pool1, device1, false),
			)},
		},
"partitionable-devices-single-device": {
features: Features{
PartitionableDevices: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil, request(req0, classA, 1)),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("8Gi"),
},
),
),
compositeDevice(device1, nil, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
)),
node: node(node1, region1),
expectResults: []any{allocationResult(
localNodeSelector(node1),
deviceAllocationResult(req0, driverA, pool1, device1, false),
)},
},
"partitionable-devices-multiple-devices": {
features: Features{
PrioritizedList: true,
PartitionableDevices: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil,
request(req0, classA, 1),
requestWithPrioritizedList(req1,
subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
CEL: &resourceapi.CELDeviceSelector{
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("6Gi")) >= 0`, driverA),
}},
),
subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
CEL: &resourceapi.CELDeviceSelector{
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
}},
),
),
),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("8Gi"),
},
),
),
compositeDevice(device1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
}, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDevice(device2,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("6Gi"),
}, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("6Gi"),
},
),
),
compositeDevice(device3, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
)),
node: node(node1, region1),
expectResults: []any{allocationResult(
localNodeSelector(node1),
deviceAllocationResult(req0, driverA, pool1, device1, false),
deviceAllocationResult(req1SubReq1, driverA, pool1, device3, false),
)},
},
"partitionable-devices-multiple-capacity-pools": {
features: Features{
PrioritizedList: true,
PartitionableDevices: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil,
request(req0, classA, 1),
requestWithPrioritizedList(req1,
subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
CEL: &resourceapi.CELDeviceSelector{
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("6Gi")) >= 0`, driverA),
}},
),
subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
CEL: &resourceapi.CELDeviceSelector{
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
}},
),
),
),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("18Gi"),
},
),
capacityPool(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"cpus": resource.MustParse("8"),
},
),
),
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
deviceCapacityConsumption(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"cpus": resource.MustParse("4"),
},
),
),
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("6Gi"),
},
),
deviceCapacityConsumption(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"cpus": resource.MustParse("6"),
},
),
),
compositeDevice(device3, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
deviceCapacityConsumption(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"cpus": resource.MustParse("4"),
},
),
),
)),
node: node(node1, region1),
expectResults: []any{allocationResult(
localNodeSelector(node1),
deviceAllocationResult(req0, driverA, pool1, device1, false),
deviceAllocationResult(req1SubReq1, driverA, pool1, device3, false),
)},
},
"partitionable-devices-no-capacity-available": {
features: Features{
PartitionableDevices: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil,
request(req0, classA, 1),
),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("18Gi"),
},
),
),
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("16Gi"),
},
),
),
)),
allocatedDevices: []DeviceID{
MakeDeviceID(driverA, pool1, device2),
},
node: node(node1, region1),
expectResults: nil,
},
"partitionable-devices-overallocated-capacity-pool": {
features: Features{
PartitionableDevices: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil,
request(req0, classA, 1),
),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("18Gi"),
},
),
),
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("20Gi"),
},
),
),
)),
allocatedDevices: []DeviceID{
MakeDeviceID(driverA, pool1, device2),
},
node: node(node1, region1),
expectResults: nil,
},
"partitionable-devices-disabled": {
features: Features{
PartitionableDevices: false,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil,
request(req0, classA, 1),
),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("18Gi"),
},
),
),
compositeDevice(device1, nil, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
)),
node: node(node1, region1),
expectResults: nil,
},
"partitionable-devices-per-device-node-selection-nodename": {
features: Features{
PartitionableDevices: true,
PrioritizedList: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil,
requestWithPrioritizedList(req0,
subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
CEL: &resourceapi.CELDeviceSelector{
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("6Gi")) >= 0`, driverA),
}},
),
subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
CEL: &resourceapi.CELDeviceSelector{
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
}},
),
),
),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("18Gi"),
},
),
),
compositeDeviceWithNodeSelector(device1, node1, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDeviceWithNodeSelector(device2, node2, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("6Gi"),
},
),
),
)),
node: node(node1, region1),
expectResults: []any{allocationResult(
localNodeSelector(node1),
deviceAllocationResult(req0SubReq1, driverA, pool1, device1, false),
)},
},
"partitionable-devices-per-device-node-selection-node-selector": {
features: Features{
PartitionableDevices: true,
PrioritizedList: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil, request(req0, classA, 1)),
),
classes: objects(class(classA, driverA)),
slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("18Gi"),
},
),
),
compositeDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
)),
node: node(node1, region1),
expectResults: []any{allocationResult(
&v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{{
MatchExpressions: []v1.NodeSelectorRequirement{
{Key: regionKey, Operator: v1.NodeSelectorOpIn, Values: []string{region1}},
},
}},
},
deviceAllocationResult(req0, driverA, pool1, device1, false),
)},
},
"partitionable-devices-per-device-node-selection-node-selector-multiple-devices": {
features: Features{
PartitionableDevices: true,
PrioritizedList: true,
},
claimsToAllocate: objects(
claimWithRequests(claim0, nil,
request(req0, classA, 3),
request(req1, classB, 3),
),
),
classes: objects(class(classA, driverA), class(classB, driverB)),
slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
objects(
capacityPool(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("18Gi"),
},
),
),
compositeDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDeviceWithNodeSelector(device2, node1, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDeviceWithNodeSelector(device3, nodeSelectionAll, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool1,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
), sliceWithCapacityPools(slice2, node1, pool2, driverB,
objects(
capacityPool(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("12Gi"),
},
),
),
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
compositeDevice(device3, fromDeviceCapacityConsumption, nil,
deviceCapacityConsumption(capacityPool2,
map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("4Gi"),
},
),
),
)),
node: node(node1, region1),
expectResults: []any{allocationResult(
&v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{{
MatchFields: []v1.NodeSelectorRequirement{
{Key: fieldNameKey, Operator: v1.NodeSelectorOpIn, Values: []string{node1}},
},
}},
},
deviceAllocationResult(req0, driverA, pool1, device1, false),
deviceAllocationResult(req0, driverA, pool1, device2, false),
deviceAllocationResult(req0, driverA, pool1, device3, false),
deviceAllocationResult(req1, driverB, pool2, device1, false),
deviceAllocationResult(req1, driverB, pool2, device2, false),
deviceAllocationResult(req1, driverB, pool2, device3, false),
)},
},
"tainted-two-devices": { "tainted-two-devices": {
deviceTaints: true, features: Features{
DeviceTaints: true,
},
claimsToAllocate: objects(claim(claim0, req0, classA)), claimsToAllocate: objects(claim(claim0, req0, classA)),
classes: objects(class(classA, driverA)), classes: objects(class(classA, driverA)),
slices: objects(slice(slice1, node1, pool1, driverA, slices: objects(slice(slice1, node1, pool1, driverA,
@@ -2115,7 +2667,9 @@ func TestAllocator(t *testing.T) {
			node: node(node1, region1),
		},
		"tainted-one-device-two-taints": {
-			deviceTaints: true,
+			features: Features{
+				DeviceTaints: true,
+			},
			claimsToAllocate: objects(claim(claim0, req0, classA)),
			classes:          objects(class(classA, driverA)),
			slices: objects(slice(slice1, node1, pool1, driverA,
@@ -2124,7 +2678,9 @@ func TestAllocator(t *testing.T) {
			node: node(node1, region1),
		},
		"tainted-two-devices-tolerated": {
-			deviceTaints: true,
+			features: Features{
+				DeviceTaints: true,
+			},
			claimsToAllocate: objects(claim(claim0, req0, classA).withTolerations(tolerationNoExecute)),
			classes:          objects(class(classA, driverA)),
			slices: objects(slice(slice1, node1, pool1, driverA,
@@ -2138,7 +2694,9 @@ func TestAllocator(t *testing.T) {
			)},
		},
		"tainted-one-device-two-taints-both-tolerated": {
-			deviceTaints: true,
+			features: Features{
+				DeviceTaints: true,
+			},
			claimsToAllocate: objects(claim(claim0, req0, classA).withTolerations(tolerationNoSchedule, tolerationNoExecute)),
			classes:          objects(class(classA, driverA)),
			slices: objects(slice(slice1, node1, pool1, driverA,
@@ -2151,7 +2709,9 @@ func TestAllocator(t *testing.T) {
			)},
		},
		"tainted-disabled": {
-			deviceTaints: false,
+			features: Features{
+				DeviceTaints: false,
+			},
			claimsToAllocate: objects(claim(claim0, req0, classA)),
			classes:          objects(class(classA, driverA)),
			slices: objects(slice(slice1, node1, pool1, driverA,
@@ -2164,8 +2724,10 @@ func TestAllocator(t *testing.T) {
			)},
		},
		"tainted-prioritized-list": {
-			deviceTaints:    true,
-			prioritizedList: true,
+			features: Features{
+				DeviceTaints:    true,
+				PrioritizedList: true,
+			},
			claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
				subRequest(subReq0, classB, 1),
				subRequest(subReq1, classA, 1),
@@ -2177,8 +2739,10 @@ func TestAllocator(t *testing.T) {
			node: node(node1, region1),
		},
		"tainted-prioritized-list-disabled": {
-			deviceTaints:    false,
-			prioritizedList: true,
+			features: Features{
+				DeviceTaints:    false,
+				PrioritizedList: true,
+			},
			claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
				subRequest(subReq0, classB, 1),
				subRequest(subReq1, classA, 1),
@@ -2195,8 +2759,10 @@ func TestAllocator(t *testing.T) {
			)},
		},
		"tainted-admin-access": {
-			deviceTaints: true,
-			adminAccess:  true,
+			features: Features{
+				DeviceTaints: true,
+				AdminAccess:  true,
+			},
			claimsToAllocate: func() []wrapResourceClaim {
				c := claim(claim0, req0, classA)
				c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
@@ -2213,8 +2779,10 @@ func TestAllocator(t *testing.T) {
			node: node(node1, region1),
		},
		"tainted-admin-access-disabled": {
-			deviceTaints: false,
-			adminAccess:  true,
+			features: Features{
+				DeviceTaints: false,
+				AdminAccess:  true,
+			},
			claimsToAllocate: func() []wrapResourceClaim {
				c := claim(claim0, req0, classA)
				c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
@@ -2236,7 +2804,9 @@ func TestAllocator(t *testing.T) {
			)},
		},
		"tainted-all-devices-single": {
-			deviceTaints: true,
+			features: Features{
+				DeviceTaints: true,
+			},
			claimsToAllocate: objects(claimWithRequests(claim0, nil, resourceapi.DeviceRequest{
				Name:           req0,
				AllocationMode: resourceapi.DeviceAllocationModeAll,
@@ -2249,7 +2819,9 @@ func TestAllocator(t *testing.T) {
			node: node(node1, region1),
		},
		"tainted-all-devices-single-disabled": {
-			deviceTaints: false,
+			features: Features{
+				DeviceTaints: false,
+			},
			claimsToAllocate: objects(claimWithRequests(claim0, nil, resourceapi.DeviceRequest{
				Name:           req0,
				AllocationMode: resourceapi.DeviceAllocationModeAll,
@@ -2284,7 +2856,7 @@ func TestAllocator(t *testing.T) {
			allocatedDevices := slices.Clone(tc.allocatedDevices)
			slices := slices.Clone(tc.slices)

-			allocator, err := NewAllocator(ctx, tc.adminAccess, tc.prioritizedList, tc.deviceTaints, unwrap(claimsToAllocate...), sets.New(allocatedDevices...), classLister, slices, cel.NewCache(1))
+			allocator, err := NewAllocator(ctx, tc.features, unwrap(claimsToAllocate...), sets.New(allocatedDevices...), classLister, slices, cel.NewCache(1))
			g.Expect(err).ToNot(gomega.HaveOccurred())

			results, err := allocator.Allocate(ctx, tc.node)

View File

@@ -33,7 +33,7 @@ import (
// Out-dated slices are silently ignored. Pools may be incomplete (not all
// required slices available) or invalid (for example, device names not unique).
// Both are recorded in the result.
-func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node) ([]*Pool, error) {
+func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node, partitionableDevicesEnabled bool) ([]*Pool, error) {
	pools := make(map[PoolID]*Pool)
	nodeName := ""
	if node != nil {
@@ -44,12 +44,12 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
		switch {
		case slice.Spec.NodeName != "":
			if slice.Spec.NodeName == nodeName {
-				if err := addSlice(pools, slice); err != nil {
+				if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
					return nil, fmt.Errorf("add node slice %s: %w", slice.Name, err)
				}
			}
		case slice.Spec.AllNodes:
-			if err := addSlice(pools, slice); err != nil {
+			if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
				return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
			}
		case slice.Spec.NodeSelector != nil:
@@ -59,10 +59,17 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
				return nil, fmt.Errorf("node selector in resource slice %s: %w", slice.Name, err)
			}
			if selector.Match(node) {
-				if err := addSlice(pools, slice); err != nil {
+				if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
					return nil, fmt.Errorf("add matching slice %s: %w", slice.Name, err)
				}
			}
+		case slice.Spec.PerDeviceNodeSelection:
+			// We add the slice here regardless of whether the partitionable devices feature is
+			// enabled. If we don't, the full slice will be considered incomplete. So we filter
+			// out devices that have fields from the partitionable devices feature set later.
+			if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
+				return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
+			}
		default:
			// Nothing known was set. This must be some future, unknown extension,
			// so we don't know how to handle it. We may still be able to allocate from
@@ -87,9 +94,16 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
	return result, nil
}

-func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice) error {
+func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice, node *v1.Node, partitionableDevicesEnabled bool) error {
	var slice draapi.ResourceSlice
-	if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, nil); err != nil {
+	sliceScope := draapi.SliceScope{
+		SliceContext: draapi.SliceContext{
+			Slice:                       s,
+			Node:                        node,
+			PartitionableDevicesEnabled: partitionableDevicesEnabled,
+		},
+	}
+	if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, sliceScope); err != nil {
		return fmt.Errorf("convert ResourceSlice: %w", err)
	}
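For callers, the only visible change is the extra parameter: the node and the feature gate are threaded into the conversion scope so that per-device node fields can be checked and filtered during conversion. A sketch of the new call shape, assuming the caller already holds the feature flag (as the allocator above does):

	pools, err := GatherPools(ctx, resourceSlices, node, features.PartitionableDevices)
	if err != nil {
		return nil, fmt.Errorf("gather pool information: %w", err)
	}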