Allocator updates

parent ece1d76e80
commit ecba6cde1d
@@ -454,7 +454,12 @@ func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
     if err != nil {
         return nil, statusError(logger, err)
     }
-    allocator, err := structured.NewAllocator(ctx, pl.enableAdminAccess, pl.enablePrioritizedList, pl.enableDeviceTaints, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache)
+    features := structured.Features{
+        AdminAccess:     pl.enableAdminAccess,
+        PrioritizedList: pl.enablePrioritizedList,
+        DeviceTaints:    pl.enableDeviceTaints,
+    }
+    allocator, err := structured.NewAllocator(ctx, features, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache)
     if err != nil {
         return nil, statusError(logger, err)
     }
@@ -27,6 +27,7 @@ import (
     v1 "k8s.io/api/core/v1"
     resourceapi "k8s.io/api/resource/v1beta1"
     "k8s.io/apimachinery/pkg/util/sets"
+    "k8s.io/dynamic-resource-allocation/api"
     draapi "k8s.io/dynamic-resource-allocation/api"
     "k8s.io/dynamic-resource-allocation/cel"
     "k8s.io/dynamic-resource-allocation/resourceclaim"
@@ -48,14 +49,18 @@ type deviceClassLister interface {
 // available and the current state of the cluster (claims, classes, resource
 // slices).
 type Allocator struct {
-    adminAccessEnabled     bool
-    prioritizedListEnabled bool
-    deviceTaintsEnabled    bool
-    claimsToAllocate       []*resourceapi.ResourceClaim
-    allocatedDevices       sets.Set[DeviceID]
-    classLister            deviceClassLister
-    slices                 []*resourceapi.ResourceSlice
-    celCache               *cel.Cache
+    features         Features
+    claimsToAllocate []*resourceapi.ResourceClaim
+    allocatedDevices sets.Set[DeviceID]
+    classLister      deviceClassLister
+    slices           []*resourceapi.ResourceSlice
+    celCache         *cel.Cache
 }
 
+type Features struct {
+    AdminAccess          bool
+    PrioritizedList      bool
+    PartitionableDevices bool
+}
+
 // NewAllocator returns an allocator for a certain set of claims or an error if
@@ -63,9 +68,7 @@ type Allocator struct {
 //
 // The returned Allocator can be used multiple times and is thread-safe.
 func NewAllocator(ctx context.Context,
-    adminAccessEnabled bool,
-    prioritizedListEnabled bool,
-    deviceTaintsEnabled bool,
+    features Features,
     claimsToAllocate []*resourceapi.ResourceClaim,
     allocatedDevices sets.Set[DeviceID],
     classLister deviceClassLister,
@@ -73,14 +76,12 @@ func NewAllocator(ctx context.Context,
     celCache *cel.Cache,
 ) (*Allocator, error) {
     return &Allocator{
-        adminAccessEnabled:     adminAccessEnabled,
-        prioritizedListEnabled: prioritizedListEnabled,
-        deviceTaintsEnabled:    deviceTaintsEnabled,
-        claimsToAllocate:       claimsToAllocate,
-        allocatedDevices:       allocatedDevices,
-        classLister:            classLister,
-        slices:                 slices,
-        celCache:               celCache,
+        features:         features,
+        claimsToAllocate: claimsToAllocate,
+        allocatedDevices: allocatedDevices,
+        classLister:      classLister,
+        slices:           slices,
+        celCache:         celCache,
     }, nil
 }
 
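For illustration, a caller of the reworked constructor now bundles the former boolean arguments into a single Features value. A minimal sketch, assuming claims, allocatedDevices, classLister, resourceSlices, and celCache already exist in the caller (those names are placeholders, not part of this commit):

    features := structured.Features{
        AdminAccess:          true,
        PrioritizedList:      true,
        PartitionableDevices: false, // gate introduced by this commit
    }
    // All feature gates travel through the one struct instead of separate parameters.
    allocator, err := structured.NewAllocator(ctx, features, claims, allocatedDevices, classLister, resourceSlices, celCache)
    if err != nil {
        return nil, err
    }
    result, err := allocator.Allocate(ctx, node)

Adding a future gate then only touches the Features struct, not every caller's argument list.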
@@ -126,7 +127,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
     defer alloc.logger.V(5).Info("Done with allocation", "success", len(finalResult) == len(alloc.claimsToAllocate), "err", finalErr)
 
     // First determine all eligible pools.
-    pools, err := GatherPools(ctx, alloc.slices, node)
+    pools, err := GatherPools(ctx, alloc.slices, node, a.features.PartitionableDevices)
     if err != nil {
         return nil, fmt.Errorf("gather pool information: %w", err)
     }
@@ -513,9 +514,9 @@ type requestData struct {
 }
 
 type deviceWithID struct {
-    id    DeviceID
-    basic *draapi.BasicDevice
-    slice *draapi.ResourceSlice
+    id     DeviceID
+    device *draapi.Device
+    slice  *draapi.ResourceSlice
 }
 
 type internalAllocationResult struct {
@@ -526,6 +527,7 @@ type internalDeviceResult struct {
     request       string // name of the request (if no subrequests) or the subrequest
     parentRequest string // name of the request which contains the subrequest, empty otherwise
     id            DeviceID
+    device        *draapi.Device
     slice         *draapi.ResourceSlice
     adminAccess   *bool
 }
@@ -621,7 +623,7 @@ func (m *matchAttributeConstraint) add(requestName, subRequestName string, devic
     return true
 }
 
-func (m *matchAttributeConstraint) remove(requestName, subRequestName string, device *draapi.BasicDevice, deviceID DeviceID) {
+func (m *matchAttributeConstraint) remove(requestName, subRequestName string, device *draapi.Device, deviceID DeviceID) {
     if m.requestNames.Len() > 0 && !m.matches(requestName, subRequestName) {
         // Device not affected by constraint.
         return
@@ -640,7 +642,7 @@ func (m *matchAttributeConstraint) matches(requestName, subRequestName string) b
     }
 }
 
-func lookupAttribute(device *draapi.BasicDevice, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
+func lookupAttribute(device *draapi.Device, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
     // Fully-qualified match?
     if attr, ok := device.Attributes[draapi.QualifiedName(attributeName)]; ok {
         return &attr
@@ -807,9 +809,9 @@ func (alloc *allocator) allocateOne(r deviceIndices, allocateSubRequest bool) (b
 
     // Finally treat as allocated and move on to the next device.
     device := deviceWithID{
-        id:    deviceID,
-        basic: slice.Spec.Devices[deviceIndex].Basic,
-        slice: slice,
+        id:     deviceID,
+        device: &slice.Spec.Devices[deviceIndex],
+        slice:  slice,
     }
     allocated, deallocate, err := alloc.allocateDevice(r, device, false)
     if err != nil {
@@ -888,7 +890,7 @@ func (alloc *allocator) isSelectable(r requestIndices, requestData requestData,
 
 }
 
-func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDevice, deviceID DeviceID, class *resourceapi.DeviceClass, selectors []resourceapi.DeviceSelector) (bool, error) {
+func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.Device, deviceID DeviceID, class *resourceapi.DeviceClass, selectors []resourceapi.DeviceSelector) (bool, error) {
     for i, selector := range selectors {
         expr := alloc.celCache.GetOrCompile(selector.CEL.Expression)
         if expr.Error != nil {
@@ -903,13 +905,15 @@ func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDev
             return false, fmt.Errorf("claim %s: selector #%d: CEL compile error: %w", klog.KObj(alloc.claimsToAllocate[r.claimIndex]), i, expr.Error)
         }
 
         // If this conversion turns out to be expensive, the CEL package could be converted
         // to use unique strings.
-        var d resourceapi.BasicDevice
-        if err := draapi.Convert_api_BasicDevice_To_v1beta1_BasicDevice(device, &d, nil); err != nil {
-            return false, fmt.Errorf("convert BasicDevice: %w", err)
+        attributes := make(map[resourceapi.QualifiedName]resourceapi.DeviceAttribute)
+        if err := draapi.Convert_api_Attributes_To_v1beta1_Attributes(device.Attributes, attributes); err != nil {
+            return false, fmt.Errorf("convert attributes: %w", err)
         }
-        matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: d.Attributes, Capacity: d.Capacity})
+        capacity := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity)
+        if err := draapi.Convert_api_Capacity_To_v1beta1_Capacity(device.Capacity, capacity); err != nil {
+            return false, fmt.Errorf("convert capacity: %w", err)
+        }
+        matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: attributes, Capacity: capacity})
         if class != nil {
             alloc.logger.V(7).Info("CEL result", "device", deviceID, "class", klog.KObj(class), "selector", i, "expression", selector.CEL.Expression, "matches", matches, "actualCost", ptr.Deref(details.ActualCost(), 0), "err", err)
         } else {
@@ -949,6 +953,17 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
         return false, nil, nil
     }
 
+    // If a device consumes capacity from a capacity pool, verify that
+    // there is sufficient capacity available.
+    ok, err := alloc.checkAvailableCapacity(device)
+    if err != nil {
+        return false, nil, err
+    }
+    if !ok {
+        alloc.logger.V(7).Info("Insufficient capacity", "device", device.id)
+        return false, nil, nil
+    }
+
     var parentRequestName string
     var baseRequestName string
     var subRequestName string
@@ -968,7 +983,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 
     // It's available. Now check constraints.
     for i, constraint := range alloc.constraints[r.claimIndex] {
-        added := constraint.add(baseRequestName, subRequestName, device.basic, device.id)
+        added := constraint.add(baseRequestName, subRequestName, device.device, device.id)
         if !added {
             if must {
                 // It does not make sense to declare a claim where a constraint prevents getting
@@ -978,7 +993,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 
             // Roll back for all previous constraints before we return.
             for e := 0; e < i; e++ {
-                alloc.constraints[r.claimIndex][e].remove(baseRequestName, subRequestName, device.basic, device.id)
+                alloc.constraints[r.claimIndex][e].remove(baseRequestName, subRequestName, device.device, device.id)
             }
             return false, nil, nil
         }
@@ -994,6 +1009,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
         request:       request.name(),
         parentRequest: parentRequestName,
         id:            device.id,
+        device:        device.device,
         slice:         device.slice,
     }
     if request.adminAccess() {
@@ -1004,7 +1020,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 
     return true, func() {
         for _, constraint := range alloc.constraints[r.claimIndex] {
-            constraint.remove(baseRequestName, subRequestName, device.basic, device.id)
+            constraint.remove(baseRequestName, subRequestName, device.device, device.id)
         }
         if !request.adminAccess() {
             alloc.allocatingDevices[device.id] = false
@@ -1033,18 +1049,96 @@ func taintTolerated(taint resourceapi.DeviceTaint, request requestAccessor) bool
     return false
 }
 
+func (alloc *allocator) checkAvailableCapacity(device deviceWithID) (bool, error) {
+    slice := device.slice
+
+    referencedCapacityPools := sets.New[api.UniqueString]()
+    for _, consumedCapacity := range device.device.ConsumesCapacity {
+        referencedCapacityPools.Insert(consumedCapacity.CapacityPool)
+    }
+
+    // Create a structure that captures the initial capacity for all pools
+    // referenced by the device.
+    availableCapacities := make(map[api.UniqueString]map[api.QualifiedName]api.DeviceCapacity)
+    for _, capacityPool := range slice.Spec.CapacityPools {
+        if !referencedCapacityPools.Has(capacityPool.Name) {
+            continue
+        }
+        poolCapacity := make(map[api.QualifiedName]api.DeviceCapacity)
+        for name, cap := range capacityPool.Capacity {
+            poolCapacity[name] = cap
+        }
+        availableCapacities[capacityPool.Name] = poolCapacity
+    }
+
+    // Update the data structure to reflect capacity already in use.
+    for _, device := range slice.Spec.Devices {
+        deviceID := DeviceID{
+            Driver: slice.Spec.Driver,
+            Pool:   slice.Spec.Pool.Name,
+            Device: device.Name,
+        }
+        if !(alloc.allocatedDevices.Has(deviceID) || alloc.allocatingDevices[deviceID]) {
+            continue
+        }
+        for _, consumedCapacity := range device.ConsumesCapacity {
+            poolCapacity := availableCapacities[consumedCapacity.CapacityPool]
+            for name, cap := range consumedCapacity.Capacity {
+                existingCap, ok := poolCapacity[name]
+                if !ok {
+                    // Just continue for now, but this probably should be an error.
+                    continue
+                }
+                // This can potentially result in negative available capacity. That is fine,
+                // we just treat it as no capacity available.
+                existingCap.Value.Sub(cap.Value)
+                poolCapacity[name] = existingCap
+            }
+        }
+    }
+
+    // Check if all consumed capacities for the device can be satisfied.
+    for _, deviceConsumedCapacity := range device.device.ConsumesCapacity {
+        poolCapacity := availableCapacities[deviceConsumedCapacity.CapacityPool]
+        for name, cap := range deviceConsumedCapacity.Capacity {
+            availableCap, found := poolCapacity[name]
+            // If the device requests a capacity that doesn't exist in
+            // the pool, it can not be allocated.
+            if !found {
+                return false, nil
+            }
+            // If the device requests more capacity than is available, it
+            // can not be allocated.
+            if availableCap.Value.Cmp(cap.Value) < 0 {
+                return false, nil
+            }
+        }
+    }
+
+    return true, nil
+}
+
 // createNodeSelector constructs a node selector for the allocation, if needed,
 // otherwise it returns nil.
 func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.NodeSelector, error) {
     // Selector with one term. That term gets extended with additional
     // requirements from the different devices.
-    nodeSelector := &v1.NodeSelector{
+    ns := &v1.NodeSelector{
         NodeSelectorTerms: []v1.NodeSelectorTerm{{}},
     }
 
     for i := range result {
         slice := result[i].slice
-        if slice.Spec.NodeName != draapi.NullUniqueString {
+        var nodeName draapi.UniqueString
+        var nodeSelector *v1.NodeSelector
+        if slice.Spec.PerDeviceNodeSelection {
+            nodeName = result[i].device.NodeName
+            nodeSelector = result[i].device.NodeSelector
+        } else {
+            nodeName = slice.Spec.NodeName
+            nodeSelector = slice.Spec.NodeSelector
+        }
+        if nodeName != draapi.NullUniqueString {
             // At least one device is local to one node. This
             // restricts the allocation to that node.
             return &v1.NodeSelector{
@@ -1052,29 +1146,29 @@ func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.N
                 MatchFields: []v1.NodeSelectorRequirement{{
                     Key:      "metadata.name",
                     Operator: v1.NodeSelectorOpIn,
-                    Values:   []string{slice.Spec.NodeName.String()},
+                    Values:   []string{nodeName.String()},
                 }},
             }},
         }, nil
         }
-        if slice.Spec.NodeSelector != nil {
-            switch len(slice.Spec.NodeSelector.NodeSelectorTerms) {
+        if nodeSelector != nil {
+            switch len(nodeSelector.NodeSelectorTerms) {
             case 0:
                 // Nothing?
             case 1:
                 // Add all terms if they are not present already.
-                addNewNodeSelectorRequirements(slice.Spec.NodeSelector.NodeSelectorTerms[0].MatchFields, &nodeSelector.NodeSelectorTerms[0].MatchFields)
-                addNewNodeSelectorRequirements(slice.Spec.NodeSelector.NodeSelectorTerms[0].MatchExpressions, &nodeSelector.NodeSelectorTerms[0].MatchExpressions)
+                addNewNodeSelectorRequirements(nodeSelector.NodeSelectorTerms[0].MatchFields, &ns.NodeSelectorTerms[0].MatchFields)
+                addNewNodeSelectorRequirements(nodeSelector.NodeSelectorTerms[0].MatchExpressions, &ns.NodeSelectorTerms[0].MatchExpressions)
             default:
                 // This shouldn't occur, validation must prevent creation of such slices.
-                return nil, fmt.Errorf("unsupported ResourceSlice.NodeSelector with %d terms", len(slice.Spec.NodeSelector.NodeSelectorTerms))
+                return nil, fmt.Errorf("unsupported ResourceSlice.NodeSelector with %d terms", len(nodeSelector.NodeSelectorTerms))
             }
         }
     }
 
-    if len(nodeSelector.NodeSelectorTerms[0].MatchFields) > 0 || len(nodeSelector.NodeSelectorTerms[0].MatchExpressions) > 0 {
+    if len(ns.NodeSelectorTerms[0].MatchFields) > 0 || len(ns.NodeSelectorTerms[0].MatchExpressions) > 0 {
         // We have a valid node selector.
-        return nodeSelector, nil
+        return ns, nil
     }
 
     // Available everywhere.
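The capacity bookkeeping in checkAvailableCapacity above reduces to resource.Quantity arithmetic: the amounts consumed by already allocated (or currently allocating) devices are subtracted in place from each referenced pool, and a candidate device fits only while the remainder compares greater than or equal to what it wants to consume. A self-contained sketch of that arithmetic, with illustrative values that are not part of the commit:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        pool := resource.MustParse("8Gi")  // total capacity of the pool
        inUse := resource.MustParse("6Gi") // consumed by devices that are already allocated
        pool.Sub(inUse)                    // remaining capacity; may even go negative, which counts as "none left"

        request := resource.MustParse("4Gi") // what the candidate device consumes
        fits := pool.Cmp(request) >= 0       // false here: 2Gi < 4Gi, so the device is rejected
        fmt.Println(pool.String(), fits)     // prints "2Gi false"
    }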
@ -41,36 +41,38 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
region1 = "region-1"
|
||||
region2 = "region-2"
|
||||
node1 = "node-1"
|
||||
node2 = "node-2"
|
||||
classA = "class-a"
|
||||
classB = "class-b"
|
||||
driverA = "driver-a"
|
||||
driverB = "driver-b"
|
||||
pool1 = "pool-1"
|
||||
pool2 = "pool-2"
|
||||
pool3 = "pool-3"
|
||||
pool4 = "pool-4"
|
||||
req0 = "req-0"
|
||||
req1 = "req-1"
|
||||
req2 = "req-2"
|
||||
req3 = "req-3"
|
||||
subReq0 = "subReq-0"
|
||||
subReq1 = "subReq-1"
|
||||
req0SubReq0 = "req-0/subReq-0"
|
||||
req0SubReq1 = "req-0/subReq-1"
|
||||
req1SubReq0 = "req-1/subReq-0"
|
||||
req1SubReq1 = "req-1/subReq-1"
|
||||
claim0 = "claim-0"
|
||||
claim1 = "claim-1"
|
||||
slice1 = "slice-1"
|
||||
slice2 = "slice-2"
|
||||
device1 = "device-1"
|
||||
device2 = "device-2"
|
||||
device3 = "device-3"
|
||||
device4 = "device-4"
|
||||
region1 = "region-1"
|
||||
region2 = "region-2"
|
||||
node1 = "node-1"
|
||||
node2 = "node-2"
|
||||
classA = "class-a"
|
||||
classB = "class-b"
|
||||
driverA = "driver-a"
|
||||
driverB = "driver-b"
|
||||
pool1 = "pool-1"
|
||||
pool2 = "pool-2"
|
||||
pool3 = "pool-3"
|
||||
pool4 = "pool-4"
|
||||
req0 = "req-0"
|
||||
req1 = "req-1"
|
||||
req2 = "req-2"
|
||||
req3 = "req-3"
|
||||
subReq0 = "subReq-0"
|
||||
subReq1 = "subReq-1"
|
||||
req0SubReq0 = "req-0/subReq-0"
|
||||
req0SubReq1 = "req-0/subReq-1"
|
||||
req1SubReq0 = "req-1/subReq-0"
|
||||
req1SubReq1 = "req-1/subReq-1"
|
||||
claim0 = "claim-0"
|
||||
claim1 = "claim-1"
|
||||
slice1 = "slice-1"
|
||||
slice2 = "slice-2"
|
||||
device1 = "device-1"
|
||||
device2 = "device-2"
|
||||
device3 = "device-3"
|
||||
device4 = "device-4"
|
||||
capacityPool1 = "capacity-pool-1"
|
||||
capacityPool2 = "capacity-pool-2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -241,11 +243,71 @@ func device(name string, capacity map[resourceapi.QualifiedName]resource.Quantit
|
||||
Name: name,
|
||||
Basic: &resourceapi.BasicDevice{
|
||||
Attributes: attributes,
|
||||
Capacity: toDeviceCapacity(capacity),
|
||||
},
|
||||
}
|
||||
device.Basic.Capacity = make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity, len(capacity))
|
||||
for name, quantity := range capacity {
|
||||
device.Basic.Capacity[name] = resourceapi.DeviceCapacity{Value: quantity}
|
||||
return wrapDevice(device)
|
||||
}
|
||||
|
||||
const (
|
||||
fromDeviceCapacityConsumption = "fromDeviceCapacityConsumption"
|
||||
)
|
||||
|
||||
func compositeDevice(name string, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
|
||||
consumesCapacity ...resourceapi.DeviceCapacityConsumption) resourceapi.Device {
|
||||
|
||||
device := resourceapi.Device{
|
||||
Name: name,
|
||||
Composite: &resourceapi.CompositeDevice{
|
||||
Attributes: attributes,
|
||||
},
|
||||
}
|
||||
|
||||
switch capacity := capacity.(type) {
|
||||
case map[resourceapi.QualifiedName]resource.Quantity:
|
||||
device.Composite.Capacity = toDeviceCapacity(capacity)
|
||||
case string:
|
||||
if capacity == fromDeviceCapacityConsumption {
|
||||
c := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity)
|
||||
for _, dcc := range consumesCapacity {
|
||||
for name, cap := range dcc.Capacity {
|
||||
if _, found := c[name]; found {
|
||||
panic(fmt.Sprintf("same capacity found in multiple device capacity consumptions %q", name))
|
||||
}
|
||||
c[name] = cap
|
||||
}
|
||||
}
|
||||
device.Composite.Capacity = c
|
||||
} else {
|
||||
panic(fmt.Sprintf("unexpected capacity value %q", capacity))
|
||||
}
|
||||
case nil:
|
||||
// nothing to do
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected capacity type %T: %+v", capacity, capacity))
|
||||
}
|
||||
|
||||
device.Composite.ConsumesCapacity = consumesCapacity
|
||||
return device
|
||||
}
|
||||
|
||||
func compositeDeviceWithNodeSelector(name string, nodeSelection any, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
|
||||
consumesCapacity ...resourceapi.DeviceCapacityConsumption) resourceapi.Device {
|
||||
device := compositeDevice(name, capacity, attributes, consumesCapacity...)
|
||||
|
||||
switch nodeSelection := nodeSelection.(type) {
|
||||
case *v1.NodeSelector:
|
||||
device.Composite.NodeSelector = nodeSelection
|
||||
case string:
|
||||
if nodeSelection == nodeSelectionAll {
|
||||
device.Composite.AllNodes = true
|
||||
} else if nodeSelection == nodeSelectionPerDevice {
|
||||
panic("nodeSelectionPerDevice is not supported for devices")
|
||||
} else {
|
||||
device.Composite.NodeName = nodeSelection
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected nodeSelection type %T: %+v", nodeSelection, nodeSelection))
|
||||
}
|
||||
return wrapDevice(device)
|
||||
}
|
||||
@ -263,10 +325,24 @@ func (in wrapDevice) withTaints(taints ...resourceapi.DeviceTaint) wrapDevice {
|
||||
return wrapDevice(*device)
|
||||
}
|
||||
|
||||
func deviceCapacityConsumption(capacityPool string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.DeviceCapacityConsumption {
|
||||
return resourceapi.DeviceCapacityConsumption{
|
||||
CapacityPool: capacityPool,
|
||||
Capacity: toDeviceCapacity(capacity),
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
nodeSelectionAll = "nodeSelectionAll"
|
||||
nodeSelectionPerDevice = "nodeSelectionPerDevice"
|
||||
)
|
||||
|
||||
// generate a ResourceSlice object with the given name, node,
|
||||
// driver and pool names, generation and a list of devices.
|
||||
// The nodeSelection parameter may be a string (= node name),
|
||||
// true (= all nodes), or a node selector (= specific nodes).
|
||||
// The nodeSelection parameter may be a string with the value
|
||||
// nodeSelectionAll for all nodes, the value nodeSelectionPerDevice
|
||||
// for per device node selection, or any other value to set the
|
||||
// node name. Providing a node selectors sets the NodeSelector field.
|
||||
func slice(name string, nodeSelection any, pool, driver string, devices ...wrapDevice) *resourceapi.ResourceSlice {
|
||||
slice := &resourceapi.ResourceSlice{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -287,13 +363,14 @@ func slice(name string, nodeSelection any, pool, driver string, devices ...wrapD
|
||||
switch nodeSelection := nodeSelection.(type) {
|
||||
case *v1.NodeSelector:
|
||||
slice.Spec.NodeSelector = nodeSelection
|
||||
case bool:
|
||||
if !nodeSelection {
|
||||
panic("nodeSelection == false is not valid")
|
||||
}
|
||||
slice.Spec.AllNodes = true
|
||||
case string:
|
||||
slice.Spec.NodeName = nodeSelection
|
||||
if nodeSelection == nodeSelectionAll {
|
||||
slice.Spec.AllNodes = true
|
||||
} else if nodeSelection == nodeSelectionPerDevice {
|
||||
slice.Spec.PerDeviceNodeSelection = true
|
||||
} else {
|
||||
slice.Spec.NodeName = nodeSelection
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected nodeSelection type %T: %+v", nodeSelection, nodeSelection))
|
||||
}
|
||||
@ -449,6 +526,28 @@ func sliceWithMultipleDevices(name string, nodeSelection any, pool, driver strin
|
||||
return slice(name, nodeSelection, pool, driver, devices...)
|
||||
}
|
||||
|
||||
func sliceWithCapacityPools(name string, nodeSelection any, pool, driver string, capacityPools []resourceapi.CapacityPool, devices ...resourceapi.Device) *resourceapi.ResourceSlice {
|
||||
slice := slice(name, nodeSelection, pool, driver)
|
||||
slice.Spec.CapacityPools = capacityPools
|
||||
slice.Spec.Devices = devices
|
||||
return slice
|
||||
}
|
||||
|
||||
func capacityPool(name string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.CapacityPool {
|
||||
return resourceapi.CapacityPool{
|
||||
Name: name,
|
||||
Capacity: toDeviceCapacity(capacity),
|
||||
}
|
||||
}
|
||||
|
||||
func toDeviceCapacity(capacity map[resourceapi.QualifiedName]resource.Quantity) map[resourceapi.QualifiedName]resourceapi.DeviceCapacity {
|
||||
out := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity, len(capacity))
|
||||
for name, quantity := range capacity {
|
||||
out[name] = resourceapi.DeviceCapacity{Value: quantity}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestAllocator(t *testing.T) {
|
||||
nonExistentAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "NonExistentAttribute")
|
||||
boolAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "boolAttribute")
|
||||
@@ -481,9 +580,7 @@ func TestAllocator(t *testing.T) {
     }
 
     testcases := map[string]struct {
-        adminAccess      bool
-        prioritizedList  bool
-        deviceTaints     bool
+        features         Features
         claimsToAllocate []wrapResourceClaim
         allocatedDevices []DeviceID
         classes          []*resourceapi.DeviceClass
@ -1020,7 +1117,9 @@ func TestAllocator(t *testing.T) {
|
||||
expectResults: nil,
|
||||
},
|
||||
"all-devices-some-allocated-admin-access": {
|
||||
adminAccess: true,
|
||||
features: Features{
|
||||
AdminAccess: true,
|
||||
},
|
||||
claimsToAllocate: func() []wrapResourceClaim {
|
||||
c := claim(claim0, req0, classA)
|
||||
c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
|
||||
@ -1146,7 +1245,7 @@ func TestAllocator(t *testing.T) {
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(
|
||||
sliceWithOneDevice(slice1, nodeLabelSelector(regionKey, region1), pool1, driverA),
|
||||
sliceWithOneDevice(slice1, true, pool2, driverA),
|
||||
sliceWithOneDevice(slice1, nodeSelectionAll, pool2, driverA),
|
||||
sliceWithOneDevice(slice1, nodeLabelSelector(planetKey, planetValueEarth), pool3, driverA),
|
||||
sliceWithOneDevice(slice1, localNodeSelector(node1), pool4, driverA),
|
||||
),
|
||||
@ -1217,7 +1316,9 @@ func TestAllocator(t *testing.T) {
|
||||
expectResults: nil,
|
||||
},
|
||||
"admin-access-disabled": {
|
||||
adminAccess: false,
|
||||
features: Features{
|
||||
AdminAccess: false,
|
||||
},
|
||||
claimsToAllocate: func() []wrapResourceClaim {
|
||||
c := claim(claim0, req0, classA)
|
||||
c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
|
||||
@ -1231,7 +1332,9 @@ func TestAllocator(t *testing.T) {
|
||||
expectError: gomega.MatchError(gomega.ContainSubstring("claim claim-0, request req-0: admin access is requested, but the feature is disabled")),
|
||||
},
|
||||
"admin-access-enabled": {
|
||||
adminAccess: true,
|
||||
features: Features{
|
||||
AdminAccess: true,
|
||||
},
|
||||
claimsToAllocate: func() []wrapResourceClaim {
|
||||
c := claim(claim0, req0, classA)
|
||||
c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
|
||||
@ -1544,18 +1647,6 @@ func TestAllocator(t *testing.T) {
|
||||
|
||||
expectError: gomega.MatchError(gomega.ContainSubstring("empty constraint (unsupported constraint type?)")),
|
||||
},
|
||||
"unknown-device": {
|
||||
claimsToAllocate: objects(claim(claim0, req0, classA)),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(
|
||||
func() *resourceapi.ResourceSlice {
|
||||
slice := sliceWithOneDevice(slice1, node1, pool1, driverA)
|
||||
slice.Spec.Devices[0].Basic = nil /* empty = unknown future extension */
|
||||
return slice
|
||||
}(),
|
||||
),
|
||||
node: node(node1, region1),
|
||||
},
|
||||
"invalid-CEL-one-device": {
|
||||
claimsToAllocate: objects(
|
||||
func() wrapResourceClaim {
|
||||
@ -2104,8 +2195,469 @@ func TestAllocator(t *testing.T) {
|
||||
deviceAllocationResult(req0SubReq1, driverA, pool1, device1, false),
|
||||
)},
|
||||
},
|
||||
"partitionable-devices-single-device": {
|
||||
features: Features{
|
||||
PartitionableDevices: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil, request(req0, classA, 1)),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("8Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device1, nil, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
node: node(node1, region1),
|
||||
expectResults: []any{allocationResult(
|
||||
localNodeSelector(node1),
|
||||
deviceAllocationResult(req0, driverA, pool1, device1, false),
|
||||
)},
|
||||
},
|
||||
"partitionable-devices-multiple-devices": {
|
||||
features: Features{
|
||||
PrioritizedList: true,
|
||||
PartitionableDevices: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil,
|
||||
request(req0, classA, 1),
|
||||
requestWithPrioritizedList(req1,
|
||||
subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
|
||||
CEL: &resourceapi.CELDeviceSelector{
|
||||
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("6Gi")) >= 0`, driverA),
|
||||
}},
|
||||
),
|
||||
subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
|
||||
CEL: &resourceapi.CELDeviceSelector{
|
||||
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
|
||||
}},
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("8Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
}, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("6Gi"),
|
||||
}, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("6Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device3, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
node: node(node1, region1),
|
||||
expectResults: []any{allocationResult(
|
||||
localNodeSelector(node1),
|
||||
deviceAllocationResult(req0, driverA, pool1, device1, false),
|
||||
deviceAllocationResult(req1SubReq1, driverA, pool1, device3, false),
|
||||
)},
|
||||
},
|
||||
"partitionable-devices-multiple-capacity-pools": {
|
||||
features: Features{
|
||||
PrioritizedList: true,
|
||||
PartitionableDevices: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil,
|
||||
request(req0, classA, 1),
|
||||
requestWithPrioritizedList(req1,
|
||||
subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
|
||||
CEL: &resourceapi.CELDeviceSelector{
|
||||
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("6Gi")) >= 0`, driverA),
|
||||
}},
|
||||
),
|
||||
subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
|
||||
CEL: &resourceapi.CELDeviceSelector{
|
||||
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
|
||||
}},
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("18Gi"),
|
||||
},
|
||||
),
|
||||
capacityPool(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"cpus": resource.MustParse("8"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
deviceCapacityConsumption(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"cpus": resource.MustParse("4"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("6Gi"),
|
||||
},
|
||||
),
|
||||
deviceCapacityConsumption(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"cpus": resource.MustParse("6"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device3, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
deviceCapacityConsumption(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"cpus": resource.MustParse("4"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
node: node(node1, region1),
|
||||
expectResults: []any{allocationResult(
|
||||
localNodeSelector(node1),
|
||||
deviceAllocationResult(req0, driverA, pool1, device1, false),
|
||||
deviceAllocationResult(req1SubReq1, driverA, pool1, device3, false),
|
||||
)},
|
||||
},
|
||||
"partitionable-devices-no-capacity-available": {
|
||||
features: Features{
|
||||
PartitionableDevices: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil,
|
||||
request(req0, classA, 1),
|
||||
),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("18Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("16Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
allocatedDevices: []DeviceID{
|
||||
MakeDeviceID(driverA, pool1, device2),
|
||||
},
|
||||
node: node(node1, region1),
|
||||
expectResults: nil,
|
||||
},
|
||||
"partitionable-devices-overallocated-capacity-pool": {
|
||||
features: Features{
|
||||
PartitionableDevices: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil,
|
||||
request(req0, classA, 1),
|
||||
),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("18Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("20Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
allocatedDevices: []DeviceID{
|
||||
MakeDeviceID(driverA, pool1, device2),
|
||||
},
|
||||
node: node(node1, region1),
|
||||
expectResults: nil,
|
||||
},
|
||||
"partitionable-devices-disabled": {
|
||||
features: Features{
|
||||
PartitionableDevices: false,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil,
|
||||
request(req0, classA, 1),
|
||||
),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("18Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device1, nil, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
node: node(node1, region1),
|
||||
expectResults: nil,
|
||||
},
|
||||
"partitionable-devices-per-device-node-selection-nodename": {
|
||||
features: Features{
|
||||
PartitionableDevices: true,
|
||||
PrioritizedList: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil,
|
||||
requestWithPrioritizedList(req0,
|
||||
subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
|
||||
CEL: &resourceapi.CELDeviceSelector{
|
||||
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("6Gi")) >= 0`, driverA),
|
||||
}},
|
||||
),
|
||||
subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
|
||||
CEL: &resourceapi.CELDeviceSelector{
|
||||
Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
|
||||
}},
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("18Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDeviceWithNodeSelector(device1, node1, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDeviceWithNodeSelector(device2, node2, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("6Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
node: node(node1, region1),
|
||||
expectResults: []any{allocationResult(
|
||||
localNodeSelector(node1),
|
||||
deviceAllocationResult(req0SubReq1, driverA, pool1, device1, false),
|
||||
)},
|
||||
},
|
||||
"partitionable-devices-per-device-node-selection-node-selector": {
|
||||
features: Features{
|
||||
PartitionableDevices: true,
|
||||
PrioritizedList: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil, request(req0, classA, 1)),
|
||||
),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("18Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
node: node(node1, region1),
|
||||
expectResults: []any{allocationResult(
|
||||
&v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{Key: regionKey, Operator: v1.NodeSelectorOpIn, Values: []string{region1}},
|
||||
},
|
||||
}},
|
||||
},
|
||||
deviceAllocationResult(req0, driverA, pool1, device1, false),
|
||||
)},
|
||||
},
|
||||
"partitionable-devices-per-device-node-selection-node-selector-multiple-devices": {
|
||||
features: Features{
|
||||
PartitionableDevices: true,
|
||||
PrioritizedList: true,
|
||||
},
|
||||
claimsToAllocate: objects(
|
||||
claimWithRequests(claim0, nil,
|
||||
request(req0, classA, 3),
|
||||
request(req1, classB, 3),
|
||||
),
|
||||
),
|
||||
classes: objects(class(classA, driverA), class(classB, driverB)),
|
||||
slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
|
||||
objects(
|
||||
capacityPool(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("18Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDeviceWithNodeSelector(device2, node1, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDeviceWithNodeSelector(device3, nodeSelectionAll, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool1,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
), sliceWithCapacityPools(slice2, node1, pool2, driverB,
|
||||
objects(
|
||||
capacityPool(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("12Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
compositeDevice(device3, fromDeviceCapacityConsumption, nil,
|
||||
deviceCapacityConsumption(capacityPool2,
|
||||
map[resourceapi.QualifiedName]resource.Quantity{
|
||||
"memory": resource.MustParse("4Gi"),
|
||||
},
|
||||
),
|
||||
),
|
||||
)),
|
||||
node: node(node1, region1),
|
||||
expectResults: []any{allocationResult(
|
||||
&v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{Key: fieldNameKey, Operator: v1.NodeSelectorOpIn, Values: []string{node1}},
|
||||
},
|
||||
}},
|
||||
},
|
||||
deviceAllocationResult(req0, driverA, pool1, device1, false),
|
||||
deviceAllocationResult(req0, driverA, pool1, device2, false),
|
||||
deviceAllocationResult(req0, driverA, pool1, device3, false),
|
||||
deviceAllocationResult(req1, driverB, pool2, device1, false),
|
||||
deviceAllocationResult(req1, driverB, pool2, device2, false),
|
||||
deviceAllocationResult(req1, driverB, pool2, device3, false),
|
||||
)},
|
||||
},
|
||||
"tainted-two-devices": {
|
||||
deviceTaints: true,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
},
|
||||
claimsToAllocate: objects(claim(claim0, req0, classA)),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(slice(slice1, node1, pool1, driverA,
|
||||
@ -2115,7 +2667,9 @@ func TestAllocator(t *testing.T) {
|
||||
node: node(node1, region1),
|
||||
},
|
||||
"tainted-one-device-two-taints": {
|
||||
deviceTaints: true,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
},
|
||||
claimsToAllocate: objects(claim(claim0, req0, classA)),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(slice(slice1, node1, pool1, driverA,
|
||||
@ -2124,7 +2678,9 @@ func TestAllocator(t *testing.T) {
|
||||
node: node(node1, region1),
|
||||
},
|
||||
"tainted-two-devices-tolerated": {
|
||||
deviceTaints: true,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
},
|
||||
claimsToAllocate: objects(claim(claim0, req0, classA).withTolerations(tolerationNoExecute)),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(slice(slice1, node1, pool1, driverA,
|
||||
@ -2138,7 +2694,9 @@ func TestAllocator(t *testing.T) {
|
||||
)},
|
||||
},
|
||||
"tainted-one-device-two-taints-both-tolerated": {
|
||||
deviceTaints: true,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
},
|
||||
claimsToAllocate: objects(claim(claim0, req0, classA).withTolerations(tolerationNoSchedule, tolerationNoExecute)),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(slice(slice1, node1, pool1, driverA,
|
||||
@ -2151,7 +2709,9 @@ func TestAllocator(t *testing.T) {
|
||||
)},
|
||||
},
|
||||
"tainted-disabled": {
|
||||
deviceTaints: false,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
},
|
||||
claimsToAllocate: objects(claim(claim0, req0, classA)),
|
||||
classes: objects(class(classA, driverA)),
|
||||
slices: objects(slice(slice1, node1, pool1, driverA,
|
||||
@ -2164,8 +2724,10 @@ func TestAllocator(t *testing.T) {
|
||||
)},
|
||||
},
|
||||
"tainted-prioritized-list": {
|
||||
deviceTaints: true,
|
||||
prioritizedList: true,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
PrioritizedList: true,
|
||||
},
|
||||
claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
|
||||
subRequest(subReq0, classB, 1),
|
||||
subRequest(subReq1, classA, 1),
|
||||
@ -2177,8 +2739,10 @@ func TestAllocator(t *testing.T) {
|
||||
node: node(node1, region1),
|
||||
},
|
||||
"tainted-prioritized-list-disabled": {
|
||||
deviceTaints: false,
|
||||
prioritizedList: true,
|
||||
features: Features{
|
||||
DeviceTaints: false,
|
||||
PrioritizedList: true,
|
||||
},
|
||||
claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
|
||||
subRequest(subReq0, classB, 1),
|
||||
subRequest(subReq1, classA, 1),
|
||||
@ -2195,8 +2759,10 @@ func TestAllocator(t *testing.T) {
|
||||
)},
|
||||
},
|
||||
"tainted-admin-access": {
|
||||
deviceTaints: true,
|
||||
adminAccess: true,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
PrioritizedList: true,
|
||||
},
|
||||
claimsToAllocate: func() []wrapResourceClaim {
|
||||
c := claim(claim0, req0, classA)
|
||||
c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
|
||||
@ -2213,8 +2779,10 @@ func TestAllocator(t *testing.T) {
|
||||
node: node(node1, region1),
|
||||
},
|
||||
"tainted-admin-access-disabled": {
|
||||
deviceTaints: false,
|
||||
adminAccess: true,
|
||||
features: Features{
|
||||
DeviceTaints: false,
|
||||
AdminAccess: true,
|
||||
},
|
||||
claimsToAllocate: func() []wrapResourceClaim {
|
||||
c := claim(claim0, req0, classA)
|
||||
c.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
|
||||
@ -2236,7 +2804,9 @@ func TestAllocator(t *testing.T) {
|
||||
)},
|
||||
},
|
||||
"tainted-all-devices-single": {
|
||||
deviceTaints: true,
|
||||
features: Features{
|
||||
DeviceTaints: true,
|
||||
},
|
||||
claimsToAllocate: objects(claimWithRequests(claim0, nil, resourceapi.DeviceRequest{
|
||||
Name: req0,
|
||||
AllocationMode: resourceapi.DeviceAllocationModeAll,
|
||||
@ -2249,7 +2819,9 @@ func TestAllocator(t *testing.T) {
|
||||
node: node(node1, region1),
|
||||
},
|
||||
"tainted-all-devices-single-disabled": {
|
||||
deviceTaints: false,
|
||||
features: Features{
|
||||
DeviceTaints: false,
|
||||
},
|
||||
claimsToAllocate: objects(claimWithRequests(claim0, nil, resourceapi.DeviceRequest{
|
||||
Name: req0,
|
||||
AllocationMode: resourceapi.DeviceAllocationModeAll,
|
||||
@@ -2284,7 +2856,7 @@ func TestAllocator(t *testing.T) {
             allocatedDevices := slices.Clone(tc.allocatedDevices)
             slices := slices.Clone(tc.slices)
 
-            allocator, err := NewAllocator(ctx, tc.adminAccess, tc.prioritizedList, tc.deviceTaints, unwrap(claimsToAllocate...), sets.New(allocatedDevices...), classLister, slices, cel.NewCache(1))
+            allocator, err := NewAllocator(ctx, tc.features, unwrap(claimsToAllocate...), sets.New(allocatedDevices...), classLister, slices, cel.NewCache(1))
             g.Expect(err).ToNot(gomega.HaveOccurred())
 
             results, err := allocator.Allocate(ctx, tc.node)
@@ -33,7 +33,7 @@ import (
 // Out-dated slices are silently ignored. Pools may be incomplete (not all
 // required slices available) or invalid (for example, device names not unique).
 // Both is recorded in the result.
-func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node) ([]*Pool, error) {
+func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node, partitionableDevicesEnabled bool) ([]*Pool, error) {
     pools := make(map[PoolID]*Pool)
     nodeName := ""
     if node != nil {
@@ -44,12 +44,12 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
         switch {
         case slice.Spec.NodeName != "":
             if slice.Spec.NodeName == nodeName {
-                if err := addSlice(pools, slice); err != nil {
+                if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
                     return nil, fmt.Errorf("add node slice %s: %w", slice.Name, err)
                 }
             }
         case slice.Spec.AllNodes:
-            if err := addSlice(pools, slice); err != nil {
+            if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
                 return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
             }
         case slice.Spec.NodeSelector != nil:
@@ -59,10 +59,17 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
                 return nil, fmt.Errorf("node selector in resource slice %s: %w", slice.Name, err)
             }
             if selector.Match(node) {
-                if err := addSlice(pools, slice); err != nil {
+                if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
                     return nil, fmt.Errorf("add matching slice %s: %w", slice.Name, err)
                 }
             }
+        case slice.Spec.PerDeviceNodeSelection:
+            // We add the slice here regardless of whether the partitionable devices feature is
+            // enabled. If we don't, the full slice will be considered incomplete. So we filter
+            // out devices that have fields from the partitionable devices feature set later.
+            if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
+                return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
+            }
         default:
             // Nothing known was set. This must be some future, unknown extension,
             // so we don't know how to handle it. We may still be able to allocated from
@@ -87,9 +94,16 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
     return result, nil
 }
 
-func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice) error {
+func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice, node *v1.Node, partitionableDevicesEnabled bool) error {
     var slice draapi.ResourceSlice
-    if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, nil); err != nil {
+    sliceScope := draapi.SliceScope{
+        SliceContext: draapi.SliceContext{
+            Slice:                       s,
+            Node:                        node,
+            PartitionableDevicesEnabled: partitionableDevicesEnabled,
+        },
+    }
+    if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, sliceScope); err != nil {
         return fmt.Errorf("convert ResourceSlice: %w", err)
     }
 
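Any caller of GatherPools outside this file has to pass the new flag as well; a minimal sketch mirroring the call made from Allocate earlier in the diff (variable names are placeholders, not part of this commit):

    pools, err := GatherPools(ctx, resourceSlices, node, features.PartitionableDevices)
    if err != nil {
        return fmt.Errorf("gather pool information: %w", err)
    }
    fmt.Println("eligible pools:", len(pools))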