Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 20:53:33 +00:00

Commit 6d7f11689d (parent: ecba6cde1d)

Complete feature impl, fix issues, add perDeviceNodeSelection support, add tests, address comments, etc.
@@ -102,10 +102,11 @@ type informationForClaim struct {
 
 // DynamicResources is a plugin that ensures that ResourceClaims are allocated.
 type DynamicResources struct {
 	enabled                    bool
 	enableAdminAccess          bool
 	enablePrioritizedList      bool
 	enableSchedulingQueueHint  bool
+	enablePartitionableDevices bool
 	enableDeviceTaints         bool
 
 	fh framework.Handle
@@ -122,11 +123,12 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
 	}
 
 	pl := &DynamicResources{
 		enabled:                    true,
 		enableAdminAccess:          fts.EnableDRAAdminAccess,
 		enableDeviceTaints:         fts.EnableDRADeviceTaints,
 		enablePrioritizedList:      fts.EnableDRAPrioritizedList,
 		enableSchedulingQueueHint:  fts.EnableSchedulingQueueHint,
+		enablePartitionableDevices: fts.EnablePartitionableDevices,
 
 		fh:        fh,
 		clientset: fh.ClientSet(),
@@ -455,8 +457,9 @@ func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 		return nil, statusError(logger, err)
 	}
 	features := structured.Features{
 		AdminAccess:          pl.enableAdminAccess,
 		PrioritizedList:      pl.enablePrioritizedList,
+		PartitionableDevices: pl.enablePartitionableDevices,
 		DeviceTaints:         pl.enableDeviceTaints,
 	}
 	allocator, err := structured.NewAllocator(ctx, features, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache)
@@ -33,5 +33,6 @@ type Features struct {
 	EnableSchedulingQueueHint    bool
 	EnableAsyncPreemption       bool
 	EnablePodLevelResources      bool
+	EnablePartitionableDevices   bool
 	EnableStorageCapacityScoring bool
 }
@@ -59,6 +59,7 @@ func NewInTreeRegistry() runtime.Registry {
 		EnableSchedulingQueueHint:    feature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints),
 		EnableAsyncPreemption:        feature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption),
 		EnablePodLevelResources:      feature.DefaultFeatureGate.Enabled(features.PodLevelResources),
+		EnablePartitionableDevices:   feature.DefaultFeatureGate.Enabled(features.DRAPartitionableDevices),
 		EnableStorageCapacityScoring: feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring),
 	}
 
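Taken together, the five hunks above thread a single boolean from the DRAPartitionableDevices feature gate through the scheduler plugin into the structured allocator. A minimal standalone sketch of that plumbing (the type names below are illustrative stand-ins, not the Kubernetes types):

package main

import "fmt"

// Stand-ins for the plugin-level and allocator-level feature structs.
type pluginFeatures struct{ EnablePartitionableDevices bool }
type allocatorFeatures struct{ PartitionableDevices bool }

// newPlugin mirrors New() copying fts.EnablePartitionableDevices into the
// plugin, and PreFilter copying it into structured.Features.
func newPlugin(fts pluginFeatures) allocatorFeatures {
	return allocatorFeatures{PartitionableDevices: fts.EnablePartitionableDevices}
}

func main() {
	feats := newPlugin(pluginFeatures{EnablePartitionableDevices: true})
	fmt.Println(feats.PartitionableDevices) // true: the gate reached the allocator
}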
@@ -141,7 +141,9 @@ type Pool struct {
 // Slice is turned into one ResourceSlice by the controller.
 type Slice struct {
 	// Devices lists all devices which are part of the slice.
 	Devices []resourceapi.Device
+	SharedCounters []resourceapi.CounterSet
+	PerDeviceNodeSelection *bool
 }
 
 // +k8s:deepcopy-gen=true
@@ -548,7 +550,9 @@ func (c *Controller) syncPool(ctx context.Context, poolName string) error {
 		if !apiequality.Semantic.DeepEqual(&currentSlice.Spec.Pool, &desiredPool) ||
 			!apiequality.Semantic.DeepEqual(currentSlice.Spec.NodeSelector, pool.NodeSelector) ||
 			currentSlice.Spec.AllNodes != desiredAllNodes ||
-			!DevicesDeepEqual(currentSlice.Spec.Devices, pool.Slices[i].Devices) {
+			!DevicesDeepEqual(currentSlice.Spec.Devices, pool.Slices[i].Devices) ||
+			!apiequality.Semantic.DeepEqual(currentSlice.Spec.SharedCounters, pool.Slices[i].SharedCounters) ||
+			!apiequality.Semantic.DeepEqual(currentSlice.Spec.PerDeviceNodeSelection, pool.Slices[i].PerDeviceNodeSelection) {
 			changedDesiredSlices.Insert(i)
 			logger.V(5).Info("Need to update slice", "slice", klog.KObj(currentSlice), "matchIndex", i)
 		}
@@ -588,6 +592,8 @@ func (c *Controller) syncPool(ctx context.Context, poolName string) error {
 			// have listed the existing slice.
 			slice.Spec.NodeSelector = pool.NodeSelector
 			slice.Spec.AllNodes = desiredAllNodes
+			slice.Spec.SharedCounters = pool.Slices[i].SharedCounters
+			slice.Spec.PerDeviceNodeSelection = pool.Slices[i].PerDeviceNodeSelection
 			// Preserve TimeAdded from existing device, if there is a matching device and taint.
 			slice.Spec.Devices = copyTaintTimeAdded(slice.Spec.Devices, pool.Slices[i].Devices)
 
@@ -639,12 +645,14 @@ func (c *Controller) syncPool(ctx context.Context, poolName string) error {
 					GenerateName: generateName,
 				},
 				Spec: resourceapi.ResourceSliceSpec{
 					Driver:       c.driverName,
 					Pool:         desiredPool,
 					NodeName:     nodeName,
 					NodeSelector: pool.NodeSelector,
 					AllNodes:     desiredAllNodes,
 					Devices:      pool.Slices[i].Devices,
+					SharedCounters:         pool.Slices[i].SharedCounters,
+					PerDeviceNodeSelection: pool.Slices[i].PerDeviceNodeSelection,
 				},
 			}
 
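The controller hunks above extend the desired-state comparison and both the update and create paths with the two new fields. A standalone sketch of the extended comparison, using illustrative stand-in types rather than the real resourceapi ones (the real code uses apiequality.Semantic.DeepEqual):

package main

import (
	"fmt"
	"reflect"
)

// sliceSpec stands in for resourceapi.ResourceSliceSpec.
type sliceSpec struct {
	Devices                []string
	SharedCounters         []string
	PerDeviceNodeSelection *bool
}

// needsUpdate mirrors the extended condition in syncPool: drift in any of the
// newly compared fields now marks the slice as changed.
func needsUpdate(current, desired sliceSpec) bool {
	return !reflect.DeepEqual(current.Devices, desired.Devices) ||
		!reflect.DeepEqual(current.SharedCounters, desired.SharedCounters) ||
		!reflect.DeepEqual(current.PerDeviceNodeSelection, desired.PerDeviceNodeSelection)
}

func main() {
	yes := true
	current := sliceSpec{Devices: []string{"gpu-0"}}
	desired := sliceSpec{Devices: []string{"gpu-0"}, PerDeviceNodeSelection: &yes}
	fmt.Println(needsUpdate(current, desired)) // true: PerDeviceNodeSelection drifted
}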
@@ -27,7 +27,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	resourceapi "k8s.io/api/resource/v1beta1"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/dynamic-resource-allocation/api"
 	draapi "k8s.io/dynamic-resource-allocation/api"
 	"k8s.io/dynamic-resource-allocation/cel"
 	"k8s.io/dynamic-resource-allocation/resourceclaim"
@@ -61,6 +60,7 @@ type Features struct {
 	AdminAccess          bool
 	PrioritizedList      bool
 	PartitionableDevices bool
+	DeviceTaints         bool
 }
 
 // NewAllocator returns an allocator for a certain set of claims or an error if
@@ -118,6 +118,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 		Allocator:            a,
 		ctx:                  ctx, // all methods share the same a and thus ctx
 		logger:               klog.FromContext(ctx),
+		node:                 node,
 		deviceMatchesRequest: make(map[matchKey]bool),
 		constraints:          make([][]constraint, len(a.claimsToAllocate)),
 		requestData:          make(map[requestIndices]requestData),
@@ -127,7 +128,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 	defer alloc.logger.V(5).Info("Done with allocation", "success", len(finalResult) == len(alloc.claimsToAllocate), "err", finalErr)
 
 	// First determine all eligible pools.
-	pools, err := GatherPools(ctx, alloc.slices, node, a.features.PartitionableDevices)
+	pools, err := GatherPools(ctx, alloc.slices, node, a.features)
 	if err != nil {
 		return nil, fmt.Errorf("gather pool information: %w", err)
 	}
@@ -175,7 +176,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 
 			// Error out if the prioritizedList feature is not enabled and the request
 			// has subrequests. This is to avoid surprising behavior for users.
-			if !a.prioritizedListEnabled && hasSubRequests {
+			if !a.features.PrioritizedList && hasSubRequests {
 				return nil, fmt.Errorf("claim %s, request %s: has subrequests, but the DRAPrioritizedList feature is disabled", klog.KObj(claim), request.Name)
 			}
 
@@ -385,7 +386,7 @@ func (a *allocator) validateDeviceRequest(request requestAccessor, parentRequest
 		}
 	}
 
-	if !a.adminAccessEnabled && request.hasAdminAccess() {
+	if !a.features.AdminAccess && request.hasAdminAccess() {
 		return requestData, fmt.Errorf("claim %s, request %s: admin access is requested, but the feature is disabled", klog.KObj(claim), request.name())
 	}
 
@@ -462,6 +463,7 @@ type allocator struct {
 	*Allocator
 	ctx                  context.Context
 	logger               klog.Logger
+	node                 *v1.Node
 	pools                []*Pool
 	deviceMatchesRequest map[matchKey]bool
 	constraints          [][]constraint // one list of constraints per claim
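A note on the refactor running through the preceding hunks: the allocator previously cached individual booleans (prioritizedListEnabled, adminAccessEnabled) and passed a lone flag into GatherPools; after this commit it stores one structured.Features value and every gate reads a.features.<Field>, so a new gate such as PartitionableDevices needs no extra plumbing. A minimal sketch of the pattern (illustrative types, not the allocator's):

package main

import "fmt"

// Features plays the role of structured.Features: one value carried by the
// allocator and consulted at every gate instead of separate boolean fields.
type Features struct {
	AdminAccess     bool
	PrioritizedList bool
}

type allocator struct{ features Features }

// validateRequest mirrors the gate: if !a.features.PrioritizedList && hasSubRequests { ... }
func (a *allocator) validateRequest(hasSubRequests bool) error {
	if !a.features.PrioritizedList && hasSubRequests {
		return fmt.Errorf("has subrequests, but the feature is disabled")
	}
	return nil
}

func main() {
	a := &allocator{features: Features{PrioritizedList: false}}
	fmt.Println(a.validateRequest(true)) // error: feature disabled
}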
@@ -514,9 +516,9 @@ type requestData struct {
 }
 
 type deviceWithID struct {
 	id    DeviceID
-	device *draapi.Device
+	basic *draapi.BasicDevice
 	slice *draapi.ResourceSlice
 }
 
 type internalAllocationResult struct {
@@ -527,7 +529,7 @@ type internalDeviceResult struct {
 	request       string // name of the request (if no subrequests) or the subrequest
 	parentRequest string // name of the request which contains the subrequest, empty otherwise
 	id            DeviceID
-	device        *draapi.Device
+	basic         *draapi.BasicDevice
 	slice         *draapi.ResourceSlice
 	adminAccess   *bool
 }
@@ -623,7 +625,7 @@ func (m *matchAttributeConstraint) add(requestName, subRequestName string, devic
 	return true
 }
 
-func (m *matchAttributeConstraint) remove(requestName, subRequestName string, device *draapi.Device, deviceID DeviceID) {
+func (m *matchAttributeConstraint) remove(requestName, subRequestName string, device *draapi.BasicDevice, deviceID DeviceID) {
 	if m.requestNames.Len() > 0 && !m.matches(requestName, subRequestName) {
 		// Device not affected by constraint.
 		return
@@ -642,7 +644,7 @@ func (m *matchAttributeConstraint) matches(requestName, subRequestName string) b
 	}
 }
 
-func lookupAttribute(device *draapi.Device, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
+func lookupAttribute(device *draapi.BasicDevice, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
 	// Fully-qualified match?
 	if attr, ok := device.Attributes[draapi.QualifiedName(attributeName)]; ok {
 		return &attr
@@ -809,9 +811,9 @@ func (alloc *allocator) allocateOne(r deviceIndices, allocateSubRequest bool) (b
 
 		// Finally treat as allocated and move on to the next device.
 		device := deviceWithID{
 			id:    deviceID,
-			device: &slice.Spec.Devices[deviceIndex],
+			basic: slice.Spec.Devices[deviceIndex].Basic,
 			slice: slice,
 		}
 		allocated, deallocate, err := alloc.allocateDevice(r, device, false)
 		if err != nil {
@@ -885,12 +887,31 @@ func (alloc *allocator) isSelectable(r requestIndices, requestData requestData,
 		return false, nil
 	}
 
+	if ptr.Deref(slice.Spec.PerDeviceNodeSelection, false) {
+		var nodeName string
+		var allNodes bool
+		if device.NodeName != nil {
+			nodeName = *device.NodeName
+		}
+		if device.AllNodes != nil {
+			allNodes = *device.AllNodes
+		}
+		matches, err := nodeMatches(alloc.node, nodeName, allNodes, device.NodeSelector)
+		if err != nil {
+			return false, err
+		}
+		if !matches {
+			alloc.deviceMatchesRequest[matchKey] = false
+			return false, nil
+		}
+	}
+
 	alloc.deviceMatchesRequest[matchKey] = true
 	return true, nil
 
 }
 
-func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.Device, deviceID DeviceID, class *resourceapi.DeviceClass, selectors []resourceapi.DeviceSelector) (bool, error) {
+func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDevice, deviceID DeviceID, class *resourceapi.DeviceClass, selectors []resourceapi.DeviceSelector) (bool, error) {
 	for i, selector := range selectors {
 		expr := alloc.celCache.GetOrCompile(selector.CEL.Expression)
 		if expr.Error != nil {
@@ -905,15 +926,13 @@ func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.Device,
 			return false, fmt.Errorf("claim %s: selector #%d: CEL compile error: %w", klog.KObj(alloc.claimsToAllocate[r.claimIndex]), i, expr.Error)
 		}
 
-		attributes := make(map[resourceapi.QualifiedName]resourceapi.DeviceAttribute)
-		if err := draapi.Convert_api_Attributes_To_v1beta1_Attributes(device.Attributes, attributes); err != nil {
-			return false, fmt.Errorf("convert attributes: %w", err)
-		}
-		capacity := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity)
-		if err := draapi.Convert_api_Capacity_To_v1beta1_Capacity(device.Capacity, capacity); err != nil {
-			return false, fmt.Errorf("convert capacity: %w", err)
-		}
-		matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: attributes, Capacity: capacity})
+		// If this conversion turns out to be expensive, the CEL package could be converted
+		// to use unique strings.
+		var d resourceapi.BasicDevice
+		if err := draapi.Convert_api_BasicDevice_To_v1beta1_BasicDevice(device, &d, nil); err != nil {
+			return false, fmt.Errorf("convert BasicDevice: %w", err)
+		}
+		matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: d.Attributes, Capacity: d.Capacity})
 		if class != nil {
 			alloc.logger.V(7).Info("CEL result", "device", deviceID, "class", klog.KObj(class), "selector", i, "expression", selector.CEL.Expression, "matches", matches, "actualCost", ptr.Deref(details.ActualCost(), 0), "err", err)
 		} else {
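The new isSelectable block above consults per-device placement fields when the slice opts into PerDeviceNodeSelection. The nodeMatches helper is internal and not shown in this diff, so the sketch below is an assumption about its behavior; it models the three placement fields (node name, all-nodes flag, selector result) that the hunk reads from the device:

package main

import "fmt"

// deviceMatchesNode is a stand-in for the real nodeMatches helper: an explicit
// node name wins, then the all-nodes flag, otherwise the selector decides.
// The three fields are mutually exclusive in the API, so at most one applies.
func deviceMatchesNode(schedNode, deviceNodeName string, allNodes, selectorMatches bool) bool {
	switch {
	case deviceNodeName != "":
		return deviceNodeName == schedNode
	case allNodes:
		return true
	default:
		return selectorMatches
	}
}

func main() {
	fmt.Println(deviceMatchesNode("node-1", "node-1", false, false)) // true: explicit node name
	fmt.Println(deviceMatchesNode("node-1", "", true, false))        // true: allNodes
	fmt.Println(deviceMatchesNode("node-1", "", false, false))       // false: selector did not match
}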
@@ -953,15 +972,18 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 			return false, nil, nil
 		}
 
-		// If a device consumes capacity from a capacity pool, verify that
-		// there is sufficient capacity available.
-		ok, err := alloc.checkAvailableCapacity(device)
-		if err != nil {
-			return false, nil, err
-		}
-		if !ok {
-			alloc.logger.V(7).Info("Insufficient capacity", "device", device.id)
-			return false, nil, nil
+		// The API validation logic has checked the ConsumesCounter referred should exist inside SharedCounters.
+		if alloc.features.PartitionableDevices && len(device.basic.ConsumesCounter) > 0 {
+			// If a device consumes capacity from a capacity pool, verify that
+			// there is sufficient capacity available.
+			ok, err := alloc.checkAvailableCapacity(device)
+			if err != nil {
+				return false, nil, err
+			}
+			if !ok {
+				alloc.logger.V(7).Info("Insufficient capacity", "device", device.id)
+				return false, nil, nil
+			}
 		}
 
 		var parentRequestName string
@@ -977,13 +999,13 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 
 		// Might be tainted, in which case the taint has to be tolerated.
 		// The check is skipped if the feature is disabled.
-		if alloc.deviceTaintsEnabled && !allTaintsTolerated(device.basic, request) {
+		if alloc.features.DeviceTaints && !allTaintsTolerated(device.basic, request) {
 			return false, nil, nil
 		}
 
 		// It's available. Now check constraints.
 		for i, constraint := range alloc.constraints[r.claimIndex] {
-			added := constraint.add(baseRequestName, subRequestName, device.device, device.id)
+			added := constraint.add(baseRequestName, subRequestName, device.basic, device.id)
 			if !added {
 				if must {
 					// It does not make sense to declare a claim where a constraint prevents getting
@@ -993,7 +1015,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 
 					// Roll back for all previous constraints before we return.
 					for e := 0; e < i; e++ {
-						alloc.constraints[r.claimIndex][e].remove(baseRequestName, subRequestName, device.device, device.id)
+						alloc.constraints[r.claimIndex][e].remove(baseRequestName, subRequestName, device.basic, device.id)
 					}
 					return false, nil, nil
 				}
@@ -1009,7 +1031,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 			request:       request.name(),
 			parentRequest: parentRequestName,
 			id:            device.id,
-			device:        device.device,
+			basic:         device.basic,
 			slice:         device.slice,
 		}
 		if request.adminAccess() {
@@ -1020,7 +1042,7 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
 
 		return true, func() {
 			for _, constraint := range alloc.constraints[r.claimIndex] {
-				constraint.remove(baseRequestName, subRequestName, device.device, device.id)
+				constraint.remove(baseRequestName, subRequestName, device.basic, device.id)
 			}
 			if !request.adminAccess() {
 				alloc.allocatingDevices[device.id] = false
@@ -1052,23 +1074,24 @@ func taintTolerated(taint resourceapi.DeviceTaint, request requestAccessor) bool
 func (alloc *allocator) checkAvailableCapacity(device deviceWithID) (bool, error) {
 	slice := device.slice
 
-	referencedCapacityPools := sets.New[api.UniqueString]()
-	for _, consumedCapacity := range device.device.ConsumesCapacity {
-		referencedCapacityPools.Insert(consumedCapacity.CapacityPool)
+	referencedSharedCounters := sets.New[draapi.UniqueString]()
+	for _, consumedCounter := range device.basic.ConsumesCounter {
+		referencedSharedCounters.Insert(consumedCounter.SharedCounter)
 	}
 
-	// Create a structure that captures the initial capacity for all pools
+	// Create a structure that captures the initial counter for all sharedCounters
 	// referenced by the device.
-	availableCapacities := make(map[api.UniqueString]map[api.QualifiedName]api.DeviceCapacity)
-	for _, capacityPool := range slice.Spec.CapacityPools {
-		if !referencedCapacityPools.Has(capacityPool.Name) {
+	availableCounters := make(map[draapi.UniqueString]map[string]draapi.Counter)
+	for _, counterSet := range slice.Spec.SharedCounters {
+		if !referencedSharedCounters.Has(counterSet.Name) {
+			// the API validation logic has been added to make sure the counterSet referred should exist in capacityPools
 			continue
 		}
-		poolCapacity := make(map[api.QualifiedName]api.DeviceCapacity)
-		for name, cap := range capacityPool.Capacity {
-			poolCapacity[name] = cap
+		counterShared := make(map[string]draapi.Counter, len(counterSet.Counters))
+		for name, cap := range counterSet.Counters {
+			counterShared[name] = cap
 		}
-		availableCapacities[capacityPool.Name] = poolCapacity
+		availableCounters[counterSet.Name] = counterShared
 	}
 
 	// Update the data structure to reflect capacity already in use.
@@ -1078,30 +1101,30 @@ func (alloc *allocator) checkAvailableCapacity(device deviceWithID) (bool, error
 				Pool:   slice.Spec.Pool.Name,
 				Device: device.Name,
 			}
-			if !(alloc.allocatedDevices.Has(deviceID) || alloc.allocatingDevices[deviceID]) {
+			if !alloc.allocatedDevices.Has(deviceID) && !alloc.allocatingDevices[deviceID] {
 				continue
 			}
-			for _, consumedCapacity := range device.ConsumesCapacity {
-				poolCapacity := availableCapacities[consumedCapacity.CapacityPool]
-				for name, cap := range consumedCapacity.Capacity {
-					existingCap, ok := poolCapacity[name]
+			for _, consumedCounter := range device.Basic.ConsumesCounter {
+				counterShared := availableCounters[consumedCounter.SharedCounter]
+				for name, cap := range consumedCounter.Counters {
+					existingCap, ok := counterShared[name]
 					if !ok {
-						// Just continue for now, but this probably should be an error.
+						// the API validation logic has been added to make sure the capacity referred should exist in capacityPools
 						continue
 					}
 					// This can potentially result in negative available capacity. That is fine,
 					// we just treat it as no capacity available.
 					existingCap.Value.Sub(cap.Value)
-					poolCapacity[name] = existingCap
+					counterShared[name] = existingCap
 				}
 			}
 		}
 	}
 
 	// Check if all consumed capacities for the device can be satisfied.
-	for _, deviceConsumedCapacity := range device.device.ConsumesCapacity {
-		poolCapacity := availableCapacities[deviceConsumedCapacity.CapacityPool]
-		for name, cap := range deviceConsumedCapacity.Capacity {
-			availableCap, found := poolCapacity[name]
+	for _, deviceConsumedCounter := range device.basic.ConsumesCounter {
+		counterShared := availableCounters[deviceConsumedCounter.SharedCounter]
+		for name, cap := range deviceConsumedCounter.Counters {
+			availableCap, found := counterShared[name]
 			// If the device requests a capacity that doesn't exist in
 			// the pool, it can not be allocated.
 			if !found {
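The rewritten checkAvailableCapacity keeps the same algorithm under the new counter names: seed the available counters from each referenced counter set, subtract the consumption of devices that are already allocated or currently being allocated (values may go negative, which simply means nothing is left), then test the candidate device's own consumption. A standalone sketch with plain int64 in place of resource.Quantity:

package main

import "fmt"

// checkAvailable models the counter accounting: mutate the available map by
// what others consume, then verify the candidate's wanted amounts still fit.
func checkAvailable(available map[string]int64, consumedByOthers, wanted map[string]int64) bool {
	for name, used := range consumedByOthers {
		available[name] -= used // may go negative; treated as "nothing left"
	}
	for name, need := range wanted {
		left, ok := available[name]
		if !ok || left < need {
			return false // unknown counter, or not enough left
		}
	}
	return true
}

func main() {
	avail := map[string]int64{"memory": 8} // e.g. 8Gi in one counter set
	inUse := map[string]int64{"memory": 4}
	fmt.Println(checkAvailable(avail, inUse, map[string]int64{"memory": 4})) // true: 8-4 leaves exactly 4
	fmt.Println(checkAvailable(avail, nil, map[string]int64{"memory": 6}))   // false: avail was mutated above, 4 < 6
}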
@@ -1131,9 +1154,11 @@ func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.N
 		slice := result[i].slice
 		var nodeName draapi.UniqueString
 		var nodeSelector *v1.NodeSelector
-		if slice.Spec.PerDeviceNodeSelection {
-			nodeName = result[i].device.NodeName
-			nodeSelector = result[i].device.NodeSelector
+		if ptr.Deref(slice.Spec.PerDeviceNodeSelection, false) {
+			if result[i].basic.NodeName != nil {
+				nodeName = draapi.MakeUniqueString(*result[i].basic.NodeName)
+			}
+			nodeSelector = result[i].basic.NodeSelector
 		} else {
 			nodeName = slice.Spec.NodeName
 			nodeSelector = slice.Spec.NodeSelector
@@ -253,31 +253,29 @@ const (
 	fromDeviceCapacityConsumption = "fromDeviceCapacityConsumption"
 )
 
-func compositeDevice(name string, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
-	consumesCapacity ...resourceapi.DeviceCapacityConsumption) resourceapi.Device {
+func partitionableDevice(name string, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
+	consumesCapacity ...resourceapi.DeviceCounterConsumption) resourceapi.Device {
 
 	device := resourceapi.Device{
 		Name: name,
-		Composite: &resourceapi.CompositeDevice{
+		Basic: &resourceapi.BasicDevice{
 			Attributes: attributes,
 		},
 	}
 
 	switch capacity := capacity.(type) {
 	case map[resourceapi.QualifiedName]resource.Quantity:
-		device.Composite.Capacity = toDeviceCapacity(capacity)
+		device.Basic.Capacity = toDeviceCapacity(capacity)
 	case string:
 		if capacity == fromDeviceCapacityConsumption {
 			c := make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity)
 			for _, dcc := range consumesCapacity {
-				for name, cap := range dcc.Capacity {
-					if _, found := c[name]; found {
-						panic(fmt.Sprintf("same capacity found in multiple device capacity consumptions %q", name))
-					}
-					c[name] = cap
+				for name, cap := range dcc.Counters {
+					ccap := resourceapi.DeviceCapacity(cap)
+					c[resourceapi.QualifiedName(name)] = ccap
 				}
 			}
-			device.Composite.Capacity = c
+			device.Basic.Capacity = c
 		} else {
 			panic(fmt.Sprintf("unexpected capacity value %q", capacity))
 		}
@@ -287,29 +285,32 @@ func compositeDevice(name string, capacity any, attributes map[resourceapi.Quali
 		panic(fmt.Sprintf("unexpected capacity type %T: %+v", capacity, capacity))
 	}
 
-	device.Composite.ConsumesCapacity = consumesCapacity
+	device.Basic.ConsumesCounter = consumesCapacity
 	return device
 }
 
-func compositeDeviceWithNodeSelector(name string, nodeSelection any, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
-	consumesCapacity ...resourceapi.DeviceCapacityConsumption) resourceapi.Device {
-	device := compositeDevice(name, capacity, attributes, consumesCapacity...)
+func partitionableDeviceWithNodeSelector(name string, nodeSelection any, capacity any, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute,
+	consumesCapacity ...resourceapi.DeviceCounterConsumption) resourceapi.Device {
+	device := partitionableDevice(name, capacity, attributes, consumesCapacity...)
 
 	switch nodeSelection := nodeSelection.(type) {
 	case *v1.NodeSelector:
-		device.Composite.NodeSelector = nodeSelection
+		device.Basic.NodeSelector = nodeSelection
 	case string:
 		if nodeSelection == nodeSelectionAll {
-			device.Composite.AllNodes = true
+			device.Basic.AllNodes = func() *bool {
+				r := true
+				return &r
+			}()
 		} else if nodeSelection == nodeSelectionPerDevice {
 			panic("nodeSelectionPerDevice is not supported for devices")
 		} else {
-			device.Composite.NodeName = nodeSelection
+			device.Basic.NodeName = &nodeSelection
 		}
 	default:
 		panic(fmt.Sprintf("unexpected nodeSelection type %T: %+v", nodeSelection, nodeSelection))
 	}
-	return wrapDevice(device)
+	return device
 }
 
 type wrapDevice resourceapi.Device
@@ -325,10 +326,10 @@ func (in wrapDevice) withTaints(taints ...resourceapi.DeviceTaint) wrapDevice {
 	return wrapDevice(*device)
 }
 
-func deviceCapacityConsumption(capacityPool string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.DeviceCapacityConsumption {
-	return resourceapi.DeviceCapacityConsumption{
-		CapacityPool: capacityPool,
-		Capacity:     toDeviceCapacity(capacity),
+func deviceCapacityConsumption(capacityPool string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.DeviceCounterConsumption {
+	return resourceapi.DeviceCounterConsumption{
+		SharedCounter: capacityPool,
+		Counters:      toDeviceCounter(capacity),
 	}
 }
 
@@ -367,7 +368,10 @@ func slice(name string, nodeSelection any, pool, driver string, devices ...wrapD
 	if nodeSelection == nodeSelectionAll {
 		slice.Spec.AllNodes = true
 	} else if nodeSelection == nodeSelectionPerDevice {
-		slice.Spec.PerDeviceNodeSelection = true
+		slice.Spec.PerDeviceNodeSelection = func() *bool {
+			r := true
+			return &r
+		}()
 	} else {
 		slice.Spec.NodeName = nodeSelection
 	}
@@ -526,17 +530,17 @@ func sliceWithMultipleDevices(name string, nodeSelection any, pool, driver strin
 	return slice(name, nodeSelection, pool, driver, devices...)
 }
 
-func sliceWithCapacityPools(name string, nodeSelection any, pool, driver string, capacityPools []resourceapi.CapacityPool, devices ...resourceapi.Device) *resourceapi.ResourceSlice {
+func sliceWithCapacityPools(name string, nodeSelection any, pool, driver string, sharedCounters []resourceapi.CounterSet, devices ...resourceapi.Device) *resourceapi.ResourceSlice {
 	slice := slice(name, nodeSelection, pool, driver)
-	slice.Spec.CapacityPools = capacityPools
+	slice.Spec.SharedCounters = sharedCounters
 	slice.Spec.Devices = devices
 	return slice
 }
 
-func capacityPool(name string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.CapacityPool {
-	return resourceapi.CapacityPool{
-		Name:     name,
-		Capacity: toDeviceCapacity(capacity),
+func counterSet(name string, capacity map[resourceapi.QualifiedName]resource.Quantity) resourceapi.CounterSet {
+	return resourceapi.CounterSet{
+		Name:     name,
+		Counters: toDeviceCounter(capacity),
 	}
 }
 
@@ -548,6 +552,14 @@ func toDeviceCapacity(capacity map[resourceapi.QualifiedName]resource.Quantity)
 	return out
 }
 
+func toDeviceCounter(capacity map[resourceapi.QualifiedName]resource.Quantity) map[string]resourceapi.Counter {
+	out := make(map[string]resourceapi.Counter, len(capacity))
+	for name, quantity := range capacity {
+		out[string(name)] = resourceapi.Counter{Value: quantity}
+	}
+	return out
+}
+
 func TestAllocator(t *testing.T) {
 	nonExistentAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "NonExistentAttribute")
 	boolAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "boolAttribute")
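Since PerDeviceNodeSelection and AllNodes are now *bool, the test helpers above build pointers with inline func() *bool literals. A generic helper is the usual shorthand for this; the sketch below mirrors ptr.To from k8s.io/utils/ptr (assumed available in this tree, but trivial to define locally):

package main

import "fmt"

// ptrTo returns a pointer to any value, replacing the
// func() *bool { r := true; return &r }() pattern used in the helpers.
func ptrTo[T any](v T) *T { return &v }

func main() {
	perDevice := ptrTo(true)
	fmt.Println(*perDevice) // true
}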
@@ -1141,7 +1153,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"all-devices-slice-without-devices-prioritized-list": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				func() wrapResourceClaim {
 					claim := claimWithRequests(claim0, nil,
@@ -1167,7 +1181,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"all-devices-no-slices-prioritized-list": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				func() wrapResourceClaim {
 					claim := claimWithRequests(claim0, nil,
@@ -1192,7 +1208,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"all-devices-some-allocated-prioritized-list": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				func() wrapResourceClaim {
 					claim := claimWithRequests(claim0, nil,
@@ -1647,6 +1665,18 @@ func TestAllocator(t *testing.T) {
 
 			expectError: gomega.MatchError(gomega.ContainSubstring("empty constraint (unsupported constraint type?)")),
 		},
+		"unknown-device": {
+			claimsToAllocate: objects(claim(claim0, req0, classA)),
+			classes:          objects(class(classA, driverA)),
+			slices: objects(
+				func() *resourceapi.ResourceSlice {
+					slice := sliceWithOneDevice(slice1, node1, pool1, driverA)
+					slice.Spec.Devices[0].Basic = nil /* empty = unknown future extension */
+					return slice
+				}(),
+			),
+			node: node(node1, region1),
+		},
 		"invalid-CEL-one-device": {
 			claimsToAllocate: objects(
 				func() wrapResourceClaim {
@@ -1724,7 +1754,9 @@ func TestAllocator(t *testing.T) {
 			expectError: gomega.MatchError(gomega.ContainSubstring("exceeds the claim limit")),
 		},
 		"prioritized-list-first-unavailable": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
 				subRequest(subReq0, classB, 1),
 				subRequest(subReq1, classA, 1),
@@ -1739,7 +1771,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-non-available": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
 				subRequest(subReq0, classB, 2),
 				subRequest(subReq1, classA, 2),
@@ -1754,7 +1788,9 @@ func TestAllocator(t *testing.T) {
 			expectResults: nil,
 		},
 		"prioritized-list-device-config": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				claimWithAll(claim0,
 					objects(
@@ -1795,7 +1831,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-class-config": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
 				subRequest(subReq0, classA, 2),
 				subRequest(subReq1, classB, 2),
@@ -1831,7 +1869,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-subrequests-with-expressions": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				claimWithRequests(claim0, nil,
 					request(req0, classA, 1, resourceapi.DeviceSelector{
@@ -1873,7 +1913,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-subrequests-with-constraints-ref-parent-request": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				claimWithRequests(claim0,
 					[]resourceapi.DeviceConstraint{
@@ -1941,7 +1983,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-subrequests-with-constraints-ref-sub-request": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				claimWithRequests(claim0,
 					[]resourceapi.DeviceConstraint{
@@ -2001,7 +2045,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-subrequests-with-allocation-mode-all": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				func() wrapResourceClaim {
 					claim := claimWithRequests(claim0, nil,
@@ -2033,7 +2079,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-allocation-mode-all-multiple-requests": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				claimWithRequests(claim0, nil,
 					request(req0, classA, 1),
@@ -2064,7 +2112,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-disabled": {
-			prioritizedList: false,
+			features: Features{
+				PrioritizedList: false,
+			},
 			claimsToAllocate: objects(
 				func() wrapResourceClaim {
 					claim := claimWithRequests(claim0, nil,
@@ -2083,7 +2133,9 @@ func TestAllocator(t *testing.T) {
 			expectError: gomega.MatchError(gomega.ContainSubstring("claim claim-0, request req-0: has subrequests, but the DRAPrioritizedList feature is disabled")),
 		},
 		"prioritized-list-multi-request": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				claimWithRequests(claim0, nil,
 					request(req1, classA, 1, resourceapi.DeviceSelector{
@@ -2132,7 +2184,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-with-backtracking": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(
 				claimWithRequests(claim0, nil,
 					requestWithPrioritizedList(req0,
@@ -2181,7 +2235,9 @@ func TestAllocator(t *testing.T) {
 			)},
 		},
 		"prioritized-list-too-many-in-first-subrequest": {
-			prioritizedList: true,
+			features: Features{
+				PrioritizedList: true,
+			},
 			claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
 				subRequest(subReq0, classB, 500),
 				subRequest(subReq1, classA, 1),
@@ -2205,13 +2261,13 @@ func TestAllocator(t *testing.T) {
 			classes: objects(class(classA, driverA)),
 			slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
 				objects(
-					capacityPool(capacityPool1,
+					counterSet(capacityPool1,
 						map[resourceapi.QualifiedName]resource.Quantity{
 							"memory": resource.MustParse("8Gi"),
 						},
 					),
 				),
-				compositeDevice(device1, nil, nil,
+				partitionableDevice(device1, nil, nil,
 					deviceCapacityConsumption(capacityPool1,
 						map[resourceapi.QualifiedName]resource.Quantity{
 							"memory": resource.MustParse("4Gi"),
@@ -2225,7 +2281,7 @@ func TestAllocator(t *testing.T) {
 				deviceAllocationResult(req0, driverA, pool1, device1, false),
 			)},
 		},
-		"partitionable-devices-multiple-devices": {
+		"partitionable-devices-prioritized-list": {
 			features: Features{
 				PrioritizedList:      true,
 				PartitionableDevices: true,
@@ -2250,13 +2306,13 @@ func TestAllocator(t *testing.T) {
 			classes: objects(class(classA, driverA)),
 			slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
 				objects(
-					capacityPool(capacityPool1,
+					counterSet(capacityPool1,
 						map[resourceapi.QualifiedName]resource.Quantity{
 							"memory": resource.MustParse("8Gi"),
 						},
 					),
 				),
-				compositeDevice(device1,
+				partitionableDevice(device1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					}, nil,
@@ -2266,7 +2322,7 @@ func TestAllocator(t *testing.T) {
 						},
 					),
 				),
-				compositeDevice(device2,
+				partitionableDevice(device2,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("6Gi"),
 					}, nil,
@@ -2276,7 +2332,7 @@ func TestAllocator(t *testing.T) {
 						},
 					),
 				),
-				compositeDevice(device3, fromDeviceCapacityConsumption, nil,
+				partitionableDevice(device3, fromDeviceCapacityConsumption, nil,
 					deviceCapacityConsumption(capacityPool1,
 						map[resourceapi.QualifiedName]resource.Quantity{
 							"memory": resource.MustParse("4Gi"),
@@ -2291,6 +2347,60 @@ func TestAllocator(t *testing.T) {
 				deviceAllocationResult(req1SubReq1, driverA, pool1, device3, false),
 			)},
 		},
+		"partitionable-devices-multiple-devices": {
+			features: Features{
+				PartitionableDevices: true,
+			},
+			claimsToAllocate: objects(
+				claimWithRequests(claim0, nil,
+					request(req0, classA, 1),
+					request(req1, classA, 1),
+				),
+			),
+			classes: objects(class(classA, driverA)),
+			slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
+				objects(
+					counterSet(capacityPool1,
+						map[resourceapi.QualifiedName]resource.Quantity{
+							"memory": resource.MustParse("8Gi"),
+						},
+					),
+				),
+				partitionableDevice(device1,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"memory": resource.MustParse("4Gi"),
+					}, nil,
+					deviceCapacityConsumption(capacityPool1,
+						map[resourceapi.QualifiedName]resource.Quantity{
+							"memory": resource.MustParse("4Gi"),
+						},
+					),
+				),
+				partitionableDevice(device2,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"memory": resource.MustParse("6Gi"),
+					}, nil,
+					deviceCapacityConsumption(capacityPool1,
+						map[resourceapi.QualifiedName]resource.Quantity{
+							"memory": resource.MustParse("6Gi"),
+						},
+					),
+				),
+				partitionableDevice(device3, fromDeviceCapacityConsumption, nil,
+					deviceCapacityConsumption(capacityPool1,
+						map[resourceapi.QualifiedName]resource.Quantity{
+							"memory": resource.MustParse("4Gi"),
+						},
+					),
+				),
+			)),
+			node: node(node1, region1),
+			expectResults: []any{allocationResult(
+				localNodeSelector(node1),
+				deviceAllocationResult(req0, driverA, pool1, device1, false),
+				deviceAllocationResult(req1, driverA, pool1, device3, false),
+			)},
+		},
 		"partitionable-devices-multiple-capacity-pools": {
 			features: Features{
 				PrioritizedList:      true,
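A plausible reading of the expected result in the added "partitionable-devices-multiple-devices" case: the single counter set offers 8Gi of memory; device1 satisfies req0 and consumes 4Gi, leaving 4Gi; device2 would need 6Gi and no longer fits, so the allocator falls through to device3, whose 4Gi consumption exactly matches the remainder and satisfies req1.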
@ -2316,18 +2426,18 @@ func TestAllocator(t *testing.T) {
|
|||||||
classes: objects(class(classA, driverA)),
|
classes: objects(class(classA, driverA)),
|
||||||
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
|
||||||
objects(
|
objects(
|
||||||
capacityPool(capacityPool1,
|
counterSet(capacityPool1,
|
||||||
map[resourceapi.QualifiedName]resource.Quantity{
|
map[resourceapi.QualifiedName]resource.Quantity{
|
||||||
"memory": resource.MustParse("18Gi"),
|
"memory": resource.MustParse("18Gi"),
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
capacityPool(capacityPool2,
|
counterSet(capacityPool2,
|
||||||
map[resourceapi.QualifiedName]resource.Quantity{
|
map[resourceapi.QualifiedName]resource.Quantity{
|
||||||
"cpus": resource.MustParse("8"),
|
"cpus": resource.MustParse("8"),
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
compositeDevice(device1, fromDeviceCapacityConsumption, nil,
|
partitionableDevice(device1, fromDeviceCapacityConsumption, nil,
|
||||||
deviceCapacityConsumption(capacityPool1,
|
deviceCapacityConsumption(capacityPool1,
|
||||||
map[resourceapi.QualifiedName]resource.Quantity{
|
map[resourceapi.QualifiedName]resource.Quantity{
|
||||||
"memory": resource.MustParse("4Gi"),
|
"memory": resource.MustParse("4Gi"),
|
||||||
@ -2339,7 +2449,7 @@ func TestAllocator(t *testing.T) {
|
|||||||
},
|
},
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
compositeDevice(device2, fromDeviceCapacityConsumption, nil,
|
partitionableDevice(device2, fromDeviceCapacityConsumption, nil,
|
||||||
deviceCapacityConsumption(capacityPool1,
|
deviceCapacityConsumption(capacityPool1,
|
||||||
map[resourceapi.QualifiedName]resource.Quantity{
|
map[resourceapi.QualifiedName]resource.Quantity{
|
||||||
"memory": resource.MustParse("6Gi"),
|
"memory": resource.MustParse("6Gi"),
|
||||||
@ -2351,7 +2461,7 @@ func TestAllocator(t *testing.T) {
|
|||||||
},
|
},
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
compositeDevice(device3, fromDeviceCapacityConsumption, nil,
|
partitionableDevice(device3, fromDeviceCapacityConsumption, nil,
|
||||||
deviceCapacityConsumption(capacityPool1,
|
deviceCapacityConsumption(capacityPool1,
|
||||||
map[resourceapi.QualifiedName]resource.Quantity{
|
map[resourceapi.QualifiedName]resource.Quantity{
|
||||||
"memory": resource.MustParse("4Gi"),
|
"memory": resource.MustParse("4Gi"),
|
||||||
@ -2371,6 +2481,82 @@ func TestAllocator(t *testing.T) {
|
|||||||
deviceAllocationResult(req1SubReq1, driverA, pool1, device3, false),
|
deviceAllocationResult(req1SubReq1, driverA, pool1, device3, false),
|
||||||
)},
|
)},
|
||||||
},
|
},
|
||||||
+	"partitionable-devices-multiple-counters": {
+		features: Features{
+			PartitionableDevices: true,
+		},
+		claimsToAllocate: objects(
+			claimWithRequests(claim0, nil,
+				request(req0, classA, 1),
+				request(req1, classA, 1),
+			),
+		),
+		classes: objects(class(classA, driverA)),
+		slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
+			objects(
+				counterSet(capacityPool1,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"cpus":   resource.MustParse("8"),
+						"memory": resource.MustParse("18Gi"),
+					},
+				),
+				counterSet(capacityPool2,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"cpus":   resource.MustParse("12"),
+						"memory": resource.MustParse("18Gi"),
+					},
+				),
+			),
+			partitionableDevice(device1, fromDeviceCapacityConsumption, nil,
+				deviceCapacityConsumption(capacityPool1,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"memory": resource.MustParse("4Gi"),
+						"cpus":   resource.MustParse("6"),
+					},
+				),
+				deviceCapacityConsumption(capacityPool2,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"cpus":   resource.MustParse("4"),
+						"memory": resource.MustParse("2Gi"),
+					},
+				),
+			),
+			partitionableDevice(device2, fromDeviceCapacityConsumption, nil,
+				deviceCapacityConsumption(capacityPool1,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"memory": resource.MustParse("6Gi"),
+						"cpus":   resource.MustParse("4"),
+					},
+				),
+				deviceCapacityConsumption(capacityPool2,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"cpus":   resource.MustParse("6"),
+						"memory": resource.MustParse("6Gi"),
+					},
+				),
+			),
+			partitionableDevice(device3, fromDeviceCapacityConsumption, nil,
+				deviceCapacityConsumption(capacityPool1,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"memory": resource.MustParse("4Gi"),
+						"cpus":   resource.MustParse("4"),
+					},
+				),
+				deviceCapacityConsumption(capacityPool2,
+					map[resourceapi.QualifiedName]resource.Quantity{
+						"cpus":   resource.MustParse("4"),
+						"memory": resource.MustParse("4Gi"),
+					},
+				),
+			),
+		)),
+		node: node(node1, region1),
+		expectResults: []any{allocationResult(
+			localNodeSelector(node1),
+			deviceAllocationResult(req0, driverA, pool1, device2, false),
+			deviceAllocationResult(req1, driverA, pool1, device3, false),
+		)},
+	},
"partitionable-devices-no-capacity-available": {
|
"partitionable-devices-no-capacity-available": {
|
||||||
features: Features{
|
features: Features{
|
||||||
PartitionableDevices: true,
|
PartitionableDevices: true,
|
||||||
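In the multiple-counters case above, the expected result is device2 plus device3 because device1 would consume 6 of the 8 "cpus" in capacityPool1, leaving only 2 for any second device; every combination involving device1 overcommits that counter set, so the allocator must backtrack. A minimal sketch of the per-counter comparison behind that decision; fitsCounters and the plain string keys are illustrative stand-ins, not the allocator's real types:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// fitsCounters reports whether every requested counter still fits into what
// remains of one counter set. Hypothetical helper; the real allocator tracks
// consumption per counter set across all chosen devices of a slice.
func fitsCounters(remaining, requested map[string]resource.Quantity) bool {
	for name, want := range requested {
		have, ok := remaining[name]
		if !ok || have.Cmp(want) < 0 {
			return false
		}
	}
	return true
}

func main() {
	// capacityPool1 after tentatively picking device1 (6 of 8 cpus, 4Gi of 18Gi):
	remaining := map[string]resource.Quantity{
		"cpus":   resource.MustParse("2"),
		"memory": resource.MustParse("14Gi"),
	}
	// device3 would still need 4 cpus and 4Gi from the same counter set:
	requested := map[string]resource.Quantity{
		"cpus":   resource.MustParse("4"),
		"memory": resource.MustParse("4Gi"),
	}
	fmt.Println(fitsCounters(remaining, requested)) // false: forces fallback to device2+device3
}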
@@ -2383,20 +2569,20 @@ func TestAllocator(t *testing.T) {
 		classes: objects(class(classA, driverA)),
 		slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
 			objects(
-				capacityPool(capacityPool1,
+				counterSet(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("18Gi"),
 					},
 				),
 			),
-			compositeDevice(device1, fromDeviceCapacityConsumption, nil,
+			partitionableDevice(device1, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					},
 				),
 			),
-			compositeDevice(device2, fromDeviceCapacityConsumption, nil,
+			partitionableDevice(device2, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("16Gi"),
@@ -2422,20 +2608,20 @@ func TestAllocator(t *testing.T) {
 		classes: objects(class(classA, driverA)),
 		slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
 			objects(
-				capacityPool(capacityPool1,
+				counterSet(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("18Gi"),
 					},
 				),
 			),
-			compositeDevice(device1, fromDeviceCapacityConsumption, nil,
+			partitionableDevice(device1, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					},
 				),
 			),
-			compositeDevice(device2, fromDeviceCapacityConsumption, nil,
+			partitionableDevice(device2, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("20Gi"),
@@ -2461,13 +2647,13 @@ func TestAllocator(t *testing.T) {
 		classes: objects(class(classA, driverA)),
 		slices: objects(sliceWithCapacityPools(slice1, node1, pool1, driverA,
 			objects(
-				capacityPool(capacityPool1,
+				counterSet(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("18Gi"),
 					},
 				),
 			),
-			compositeDevice(device1, nil, nil,
+			partitionableDevice(device1, nil, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
@@ -2502,20 +2688,20 @@ func TestAllocator(t *testing.T) {
 		classes: objects(class(classA, driverA)),
 		slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
 			objects(
-				capacityPool(capacityPool1,
+				counterSet(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("18Gi"),
 					},
 				),
 			),
-			compositeDeviceWithNodeSelector(device1, node1, fromDeviceCapacityConsumption, nil,
+			partitionableDeviceWithNodeSelector(device1, node1, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					},
 				),
 			),
-			compositeDeviceWithNodeSelector(device2, node2, fromDeviceCapacityConsumption, nil,
+			partitionableDeviceWithNodeSelector(device2, node2, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("6Gi"),
@@ -2540,13 +2726,13 @@ func TestAllocator(t *testing.T) {
 		classes: objects(class(classA, driverA)),
 		slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
 			objects(
-				capacityPool(capacityPool1,
+				counterSet(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("18Gi"),
 					},
 				),
 			),
-			compositeDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
+			partitionableDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
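In these per-device selection cases, nodeLabelSelector(regionKey, region1) stands in for a *v1.NodeSelector that matches on a node label instead of a node name. A plausible shape for such a helper, shown only as a sketch (the test's real helper lives elsewhere in allocator_test.go and is not part of this diff):

package example

import v1 "k8s.io/api/core/v1"

// nodeLabelSelector builds a selector matching nodes that carry the given
// label; per-device node selection uses the same core/v1 selector type as
// slice-level selection.
func nodeLabelSelector(key, value string) *v1.NodeSelector {
	return &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{{
			MatchExpressions: []v1.NodeSelectorRequirement{{
				Key:      key,
				Operator: v1.NodeSelectorOpIn,
				Values:   []string{value},
			}},
		}},
	}
}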
@@ -2580,27 +2766,27 @@ func TestAllocator(t *testing.T) {
 		classes: objects(class(classA, driverA), class(classB, driverB)),
 		slices: objects(sliceWithCapacityPools(slice1, nodeSelectionPerDevice, pool1, driverA,
 			objects(
-				capacityPool(capacityPool1,
+				counterSet(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("18Gi"),
 					},
 				),
 			),
-			compositeDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
+			partitionableDeviceWithNodeSelector(device1, nodeLabelSelector(regionKey, region1), fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					},
 				),
 			),
-			compositeDeviceWithNodeSelector(device2, node1, fromDeviceCapacityConsumption, nil,
+			partitionableDeviceWithNodeSelector(device2, node1, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					},
 				),
 			),
-			compositeDeviceWithNodeSelector(device3, nodeSelectionAll, fromDeviceCapacityConsumption, nil,
+			partitionableDeviceWithNodeSelector(device3, nodeSelectionAll, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool1,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
@@ -2609,27 +2795,27 @@ func TestAllocator(t *testing.T) {
 			),
 		), sliceWithCapacityPools(slice2, node1, pool2, driverB,
 			objects(
-				capacityPool(capacityPool2,
+				counterSet(capacityPool2,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("12Gi"),
 					},
 				),
 			),
-			compositeDevice(device1, fromDeviceCapacityConsumption, nil,
+			partitionableDevice(device1, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool2,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					},
 				),
 			),
-			compositeDevice(device2, fromDeviceCapacityConsumption, nil,
+			partitionableDevice(device2, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool2,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
 					},
 				),
 			),
-			compositeDevice(device3, fromDeviceCapacityConsumption, nil,
+			partitionableDevice(device3, fromDeviceCapacityConsumption, nil,
 				deviceCapacityConsumption(capacityPool2,
 					map[resourceapi.QualifiedName]resource.Quantity{
 						"memory": resource.MustParse("4Gi"),
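The nodeSelectionPerDevice slices above let each device, rather than the slice, declare where it is reachable: an explicit node name, a label selector, or all nodes. A hypothetical construction of such a device, using the per-device fields this commit consumes in pools.go further down (device.Basic.NodeName, AllNodes, NodeSelector); the released API may differ:

package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	"k8s.io/utils/ptr"
)

// perNodeDevice sketches a device that is only reachable from a single node,
// intended for a ResourceSlice whose Spec.PerDeviceNodeSelection is set.
func perNodeDevice(name, nodeName string) resourceapi.Device {
	return resourceapi.Device{
		Name: name,
		Basic: &resourceapi.BasicDevice{
			// Exactly one of NodeName, AllNodes, or NodeSelector is expected.
			NodeName: ptr.To(nodeName),
		},
	}
}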
@@ -2710,7 +2896,7 @@ func TestAllocator(t *testing.T) {
 	},
 	"tainted-disabled": {
 		features: Features{
-			DeviceTaints: true,
+			DeviceTaints: false,
 		},
 		claimsToAllocate: objects(claim(claim0, req0, classA)),
 		classes: objects(class(classA, driverA)),
@@ -2725,7 +2911,7 @@ func TestAllocator(t *testing.T) {
 	},
 	"tainted-prioritized-list": {
 		features: Features{
 			DeviceTaints:    true,
 			PrioritizedList: true,
 		},
 		claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
@@ -2740,7 +2926,7 @@ func TestAllocator(t *testing.T) {
 	},
 	"tainted-prioritized-list-disabled": {
 		features: Features{
 			DeviceTaints:    false,
 			PrioritizedList: true,
 		},
 		claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
@@ -2761,7 +2947,7 @@ func TestAllocator(t *testing.T) {
 	"tainted-admin-access": {
 		features: Features{
 			DeviceTaints: true,
-			PrioritizedList: true,
+			AdminAccess:  true,
 		},
 		claimsToAllocate: func() []wrapResourceClaim {
 			c := claim(claim0, req0, classA)
@@ -2781,7 +2967,7 @@ func TestAllocator(t *testing.T) {
 	"tainted-admin-access-disabled": {
 		features: Features{
 			DeviceTaints: false,
 			AdminAccess:  true,
 		},
 		claimsToAllocate: func() []wrapResourceClaim {
 			c := claim(claim0, req0, classA)
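The tainted-* cases pin down the feature gating: with DeviceTaints off, the allocator ignores device taints entirely, so each disabled case differs from its enabled twin only in the gate (the asserted outcomes are truncated from this view). A self-contained sketch of that gate-aware check, using local stand-in types rather than the real resource API:

package example

// Local stand-ins for the device-taint types; sketch only.
type taintEffect string

const (
	noSchedule taintEffect = "NoSchedule"
	noExecute  taintEffect = "NoExecute"
)

type deviceTaint struct {
	Key, Value string
	Effect     taintEffect
}

// schedulable mirrors the gating pattern the tainted-* cases exercise: a
// disabled feature gate short-circuits all taint handling; otherwise an
// untolerated NoSchedule or NoExecute taint rules the device out.
func schedulable(taintsEnabled bool, taints []deviceTaint, tolerated func(deviceTaint) bool) bool {
	if !taintsEnabled {
		return true
	}
	for _, t := range taints {
		if (t.Effect == noSchedule || t.Effect == noExecute) && !tolerated(t) {
			return false
		}
	}
	return true
}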
@@ -27,48 +27,72 @@ import (
 	draapi "k8s.io/dynamic-resource-allocation/api"
 )
 
+func nodeMatches(node *v1.Node, nodeNameToMatch string, allNodesMatch bool, nodeSelector *v1.NodeSelector) (bool, error) {
+	switch {
+	case nodeNameToMatch != "":
+		return node != nil && node.Name == nodeNameToMatch, nil
+	case allNodesMatch:
+		return true, nil
+	case nodeSelector != nil:
+		selector, err := nodeaffinity.NewNodeSelector(nodeSelector)
+		if err != nil {
+			return false, fmt.Errorf("failed to parse node selector %s: %w", nodeSelector.String(), err)
+		}
+		return selector.Match(node), nil
+	}
+
+	return false, nil
+}
+
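nodeMatches consolidates the three node-selection modes into one helper, and the switch is order-sensitive: a non-empty node name short-circuits both the all-nodes flag and the selector. A small test-style illustration of that precedence, assuming it sits in the same package as the helper above (sketch, not part of the commit):

package structured

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// The name case is evaluated first, so a mismatching name returns false even
// though allNodesMatch is true.
func TestNodeMatchesPrecedence(t *testing.T) {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "worker-2"}}
	match, err := nodeMatches(node, "worker-1", true, nil)
	if err != nil {
		t.Fatal(err)
	}
	if match {
		t.Error("expected no match: an explicit node name takes precedence over allNodesMatch")
	}
}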
 // GatherPools collects information about all resource pools which provide
 // devices that are accessible from the given node.
 //
 // Out-dated slices are silently ignored. Pools may be incomplete (not all
 // required slices available) or invalid (for example, device names not unique).
 // Both is recorded in the result.
-func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node, partitionableDevicesEnabled bool) ([]*Pool, error) {
+func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node, features Features) ([]*Pool, error) {
 	pools := make(map[PoolID]*Pool)
-	nodeName := ""
-	if node != nil {
-		nodeName = node.Name
-	}
 
 	for _, slice := range slices {
+		if !features.PartitionableDevices && (len(slice.Spec.SharedCounters) > 0 || slice.Spec.PerDeviceNodeSelection != nil) {
+			continue
+		}
+
 		switch {
-		case slice.Spec.NodeName != "":
-			if slice.Spec.NodeName == nodeName {
-				if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
-					return nil, fmt.Errorf("add node slice %s: %w", slice.Name, err)
-				}
-			}
-		case slice.Spec.AllNodes:
-			if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
-				return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
-			}
-		case slice.Spec.NodeSelector != nil:
-			// TODO: move conversion into api.
-			selector, err := nodeaffinity.NewNodeSelector(slice.Spec.NodeSelector)
+		case slice.Spec.NodeName != "" || slice.Spec.AllNodes || slice.Spec.NodeSelector != nil:
+			match, err := nodeMatches(node, slice.Spec.NodeName, slice.Spec.AllNodes, slice.Spec.NodeSelector)
 			if err != nil {
-				return nil, fmt.Errorf("node selector in resource slice %s: %w", slice.Name, err)
+				return nil, fmt.Errorf("failed to perform node selection for slice %s: %w", slice.Name, err)
 			}
-			if selector.Match(node) {
-				if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
-					return nil, fmt.Errorf("add matching slice %s: %w", slice.Name, err)
+			if match {
+				if err := addSlice(pools, slice); err != nil {
+					return nil, fmt.Errorf("failed to add node slice %s: %w", slice.Name, err)
 				}
 			}
-		case slice.Spec.PerDeviceNodeSelection:
-			// We add the slice here regardless of whether the partitionable devices feature is
-			// enabled. If we don't, the full slice will be considered incomplete. So we filter
-			// out devices that have fields from the partitionable devices feature set later.
-			if err := addSlice(pools, slice, node, partitionableDevicesEnabled); err != nil {
-				return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
-			}
+		case slice.Spec.PerDeviceNodeSelection != nil && *slice.Spec.PerDeviceNodeSelection:
+			for _, device := range slice.Spec.Devices {
+				if device.Basic == nil {
+					continue
+				}
+				var nodeName string
+				var allNodes bool
+				if device.Basic.NodeName != nil {
+					nodeName = *device.Basic.NodeName
+				}
+				if device.Basic.AllNodes != nil {
+					allNodes = *device.Basic.AllNodes
+				}
+				match, err := nodeMatches(node, nodeName, allNodes, device.Basic.NodeSelector)
+				if err != nil {
+					return nil, fmt.Errorf("failed to perform node selection for device %s in slice %s: %w",
+						device.String(), slice.Name, err)
+				}
+				if match {
+					if err := addSlice(pools, slice); err != nil {
+						return nil, fmt.Errorf("failed to add node slice %s: %w", slice.Name, err)
+					}
+					break
+				}
+			}
 		default:
 			// Nothing known was set. This must be some future, unknown extension,
@@ -94,16 +118,9 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
 	return result, nil
 }
 
-func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice, node *v1.Node, partitionableDevicesEnabled bool) error {
+func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice) error {
 	var slice draapi.ResourceSlice
-	sliceScope := draapi.SliceScope{
-		SliceContext: draapi.SliceContext{
-			Slice:                       s,
-			Node:                        node,
-			PartitionableDevicesEnabled: partitionableDevicesEnabled,
-		},
-	}
-	if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, sliceScope); err != nil {
+	if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, nil); err != nil {
 		return fmt.Errorf("convert ResourceSlice: %w", err)
 	}
 
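With all node filtering, slice-level and per-device, now happening up front in GatherPools, addSlice no longer needs the node or a feature flag, and the conversion runs with a nil scope. A hypothetical call site for the new signature (sketch; GatherPools, Features, and Pool are this package's own symbols):

package structured

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1beta1"
)

// gatherExample shows the new call shape: the whole Features struct goes in,
// and slices using SharedCounters or PerDeviceNodeSelection are skipped when
// the PartitionableDevices gate is off.
func gatherExample(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node) ([]*Pool, error) {
	pools, err := GatherPools(ctx, slices, node, Features{PartitionableDevices: true})
	if err != nil {
		return nil, fmt.Errorf("gather pools: %w", err)
	}
	return pools, nil
}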
@@ -338,11 +338,11 @@ claims:
 		}
 	}
 
-	allocator, err := structured.NewAllocator(tCtx,
-		utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
-		utilfeature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList),
-		utilfeature.DefaultFeatureGate.Enabled(features.DRADeviceTaints),
-		[]*resourceapi.ResourceClaim{claim}, allocatedDevices, draManager.DeviceClasses(), slices, celCache)
+	allocator, err := structured.NewAllocator(tCtx, structured.Features{
+		PrioritizedList: utilfeature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList),
+		AdminAccess:     utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
+		DeviceTaints:    utilfeature.DefaultFeatureGate.Enabled(features.DRADeviceTaints),
+	}, []*resourceapi.ResourceClaim{claim}, allocatedDevices, draManager.DeviceClasses(), slices, celCache)
 	tCtx.ExpectNoError(err, "create allocator")
 
 	rand.Shuffle(len(nodes), func(i, j int) {
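Replacing three positional booleans with a structured.Features literal makes each gate self-documenting at the call site and immune to argument reordering; with the old form, nothing at the call site confirmed that the DRAAdminAccess value really landed in the admin-access parameter. The same shape with hard-coded values instead of feature-gate lookups, as a sketch:

package example

import "k8s.io/dynamic-resource-allocation/structured"

// Named fields keep the call correct even if the Features struct grows or
// reorders its fields; the values here are illustrative only.
var draFeatures = structured.Features{
	AdminAccess:     false,
	PrioritizedList: true,
	DeviceTaints:    false,
	// PartitionableDevices is the gate added elsewhere in this commit.
}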