DRA: Update allocator for Prioritized Alternatives in Device Requests

parent: cc35f9b8e8
commit: 2229a78dfe
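This commit adds allocator support for prioritized alternatives in device requests: a DeviceRequest may carry a firstAvailable list of subrequests instead of a single deviceClassName, and the allocator tries the alternatives in order, settling on the first one that can be satisfied. The behavior is gated behind the DRAPrioritizedList feature gate, which is threaded through the scheduler plugin, the plugin feature struct, the in-tree registry, and structured.NewAllocator.

For orientation, a minimal sketch of a claim that exercises the feature. The device class names are made up, and resourceapi stands for the resource API package these files import (the import itself is outside this excerpt):

```go
// A claim with one request, "req-1", offering two alternatives. The allocator
// prefers "subreq-1" (a larger device) and falls back to "subreq-2".
claim := &resourceapi.ResourceClaim{
	Spec: resourceapi.ResourceClaimSpec{
		Devices: resourceapi.DeviceClaim{
			Requests: []resourceapi.DeviceRequest{{
				Name: "req-1",
				FirstAvailable: []resourceapi.DeviceSubRequest{{
					Name:            "subreq-1",
					DeviceClassName: "large-gpu.example.com", // hypothetical class
					AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
					Count:           1,
				}, {
					Name:            "subreq-2",
					DeviceClassName: "small-gpu.example.com", // hypothetical class
					AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
					Count:           1,
				}},
			}},
		},
	},
}
```

Results refer to a chosen alternative by the qualified name `<request>/<subrequest>`, e.g. `req-1/subreq-1`, as the test fixtures below show.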
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"slices"
+	"strings"
 	"sync"
 
 	"github.com/google/go-cmp/cmp" //nolint:depguard
@@ -103,6 +104,7 @@ type informationForClaim struct {
 type DynamicResources struct {
 	enabled                   bool
 	enableAdminAccess         bool
+	enablePrioritizedList     bool
 	enableSchedulingQueueHint bool
 
 	fh framework.Handle
@@ -121,6 +123,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
 	pl := &DynamicResources{
 		enabled:                   true,
 		enableAdminAccess:         fts.EnableDRAAdminAccess,
+		enablePrioritizedList:     fts.EnableDRAPrioritizedList,
 		enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
 
 		fh: fh,
@@ -405,20 +408,19 @@ func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 		// initial set of potential nodes before we ask the
 		// driver(s) for information about the specific pod.
 		for _, request := range claim.Spec.Devices.Requests {
-			if request.DeviceClassName == "" {
-				return nil, statusError(logger, fmt.Errorf("request %s: unsupported request type", request.Name))
-			}
-
-			_, err := pl.draManager.DeviceClasses().Get(request.DeviceClassName)
-			if err != nil {
-				// If the class cannot be retrieved, allocation cannot proceed.
-				if apierrors.IsNotFound(err) {
-					// Here we mark the pod as "unschedulable", so it'll sleep in
-					// the unscheduleable queue until a DeviceClass event occurs.
-					return nil, statusUnschedulable(logger, fmt.Sprintf("request %s: device class %s does not exist", request.Name, request.DeviceClassName))
-				}
-				// Other error, retry with backoff.
-				return nil, statusError(logger, fmt.Errorf("request %s: look up device class: %w", request.Name, err))
+			// The requirements differ depending on whether the request has a list of
+			// alternative subrequests defined in the firstAvailable field.
+			if len(request.FirstAvailable) == 0 {
+				if status := pl.validateDeviceClass(logger, request.DeviceClassName, request.Name); status != nil {
+					return nil, status
+				}
+			} else {
+				for _, subRequest := range request.FirstAvailable {
+					qualRequestName := strings.Join([]string{request.Name, subRequest.Name}, "/")
+					if status := pl.validateDeviceClass(logger, subRequest.DeviceClassName, qualRequestName); status != nil {
+						return nil, status
+					}
+				}
 			}
 		}
 	}
@@ -447,7 +449,7 @@ func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 	if err != nil {
 		return nil, statusError(logger, err)
 	}
-	allocator, err := structured.NewAllocator(ctx, pl.enableAdminAccess, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache)
+	allocator, err := structured.NewAllocator(ctx, pl.enableAdminAccess, pl.enablePrioritizedList, allocateClaims, allAllocatedDevices, pl.draManager.DeviceClasses(), slices, pl.celCache)
 	if err != nil {
 		return nil, statusError(logger, err)
 	}
@@ -459,6 +461,23 @@ func (pl *DynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 	return nil, nil
 }
 
+func (pl *DynamicResources) validateDeviceClass(logger klog.Logger, deviceClassName, requestName string) *framework.Status {
+	if deviceClassName == "" {
+		return statusError(logger, fmt.Errorf("request %s: unsupported request type", requestName))
+	}
+
+	_, err := pl.draManager.DeviceClasses().Get(deviceClassName)
+	if err != nil {
+		// If the class cannot be retrieved, allocation cannot proceed.
+		if apierrors.IsNotFound(err) {
+			// Here we mark the pod as "unschedulable", so it'll sleep in
+			// the unscheduleable queue until a DeviceClass event occurs.
+			return statusUnschedulable(logger, fmt.Sprintf("request %s: device class %s does not exist", requestName, deviceClassName))
+		}
+	}
+	return nil
+}
+
 // PreFilterExtensions returns prefilter extensions, pod add and remove.
 func (pl *DynamicResources) PreFilterExtensions() framework.PreFilterExtensions {
 	return nil
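The change above splits the device-class lookup out of PreFilter into validateDeviceClass so the same check can run either once against the base request (when firstAvailable is empty) or once per subrequest, with the qualified name `<request>/<subrequest>` used in any resulting status. A request is expected to carry either a deviceClassName or a firstAvailable list; the empty-class branch in validateDeviceClass catches anything else as an unsupported request type.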
@@ -117,9 +117,17 @@ var (
 			Namespace(namespace).
 			Request(className).
 			Obj()
+	claimWithPrioritzedList = st.MakeResourceClaim().
+				Name(claimName).
+				Namespace(namespace).
+				RequestWithPrioritizedList(className).
+				Obj()
 	pendingClaim = st.FromResourceClaim(claim).
 			OwnerReference(podName, podUID, podKind).
 			Obj()
+	pendingClaimWithPrioritizedList = st.FromResourceClaim(claimWithPrioritzedList).
+					OwnerReference(podName, podUID, podKind).
+					Obj()
 	allocationResult = &resourceapi.AllocationResult{
 		Devices: resourceapi.DeviceAllocationResult{
 			Results: []resourceapi.DeviceRequestAllocationResult{{
@@ -133,13 +141,33 @@ var (
 			return st.MakeNodeSelector().In("metadata.name", []string{nodeName}, st.NodeSelectorTypeMatchFields).Obj()
 		}(),
 	}
+	allocationResultWithPrioritizedList = &resourceapi.AllocationResult{
+		Devices: resourceapi.DeviceAllocationResult{
+			Results: []resourceapi.DeviceRequestAllocationResult{{
+				Driver:  driver,
+				Pool:    nodeName,
+				Device:  "instance-1",
+				Request: "req-1/subreq-1",
+			}},
+		},
+		NodeSelector: func() *v1.NodeSelector {
+			return st.MakeNodeSelector().In("metadata.name", []string{nodeName}, st.NodeSelectorTypeMatchFields).Obj()
+		}(),
+	}
 	inUseClaim = st.FromResourceClaim(pendingClaim).
 		Allocation(allocationResult).
 		ReservedForPod(podName, types.UID(podUID)).
 		Obj()
+	inUseClaimWithPrioritizedList = st.FromResourceClaim(pendingClaimWithPrioritizedList).
+			Allocation(allocationResultWithPrioritizedList).
+			ReservedForPod(podName, types.UID(podUID)).
+			Obj()
 	allocatedClaim = st.FromResourceClaim(pendingClaim).
 			Allocation(allocationResult).
 			Obj()
+	allocatedClaimWithPrioritizedList = st.FromResourceClaim(pendingClaimWithPrioritizedList).
+					Allocation(allocationResultWithPrioritizedList).
+					Obj()
+
 	allocatedClaimWithWrongTopology = st.FromResourceClaim(allocatedClaim).
 		Allocation(&resourceapi.AllocationResult{NodeSelector: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}, st.NodeSelectorTypeMatchExpressions).Obj()}).
@@ -201,6 +229,24 @@ func breakCELInClass(class *resourceapi.DeviceClass) *resourceapi.DeviceClass {
 	return class
 }
 
+func updateDeviceClassName(claim *resourceapi.ResourceClaim, deviceClassName string) *resourceapi.ResourceClaim {
+	claim = claim.DeepCopy()
+	for i := range claim.Spec.Devices.Requests {
+		// If the firstAvailable list is empty we update the device class name
+		// on the base request.
+		if len(claim.Spec.Devices.Requests[i].FirstAvailable) == 0 {
+			claim.Spec.Devices.Requests[i].DeviceClassName = deviceClassName
+		} else {
+			// If subrequests are specified, update the device class name on
+			// all of them.
+			for j := range claim.Spec.Devices.Requests[i].FirstAvailable {
+				claim.Spec.Devices.Requests[i].FirstAvailable[j].DeviceClassName = deviceClassName
+			}
+		}
+	}
+	return claim
+}
+
 // result defines the expected outcome of some operation. It covers
 // operation's status and the state of the world (= objects).
 type result struct {
@@ -295,6 +341,8 @@ func TestPlugin(t *testing.T) {
 		// Feature gates. False is chosen so that the uncommon case
 		// doesn't need to be set.
 		disableDRA bool
+
+		enableDRAPrioritizedList bool
 	}{
 		"empty": {
 			pod: st.MakePod().Name("foo").Namespace("default").Obj(),
@@ -795,6 +843,69 @@ func TestPlugin(t *testing.T) {
 			},
 			disableDRA: true,
 		},
+		"claim-with-request-with-unknown-device-class": {
+			pod:    podWithClaimName,
+			claims: []*resourceapi.ResourceClaim{updateDeviceClassName(claim, "does-not-exist")},
+			want: want{
+				prefilter: result{
+					status: framework.NewStatus(framework.Unschedulable, `request req-1: device class does-not-exist does not exist`),
+				},
+				postfilter: result{
+					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
+				},
+			},
+		},
+		"claim-with-prioritized-list-feature-disabled": {
+			enableDRAPrioritizedList: false,
+			pod:                      podWithClaimName,
+			claims:                   []*resourceapi.ResourceClaim{claimWithPrioritzedList},
+			classes:                  []*resourceapi.DeviceClass{deviceClass},
+			want: want{
+				filter: perNodeResult{
+					workerNode.Name: {
+						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `claim default/my-pod-my-resource, request req-1: has subrequests, but the feature is disabled`),
+					},
+				},
+			},
+		},
+		"claim-with-prioritized-list-unknown-device-class": {
+			enableDRAPrioritizedList: true,
+			pod:                      podWithClaimName,
+			claims:                   []*resourceapi.ResourceClaim{updateDeviceClassName(claimWithPrioritzedList, "does-not-exist")},
+			want: want{
+				prefilter: result{
+					status: framework.NewStatus(framework.Unschedulable, `request req-1/subreq-1: device class does-not-exist does not exist`),
+				},
+				postfilter: result{
+					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
+				},
+			},
+		},
+		"claim-with-prioritized-list": {
+			enableDRAPrioritizedList: true,
+			pod:                      podWithClaimName,
+			claims:                   []*resourceapi.ResourceClaim{pendingClaimWithPrioritizedList},
+			classes:                  []*resourceapi.DeviceClass{deviceClass},
+			objs:                     []apiruntime.Object{workerNodeSlice},
+			want: want{
+				reserve: result{
+					inFlightClaim: allocatedClaimWithPrioritizedList,
+				},
+				prebind: result{
+					assumedClaim: reserve(allocatedClaimWithPrioritizedList, podWithClaimName),
+					changes: change{
+						claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
+							if claim.Name == claimName {
+								claim = claim.DeepCopy()
+								claim.Finalizers = allocatedClaimWithPrioritizedList.Finalizers
+								claim.Status = inUseClaimWithPrioritizedList.Status
+							}
+							return claim
+						},
+					},
+				},
+			},
+		},
 	}
 
 	for name, tc := range testcases {
@@ -809,6 +920,7 @@ func TestPlugin(t *testing.T) {
 			features := feature.Features{
 				EnableDRAAdminAccess:            tc.enableDRAAdminAccess,
 				EnableDynamicResourceAllocation: !tc.disableDRA,
+				EnableDRAPrioritizedList:        tc.enableDRAPrioritizedList,
 			}
 			testCtx := setup(t, nodes, tc.claims, tc.classes, tc.objs, features)
 			initialObjects := testCtx.listAll(t)
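The new table entries cover the main paths: an unknown device class on a plain request, a prioritized-list claim rejected while the DRAPrioritizedList gate is off, an unknown class inside a subrequest (note the qualified req-1/subreq-1 name in the expected status), and a successful end-to-end allocation whose reserve/prebind expectations come from the prioritized-list fixtures defined earlier.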
@@ -20,6 +20,7 @@ package feature
 // This struct allows us to break the dependency of the plugins on
 // the internal k8s features pkg.
 type Features struct {
+	EnableDRAPrioritizedList        bool
 	EnableDRAAdminAccess            bool
 	EnableDynamicResourceAllocation bool
 	EnableVolumeCapacityPriority    bool
@@ -46,6 +46,7 @@ import (
 // through the WithFrameworkOutOfTreeRegistry option.
 func NewInTreeRegistry() runtime.Registry {
 	fts := plfeature.Features{
+		EnableDRAPrioritizedList:        feature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList),
 		EnableDRAAdminAccess:            feature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
 		EnableDynamicResourceAllocation: feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation),
 		EnableVolumeCapacityPriority:    feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
@@ -1104,6 +1104,28 @@ func (wrapper *ResourceClaimWrapper) Request(deviceClassName string) *ResourceCl
 	return wrapper
 }
 
+// RequestWithPrioritizedList adds one device request with one subrequest
+// per provided deviceClassName.
+func (wrapper *ResourceClaimWrapper) RequestWithPrioritizedList(deviceClassNames ...string) *ResourceClaimWrapper {
+	var prioritizedList []resourceapi.DeviceSubRequest
+	for i, deviceClassName := range deviceClassNames {
+		prioritizedList = append(prioritizedList, resourceapi.DeviceSubRequest{
+			Name:            fmt.Sprintf("subreq-%d", i+1),
+			AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
+			Count:           1,
+			DeviceClassName: deviceClassName,
+		})
+	}
+
+	wrapper.Spec.Devices.Requests = append(wrapper.Spec.Devices.Requests,
+		resourceapi.DeviceRequest{
+			Name:           fmt.Sprintf("req-%d", len(wrapper.Spec.Devices.Requests)+1),
+			FirstAvailable: prioritizedList,
+		},
+	)
+	return wrapper
+}
+
 // Allocation sets the allocation of the inner object.
 func (wrapper *ResourceClaimWrapper) Allocation(allocation *resourceapi.AllocationResult) *ResourceClaimWrapper {
 	if !slices.Contains(wrapper.ResourceClaim.Finalizers, resourceapi.Finalizer) {
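A usage sketch for the wrapper added above (it is a test helper, and the claim and class names here are made up):

```go
// Builds a claim with a single request "req-1" holding two subrequests,
// "subreq-1" for "class-a" and "subreq-2" for "class-b", each asking for
// exactly one device.
claim := st.MakeResourceClaim().
	Name("my-claim").
	Namespace("default").
	RequestWithPrioritizedList("class-a", "class-b").
	Obj()
```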
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"slices"
 	"strings"
 
 	v1 "k8s.io/api/core/v1"
@@ -46,12 +47,13 @@ type deviceClassLister interface {
 // available and the current state of the cluster (claims, classes, resource
 // slices).
 type Allocator struct {
 	adminAccessEnabled bool
-	claimsToAllocate   []*resourceapi.ResourceClaim
-	allocatedDevices   sets.Set[DeviceID]
-	classLister        deviceClassLister
-	slices             []*resourceapi.ResourceSlice
-	celCache           *cel.Cache
+	prioritizedListEnabled bool
+	claimsToAllocate       []*resourceapi.ResourceClaim
+	allocatedDevices       sets.Set[DeviceID]
+	classLister            deviceClassLister
+	slices                 []*resourceapi.ResourceSlice
+	celCache               *cel.Cache
 }
 
 // NewAllocator returns an allocator for a certain set of claims or an error if
|
|||||||
// The returned Allocator can be used multiple times and is thread-safe.
|
// The returned Allocator can be used multiple times and is thread-safe.
|
||||||
func NewAllocator(ctx context.Context,
|
func NewAllocator(ctx context.Context,
|
||||||
adminAccessEnabled bool,
|
adminAccessEnabled bool,
|
||||||
|
prioritizedListEnabled bool,
|
||||||
claimsToAllocate []*resourceapi.ResourceClaim,
|
claimsToAllocate []*resourceapi.ResourceClaim,
|
||||||
allocatedDevices sets.Set[DeviceID],
|
allocatedDevices sets.Set[DeviceID],
|
||||||
classLister deviceClassLister,
|
classLister deviceClassLister,
|
||||||
@ -67,12 +70,13 @@ func NewAllocator(ctx context.Context,
|
|||||||
celCache *cel.Cache,
|
celCache *cel.Cache,
|
||||||
) (*Allocator, error) {
|
) (*Allocator, error) {
|
||||||
return &Allocator{
|
return &Allocator{
|
||||||
adminAccessEnabled: adminAccessEnabled,
|
adminAccessEnabled: adminAccessEnabled,
|
||||||
claimsToAllocate: claimsToAllocate,
|
prioritizedListEnabled: prioritizedListEnabled,
|
||||||
allocatedDevices: allocatedDevices,
|
claimsToAllocate: claimsToAllocate,
|
||||||
classLister: classLister,
|
allocatedDevices: allocatedDevices,
|
||||||
slices: slices,
|
classLister: classLister,
|
||||||
celCache: celCache,
|
slices: slices,
|
||||||
|
celCache: celCache,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -148,9 +152,9 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
|
|||||||
// and their requests. For each claim we determine how many devices
|
// and their requests. For each claim we determine how many devices
|
||||||
// need to be allocated. If not all can be stored in the result, the
|
// need to be allocated. If not all can be stored in the result, the
|
||||||
// claim cannot be allocated.
|
// claim cannot be allocated.
|
||||||
numDevicesTotal := 0
|
minDevicesTotal := 0
|
||||||
for claimIndex, claim := range alloc.claimsToAllocate {
|
for claimIndex, claim := range alloc.claimsToAllocate {
|
||||||
numDevicesPerClaim := 0
|
minDevicesPerClaim := 0
|
||||||
|
|
||||||
// If we have any any request that wants "all" devices, we need to
|
// If we have any any request that wants "all" devices, we need to
|
||||||
// figure out how much "all" is. If some pool is incomplete, we stop
|
// figure out how much "all" is. If some pool is incomplete, we stop
|
||||||
@ -161,92 +165,57 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
|
|||||||
// has some matching device.
|
// has some matching device.
|
||||||
for requestIndex := range claim.Spec.Devices.Requests {
|
for requestIndex := range claim.Spec.Devices.Requests {
|
||||||
request := &claim.Spec.Devices.Requests[requestIndex]
|
request := &claim.Spec.Devices.Requests[requestIndex]
|
||||||
for i, selector := range request.Selectors {
|
|
||||||
if selector.CEL == nil {
|
|
||||||
// Unknown future selector type!
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s, selector #%d: CEL expression empty (unsupported selector type?)", klog.KObj(claim), request.Name, i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !a.adminAccessEnabled && request.AdminAccess != nil {
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s: admin access is requested, but the feature is disabled", klog.KObj(claim), request.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should be set. If it isn't, something changed and we should refuse to proceed.
|
|
||||||
if request.DeviceClassName == "" {
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s: missing device class name (unsupported request type?)", klog.KObj(claim), request.Name)
|
|
||||||
}
|
|
||||||
class, err := alloc.classLister.Get(request.DeviceClassName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s: could not retrieve device class %s: %w", klog.KObj(claim), request.Name, request.DeviceClassName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start collecting information about the request.
|
|
||||||
// The class must be set and stored before calling isSelectable.
|
|
||||||
requestData := requestData{
|
|
||||||
class: class,
|
|
||||||
}
|
|
||||||
requestKey := requestIndices{claimIndex: claimIndex, requestIndex: requestIndex}
|
requestKey := requestIndices{claimIndex: claimIndex, requestIndex: requestIndex}
|
||||||
alloc.requestData[requestKey] = requestData
|
hasSubRequests := len(request.FirstAvailable) > 0
|
||||||
|
|
||||||
switch request.AllocationMode {
|
// Error out if the prioritizedList feature is not enabled and the request
|
||||||
case resourceapi.DeviceAllocationModeExactCount:
|
// has subrequests. This is to avoid surprising behavior for users.
|
||||||
numDevices := request.Count
|
if !a.prioritizedListEnabled && hasSubRequests {
|
||||||
if numDevices > math.MaxInt {
|
return nil, fmt.Errorf("claim %s, request %s: has subrequests, but the feature is disabled", klog.KObj(claim), request.Name)
|
||||||
// Allowed by API validation, but doesn't make sense.
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s: exact count %d is too large", klog.KObj(claim), request.Name, numDevices)
|
|
||||||
}
|
|
||||||
requestData.numDevices = int(numDevices)
|
|
||||||
case resourceapi.DeviceAllocationModeAll:
|
|
||||||
requestData.allDevices = make([]deviceWithID, 0, resourceapi.AllocationResultsMaxSize)
|
|
||||||
for _, pool := range pools {
|
|
||||||
if pool.IsIncomplete {
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s: asks for all devices, but resource pool %s is currently being updated", klog.KObj(claim), request.Name, pool.PoolID)
|
|
||||||
}
|
|
||||||
if pool.IsInvalid {
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s: asks for all devices, but resource pool %s is currently invalid", klog.KObj(claim), request.Name, pool.PoolID)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, slice := range pool.Slices {
|
|
||||||
for deviceIndex := range slice.Spec.Devices {
|
|
||||||
selectable, err := alloc.isSelectable(requestKey, slice, deviceIndex)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if selectable {
|
|
||||||
device := deviceWithID{
|
|
||||||
id: DeviceID{Driver: slice.Spec.Driver, Pool: slice.Spec.Pool.Name, Device: slice.Spec.Devices[deviceIndex].Name},
|
|
||||||
basic: slice.Spec.Devices[deviceIndex].Basic,
|
|
||||||
slice: slice,
|
|
||||||
}
|
|
||||||
requestData.allDevices = append(requestData.allDevices, device)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// At least one device is required for 'All' allocation mode.
|
|
||||||
if len(requestData.allDevices) == 0 {
|
|
||||||
alloc.logger.V(6).Info("Allocation for 'all' devices didn't succeed: no devices found", "claim", klog.KObj(claim), "request", request.Name)
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
requestData.numDevices = len(requestData.allDevices)
|
|
||||||
alloc.logger.V(6).Info("Request for 'all' devices", "claim", klog.KObj(claim), "request", request.Name, "numDevicesPerRequest", requestData.numDevices)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("claim %s, request %s: unsupported count mode %s", klog.KObj(claim), request.Name, request.AllocationMode)
|
|
||||||
}
|
}
|
||||||
alloc.requestData[requestKey] = requestData
|
|
||||||
numDevicesPerClaim += requestData.numDevices
|
|
||||||
}
|
|
||||||
alloc.logger.V(6).Info("Checked claim", "claim", klog.KObj(claim), "numDevices", numDevicesPerClaim)
|
|
||||||
|
|
||||||
|
if hasSubRequests {
|
||||||
|
// We need to find the minimum number of devices that can be allocated
|
||||||
|
// for the request, so setting this to a high number so we can do the
|
||||||
|
// easy comparison in the loop.
|
||||||
|
minDevicesPerRequest := math.MaxInt
|
||||||
|
|
||||||
|
// A request with subrequests gets one entry per subrequest in alloc.requestData.
|
||||||
|
// We can only predict a lower number of devices because it depends on which
|
||||||
|
// subrequest gets chosen.
|
||||||
|
for i, subReq := range request.FirstAvailable {
|
||||||
|
reqData, err := alloc.validateDeviceRequest(&deviceSubRequestAccessor{subRequest: &subReq},
|
||||||
|
&deviceRequestAccessor{request: request}, requestKey, pools)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
requestKey.subRequestIndex = i
|
||||||
|
alloc.requestData[requestKey] = reqData
|
||||||
|
if reqData.numDevices < minDevicesPerRequest {
|
||||||
|
minDevicesPerRequest = reqData.numDevices
|
||||||
|
}
|
||||||
|
}
|
||||||
|
minDevicesPerClaim += minDevicesPerRequest
|
||||||
|
} else {
|
||||||
|
reqData, err := alloc.validateDeviceRequest(&deviceRequestAccessor{request: request}, nil, requestKey, pools)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
alloc.requestData[requestKey] = reqData
|
||||||
|
minDevicesPerClaim += reqData.numDevices
|
||||||
|
}
|
||||||
|
}
|
||||||
|
alloc.logger.V(6).Info("Checked claim", "claim", klog.KObj(claim), "minDevices", minDevicesPerClaim)
|
||||||
// Check that we don't end up with too many results.
|
// Check that we don't end up with too many results.
|
||||||
if numDevicesPerClaim > resourceapi.AllocationResultsMaxSize {
|
// This isn't perfectly reliable because numDevicesPerClaim is
|
||||||
return nil, fmt.Errorf("claim %s: number of requested devices %d exceeds the claim limit of %d", klog.KObj(claim), numDevicesPerClaim, resourceapi.AllocationResultsMaxSize)
|
// only a lower bound, so allocation also has to check this.
|
||||||
|
if minDevicesPerClaim > resourceapi.AllocationResultsMaxSize {
|
||||||
|
return nil, fmt.Errorf("claim %s: number of requested devices %d exceeds the claim limit of %d", klog.KObj(claim), minDevicesPerClaim, resourceapi.AllocationResultsMaxSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we don't, then we can pre-allocate the result slices for
|
// If we don't, then we can pre-allocate the result slices for
|
||||||
// appending the actual results later.
|
// appending the actual results later.
|
||||||
alloc.result[claimIndex].devices = make([]internalDeviceResult, 0, numDevicesPerClaim)
|
alloc.result[claimIndex].devices = make([]internalDeviceResult, 0, minDevicesPerClaim)
|
||||||
|
|
||||||
// Constraints are assumed to be monotonic: once a constraint returns
|
// Constraints are assumed to be monotonic: once a constraint returns
|
||||||
// false, adding more devices will not cause it to return true. This
|
// false, adding more devices will not cause it to return true. This
|
||||||
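The rename from numDevices* to minDevices* is the crux of this hunk: for a request with alternatives, the device count that allocation will actually use depends on which subrequest wins, so this pre-pass can only establish a lower bound. A condensed sketch of that bookkeeping, where deviceCountFor stands in for the per-subrequest count that validateDeviceRequest computes (the helper name is hypothetical):

```go
// Lower bound for one request with alternatives: the cheapest subrequest.
minDevicesPerRequest := math.MaxInt
for _, subReq := range request.FirstAvailable {
	if n := deviceCountFor(subReq); n < minDevicesPerRequest { // deviceCountFor is a hypothetical stand-in
		minDevicesPerRequest = n
	}
}
minDevicesPerClaim += minDevicesPerRequest
```

Because the total is only a lower bound, the AllocationResultsMaxSize limit is re-checked during allocation itself (see the allocateOne changes further down).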
@@ -273,7 +242,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 			}
 		}
 		alloc.constraints[claimIndex] = constraints
-		numDevicesTotal += numDevicesPerClaim
+		minDevicesTotal += minDevicesPerClaim
 	}
 
 	// Selecting a device for a request is independent of what has been
@@ -284,9 +253,9 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 	alloc.deviceMatchesRequest = make(map[matchKey]bool)
 
 	// We can estimate the size based on what we need to allocate.
-	alloc.allocatingDevices = make(map[DeviceID]bool, numDevicesTotal)
+	alloc.allocatingDevices = make(map[DeviceID]bool, minDevicesTotal)
 
-	alloc.logger.V(6).Info("Gathered information about devices", "numAllocated", len(alloc.allocatedDevices), "toBeAllocated", numDevicesTotal)
+	alloc.logger.V(6).Info("Gathered information about devices", "numAllocated", len(alloc.allocatedDevices), "minDevicesToBeAllocated", minDevicesTotal)
 
 	// In practice, there aren't going to be many different CEL
 	// expressions. Most likely, there is going to be handful of different
@@ -301,7 +270,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 
 	// All errors get created such that they can be returned by Allocate
 	// without further wrapping.
-	done, err := alloc.allocateOne(deviceIndices{})
+	done, err := alloc.allocateOne(deviceIndices{}, false)
 	if errors.Is(err, errStop) {
 		return nil, nil
 	}
@@ -319,7 +288,7 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 		allocationResult.Devices.Results = make([]resourceapi.DeviceRequestAllocationResult, len(internalResult.devices))
 		for i, internal := range internalResult.devices {
 			allocationResult.Devices.Results[i] = resourceapi.DeviceRequestAllocationResult{
-				Request: internal.request,
+				Request: internal.requestName(),
 				Driver:  internal.id.Driver.String(),
 				Pool:    internal.id.Pool.String(),
 				Device:  internal.id.Device.String(),
@@ -329,7 +298,15 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 
 		// Populate configs.
 		for requestIndex := range claim.Spec.Devices.Requests {
-			class := alloc.requestData[requestIndices{claimIndex: claimIndex, requestIndex: requestIndex}].class
+			requestKey := requestIndices{claimIndex: claimIndex, requestIndex: requestIndex}
+			requestData := alloc.requestData[requestKey]
+			if requestData.parentRequest != nil {
+				// We need the class of the selected subrequest.
+				requestKey.subRequestIndex = requestData.selectedSubRequestIndex
+				requestData = alloc.requestData[requestKey]
+			}
+
+			class := requestData.class
 			if class != nil {
 				for _, config := range class.Spec.Config {
 					allocationResult.Devices.Config = append(allocationResult.Devices.Config, resourceapi.DeviceAllocationConfiguration{
@@ -341,11 +318,42 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 			}
 		}
 		for _, config := range claim.Spec.Devices.Config {
-			allocationResult.Devices.Config = append(allocationResult.Devices.Config, resourceapi.DeviceAllocationConfiguration{
-				Source:              resourceapi.AllocationConfigSourceClaim,
-				Requests:            config.Requests,
-				DeviceConfiguration: config.DeviceConfiguration,
-			})
+			// If Requests are empty, it applies to all. So it can just be included.
+			if len(config.Requests) == 0 {
+				allocationResult.Devices.Config = append(allocationResult.Devices.Config, resourceapi.DeviceAllocationConfiguration{
+					Source:              resourceapi.AllocationConfigSourceClaim,
+					Requests:            config.Requests,
+					DeviceConfiguration: config.DeviceConfiguration,
+				})
+				continue
+			}
+
+			for i, request := range claim.Spec.Devices.Requests {
+				if slices.Contains(config.Requests, request.Name) {
+					allocationResult.Devices.Config = append(allocationResult.Devices.Config, resourceapi.DeviceAllocationConfiguration{
+						Source:              resourceapi.AllocationConfigSourceClaim,
+						Requests:            config.Requests,
+						DeviceConfiguration: config.DeviceConfiguration,
+					})
+					continue
+				}
+
+				requestKey := requestIndices{claimIndex: claimIndex, requestIndex: i}
+				requestData := alloc.requestData[requestKey]
+				if requestData.parentRequest == nil {
+					continue
+				}
+
+				subRequest := request.FirstAvailable[requestData.selectedSubRequestIndex]
+				subRequestName := fmt.Sprintf("%s/%s", request.Name, subRequest.Name)
+				if slices.Contains(config.Requests, subRequestName) {
+					allocationResult.Devices.Config = append(allocationResult.Devices.Config, resourceapi.DeviceAllocationConfiguration{
+						Source:              resourceapi.AllocationConfigSourceClaim,
+						Requests:            config.Requests,
+						DeviceConfiguration: config.DeviceConfiguration,
+					})
+				}
+			}
 		}
 
 		// Determine node selector.
@@ -359,6 +367,86 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 	return result, nil
 }
 
+func (a *allocator) validateDeviceRequest(request requestAccessor, parentRequest requestAccessor, requestKey requestIndices, pools []*Pool) (requestData, error) {
+	claim := a.claimsToAllocate[requestKey.claimIndex]
+	requestData := requestData{
+		request:       request,
+		parentRequest: parentRequest,
+	}
+	for i, selector := range request.selectors() {
+		if selector.CEL == nil {
+			// Unknown future selector type!
+			return requestData, fmt.Errorf("claim %s, request %s, selector #%d: CEL expression empty (unsupported selector type?)", klog.KObj(claim), request.name(), i)
+		}
+	}
+
+	if !a.adminAccessEnabled && request.hasAdminAccess() {
+		return requestData, fmt.Errorf("claim %s, request %s: admin access is requested, but the feature is disabled", klog.KObj(claim), request.name())
+	}
+
+	// Should be set. If it isn't, something changed and we should refuse to proceed.
+	if request.deviceClassName() == "" {
+		return requestData, fmt.Errorf("claim %s, request %s: missing device class name (unsupported request type?)", klog.KObj(claim), request.name())
+	}
+	class, err := a.classLister.Get(request.deviceClassName())
+	if err != nil {
+		return requestData, fmt.Errorf("claim %s, request %s: could not retrieve device class %s: %w", klog.KObj(claim), request.name(), request.deviceClassName(), err)
+	}
+
+	// Start collecting information about the request.
+	// The class must be set and stored before calling isSelectable.
+	requestData.class = class
+
+	switch request.allocationMode() {
+	case resourceapi.DeviceAllocationModeExactCount:
+		numDevices := request.count()
+		if numDevices > math.MaxInt {
+			// Allowed by API validation, but doesn't make sense.
+			return requestData, fmt.Errorf("claim %s, request %s: exact count %d is too large", klog.KObj(claim), request.name(), numDevices)
+		}
+		requestData.numDevices = int(numDevices)
+	case resourceapi.DeviceAllocationModeAll:
+		// If we have any any request that wants "all" devices, we need to
+		// figure out how much "all" is. If some pool is incomplete, we stop
+		// here because allocation cannot succeed. Once we do scoring, we should
+		// stop in all cases, not just when "all" devices are needed, because
+		// pulling from an incomplete might not pick the best solution and it's
+		// better to wait. This does not matter yet as long the incomplete pool
+		// has some matching device.
+		requestData.allDevices = make([]deviceWithID, 0, resourceapi.AllocationResultsMaxSize)
+		for _, pool := range pools {
+			if pool.IsIncomplete {
+				return requestData, fmt.Errorf("claim %s, request %s: asks for all devices, but resource pool %s is currently being updated", klog.KObj(claim), request.name(), pool.PoolID)
+			}
+			if pool.IsInvalid {
+				return requestData, fmt.Errorf("claim %s, request %s: asks for all devices, but resource pool %s is currently invalid", klog.KObj(claim), request.name(), pool.PoolID)
+			}
+
+			for _, slice := range pool.Slices {
+				for deviceIndex := range slice.Spec.Devices {
+					selectable, err := a.isSelectable(requestKey, requestData, slice, deviceIndex)
+					if err != nil {
+						return requestData, err
+					}
+					if selectable {
+						device := deviceWithID{
+							id:    DeviceID{Driver: slice.Spec.Driver, Pool: slice.Spec.Pool.Name, Device: slice.Spec.Devices[deviceIndex].Name},
+							basic: slice.Spec.Devices[deviceIndex].Basic,
+							slice: slice,
+						}
+						requestData.allDevices = append(requestData.allDevices, device)
+					}
+				}
+			}
+		}
+		requestData.numDevices = len(requestData.allDevices)
+		a.logger.V(6).Info("Request for 'all' devices", "claim", klog.KObj(claim), "request", request.name(), "numDevicesPerRequest", requestData.numDevices)
+	default:
+		return requestData, fmt.Errorf("claim %s, request %s: unsupported count mode %s", klog.KObj(claim), request.name(), request.allocationMode())
+	}
+	return requestData, nil
+}
+
 // errStop is a special error that gets returned by allocateOne if it detects
 // that allocation cannot succeed.
 var errStop = errors.New("stop allocation")
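validateDeviceRequest is written against a requestAccessor rather than a concrete request type so that requests and subrequests share one validation path. The interface and its two implementations (deviceRequestAccessor and deviceSubRequestAccessor) are defined in a part of the commit not included in this excerpt; the shape below is only inferred from the call sites above and should be read as a sketch, not the authoritative definition:

```go
// Inferred from usage in this diff: name(), deviceClassName(),
// allocationMode(), count(), adminAccess(), hasAdminAccess(), selectors().
type requestAccessor interface {
	name() string
	deviceClassName() string
	allocationMode() resourceapi.DeviceAllocationMode
	count() int64
	adminAccess() bool
	hasAdminAccess() bool
	selectors() []resourceapi.DeviceSelector
}
```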
@@ -372,7 +460,7 @@ type allocator struct {
 	pools                []*Pool
 	deviceMatchesRequest map[matchKey]bool
 	constraints          [][]constraint                 // one list of constraints per claim
-	requestData          map[requestIndices]requestData // one entry per request
+	requestData          map[requestIndices]requestData // one entry per request with no subrequests and one entry per subrequest
 	allocatingDevices    map[DeviceID]bool
 	result               []internalAllocationResult
 }
@@ -383,21 +471,38 @@ type matchKey struct {
 	requestIndices
 }
 
-// requestIndices identifies one specific request by its
-// claim and request index.
+// requestIndices identifies one specific request
+// or subrequest by three properties:
+//
+// - claimIndex: The index of the claim in the requestData map.
+// - requestIndex: The index of the request in the claim.
+// - subRequestIndex: The index of the subrequest in the parent request.
 type requestIndices struct {
 	claimIndex, requestIndex int
+	subRequestIndex          int
 }
 
 // deviceIndices identifies one specific required device inside
-// a request of a certain claim.
+// a request or subrequest of a certain claim.
 type deviceIndices struct {
-	claimIndex, requestIndex, deviceIndex int
+	claimIndex      int // The index of the claim in the allocator.
+	requestIndex    int // The index of the request in the claim.
+	subRequestIndex int // The index of the subrequest within the request (ignored if subRequest is false).
+	deviceIndex     int // The index of a device within a request or subrequest.
 }
 
 type requestData struct {
-	class      *resourceapi.DeviceClass
-	numDevices int
+	// The request or subrequest which needs to be allocated.
+	// Never nil.
+	request requestAccessor
+	// The parent of a subrequest, nil if not a subrequest.
+	parentRequest requestAccessor
+	class         *resourceapi.DeviceClass
+	numDevices    int
+
+	// selectedSubRequestIndex is set for the entry with requestIndices.subRequestIndex == 0.
+	// It is the index of the subrequest which got picked during allocation.
+	selectedSubRequestIndex int
+
 	// pre-determined set of devices for allocating "all" devices
 	allDevices []deviceWithID
@@ -414,21 +519,29 @@ type internalAllocationResult struct {
 }
 
 type internalDeviceResult struct {
-	request     string
-	id          DeviceID
-	slice       *draapi.ResourceSlice
-	adminAccess *bool
+	request       string // name of the request (if no subrequests) or the subrequest
+	parentRequest string // name of the request which contains the subrequest, empty otherwise
+	id            DeviceID
+	slice         *draapi.ResourceSlice
+	adminAccess   *bool
+}
+
+func (i internalDeviceResult) requestName() string {
+	if i.parentRequest == "" {
+		return i.request
+	}
+	return fmt.Sprintf("%s/%s", i.parentRequest, i.request)
 }
 
 type constraint interface {
 	// add is called whenever a device is about to be allocated. It must
 	// check whether the device matches the constraint and if yes,
 	// track that it is allocated.
-	add(requestName string, device *draapi.BasicDevice, deviceID DeviceID) bool
+	add(requestName, subRequestName string, device *draapi.BasicDevice, deviceID DeviceID) bool
 
 	// For every successful add there is exactly one matching removed call
 	// with the exact same parameters.
-	remove(requestName string, device *draapi.BasicDevice, deviceID DeviceID)
+	remove(requestName, subRequestName string, device *draapi.BasicDevice, deviceID DeviceID)
 }
 
 // matchAttributeConstraint compares an attribute value across devices.
@@ -447,8 +560,8 @@ type matchAttributeConstraint struct {
 	numDevices int
 }
 
-func (m *matchAttributeConstraint) add(requestName string, device *draapi.BasicDevice, deviceID DeviceID) bool {
-	if m.requestNames.Len() > 0 && !m.requestNames.Has(requestName) {
+func (m *matchAttributeConstraint) add(requestName, subRequestName string, device *draapi.BasicDevice, deviceID DeviceID) bool {
+	if m.requestNames.Len() > 0 && !m.matches(requestName, subRequestName) {
 		// Device not affected by constraint.
 		m.logger.V(7).Info("Constraint does not apply to request", "request", requestName)
 		return true
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *matchAttributeConstraint) remove(requestName string, device *draapi.BasicDevice, deviceID DeviceID) {
|
func (m *matchAttributeConstraint) remove(requestName, subRequestName string, device *draapi.BasicDevice, deviceID DeviceID) {
|
||||||
if m.requestNames.Len() > 0 && !m.requestNames.Has(requestName) {
|
if m.requestNames.Len() > 0 && !m.matches(requestName, subRequestName) {
|
||||||
// Device not affected by constraint.
|
// Device not affected by constraint.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -514,6 +627,15 @@ func (m *matchAttributeConstraint) remove(requestName string, device *draapi.Bas
 	m.logger.V(7).Info("Device removed from constraint set", "device", deviceID, "numDevices", m.numDevices)
 }
 
+func (m *matchAttributeConstraint) matches(requestName, subRequestName string) bool {
+	if subRequestName == "" {
+		return m.requestNames.Has(requestName)
+	} else {
+		fullSubRequestName := fmt.Sprintf("%s/%s", requestName, subRequestName)
+		return m.requestNames.Has(requestName) || m.requestNames.Has(fullSubRequestName)
+	}
+}
+
 func lookupAttribute(device *draapi.BasicDevice, deviceID DeviceID, attributeName draapi.FullyQualifiedName) *draapi.DeviceAttribute {
 	// Fully-qualified match?
 	if attr, ok := device.Attributes[draapi.QualifiedName(attributeName)]; ok {
@@ -542,7 +664,11 @@ func lookupAttribute(device *draapi.BasicDevice, deviceID DeviceID, attributeNam
 // allocateOne iterates over all eligible devices (not in use, match selector,
 // satisfy constraints) for a specific required device. It returns true if
 // everything got allocated, an error if allocation needs to stop.
-func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
+//
+// allocateSubRequest is true when trying to allocate one particular subrequest.
+// This allows the logic for subrequests to call allocateOne with the same
+// device index without causing infinite recursion.
+func (alloc *allocator) allocateOne(r deviceIndices, allocateSubRequest bool) (bool, error) {
 	if r.claimIndex >= len(alloc.claimsToAllocate) {
 		// Done! If we were doing scoring, we would compare the current allocation result
 		// against the previous one, keep the best, and continue. Without scoring, we stop
@@ -554,20 +680,73 @@ func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
 	claim := alloc.claimsToAllocate[r.claimIndex]
 	if r.requestIndex >= len(claim.Spec.Devices.Requests) {
 		// Done with the claim, continue with the next one.
-		return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex + 1})
+		return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex + 1}, false)
+	}
+
+	// r.subRequestIndex is zero unless the for loop below is in the
+	// recursion chain.
+	requestKey := requestIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, subRequestIndex: r.subRequestIndex}
+	requestData := alloc.requestData[requestKey]
+
+	// Subrequests are special: we only need to allocate one of them, then
+	// we can move on to the next request. We enter this for loop when
+	// hitting the first subrequest, but not if we are already working on a
+	// specific subrequest.
+	if !allocateSubRequest && requestData.parentRequest != nil {
+		for subRequestIndex := 0; ; subRequestIndex++ {
+			nextSubRequestKey := requestKey
+			nextSubRequestKey.subRequestIndex = subRequestIndex
+			if _, ok := alloc.requestData[nextSubRequestKey]; !ok {
+				// Past the end of the subrequests without finding a solution -> give up.
+				return false, nil
+			}
+
+			r.subRequestIndex = subRequestIndex
+			success, err := alloc.allocateOne(r, true /* prevent infinite recusion */)
+			if err != nil {
+				return false, err
+			}
+			// If allocation with a subrequest succeeds, return without
+			// attempting the remaining subrequests.
+			if success {
+				// Store the index of the selected subrequest
+				requestData.selectedSubRequestIndex = subRequestIndex
+				alloc.requestData[requestKey] = requestData
+				return true, nil
+			}
+		}
+		// This is unreachable, so no need to have a return statement here.
+	}
+
+	// Look up the current request that we are attempting to satisfy. This can
+	// be either a request or a subrequest.
+	request := requestData.request
+	doAllDevices := request.allocationMode() == resourceapi.DeviceAllocationModeAll
+
+	// At least one device is required for 'All' allocation mode.
+	if doAllDevices && len(requestData.allDevices) == 0 {
+		alloc.logger.V(6).Info("Allocation for 'all' devices didn't succeed: no devices found", "claim", klog.KObj(claim), "request", requestData.request.name())
+		return false, nil
 	}
 
 	// We already know how many devices per request are needed.
-	// Ready to move on to the next request?
-	requestData := alloc.requestData[requestIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex}]
 	if r.deviceIndex >= requestData.numDevices {
-		return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex + 1})
+		// Done with request, continue with next one. We have completed the work for
+		// the request or subrequest, so we can no longer be allocating devices for
+		// a subrequest.
+		return alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex + 1}, false)
 	}
 
-	request := &alloc.claimsToAllocate[r.claimIndex].Spec.Devices.Requests[r.requestIndex]
-	doAllDevices := request.AllocationMode == resourceapi.DeviceAllocationModeAll
-	alloc.logger.V(6).Info("Allocating one device", "currentClaim", r.claimIndex, "totalClaims", len(alloc.claimsToAllocate), "currentRequest", r.requestIndex, "totalRequestsPerClaim", len(claim.Spec.Devices.Requests), "currentDevice", r.deviceIndex, "devicesPerRequest", requestData.numDevices, "allDevices", doAllDevices, "adminAccess", request.AdminAccess)
+	// Before trying to allocate devices, check if allocating the devices
+	// in the current request will put us over the threshold.
+	numDevicesAfterAlloc := len(alloc.result[r.claimIndex].devices) + requestData.numDevices
+	if numDevicesAfterAlloc > resourceapi.AllocationResultsMaxSize {
+		// Don't return an error here since we want to keep searching for
+		// a solution that works.
+		return false, nil
+	}
+
+	alloc.logger.V(6).Info("Allocating one device", "currentClaim", r.claimIndex, "totalClaims", len(alloc.claimsToAllocate), "currentRequest", r.requestIndex, "currentSubRequest", r.subRequestIndex, "totalRequestsPerClaim", len(claim.Spec.Devices.Requests), "currentDevice", r.deviceIndex, "devicesPerRequest", requestData.numDevices, "allDevices", doAllDevices, "adminAccess", request.adminAccess())
 	if doAllDevices {
 		// For "all" devices we already know which ones we need. We
 		// just need to check whether we can use them.
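This loop is where the ordering guarantee of firstAvailable comes from: subrequests are attempted strictly in the order they are listed, each attempt re-enters allocateOne with allocateSubRequest set to true so the same request index can be revisited without infinite recursion, and the first subrequest for which the rest of the allocation also succeeds is recorded in selectedSubRequestIndex. A failed alternative unwinds cleanly because unsatisfiable branches now return false rather than errStop (see the change in the "all devices" path below), which keeps the backtracking search alive.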
@@ -580,9 +759,9 @@ func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
             // The order in which we allocate "all" devices doesn't matter,
             // so we only try with the one which was up next. If we couldn't
             // get all of them, then there is no solution and we have to stop.
-            return false, errStop
+            return false, nil
         }
-        done, err := alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, deviceIndex: r.deviceIndex + 1})
+        done, err := alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, deviceIndex: r.deviceIndex + 1}, allocateSubRequest)
         if err != nil {
             return false, err
         }
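A note on the change from errStop to nil above: with prioritized alternatives, failing to satisfy one subrequest is no longer a reason to abort the whole search; returning false simply unwinds so that the caller can try the next entry in firstAvailable. A minimal sketch of that fallback pattern, with illustrative names that are not part of this commit:

func trySubRequests(alternatives []func() bool) bool {
    for _, try := range alternatives {
        // Each attempt is expected to roll back its own partial state on failure.
        if try() {
            // Success: the remaining, lower-priority alternatives are ignored.
            return true
        }
    }
    // No alternative worked; the caller has to backtrack further.
    return false
}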
@@ -606,13 +785,14 @@ func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
         deviceID := DeviceID{Driver: pool.Driver, Pool: pool.Pool, Device: slice.Spec.Devices[deviceIndex].Name}
 
         // Checking for "in use" is cheap and thus gets done first.
-        if !ptr.Deref(request.AdminAccess, false) && (alloc.allocatedDevices.Has(deviceID) || alloc.allocatingDevices[deviceID]) {
+        if !request.adminAccess() && (alloc.allocatedDevices.Has(deviceID) || alloc.allocatingDevices[deviceID]) {
             alloc.logger.V(7).Info("Device in use", "device", deviceID)
             continue
         }
 
         // Next check selectors.
-        selectable, err := alloc.isSelectable(requestIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex}, slice, deviceIndex)
+        requestKey := requestIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, subRequestIndex: r.subRequestIndex}
+        selectable, err := alloc.isSelectable(requestKey, requestData, slice, deviceIndex)
         if err != nil {
             return false, err
         }
@@ -636,7 +816,13 @@ func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
             alloc.logger.V(7).Info("Device not usable", "device", deviceID)
             continue
         }
-        done, err := alloc.allocateOne(deviceIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, deviceIndex: r.deviceIndex + 1})
+        deviceKey := deviceIndices{
+            claimIndex:      r.claimIndex,
+            requestIndex:    r.requestIndex,
+            subRequestIndex: r.subRequestIndex,
+            deviceIndex:     r.deviceIndex + 1,
+        }
+        done, err := alloc.allocateOne(deviceKey, allocateSubRequest)
         if err != nil {
             return false, err
         }
@@ -657,7 +843,7 @@ func (alloc *allocator) allocateOne(r deviceIndices) (bool, error) {
 }
 
 // isSelectable checks whether a device satisfies the request and class selectors.
-func (alloc *allocator) isSelectable(r requestIndices, slice *draapi.ResourceSlice, deviceIndex int) (bool, error) {
+func (alloc *allocator) isSelectable(r requestIndices, requestData requestData, slice *draapi.ResourceSlice, deviceIndex int) (bool, error) {
     // This is the only supported device type at the moment.
     device := slice.Spec.Devices[deviceIndex].Basic
     if device == nil {
@@ -672,7 +858,6 @@ func (alloc *allocator) isSelectable(r requestIndices, slice *draapi.ResourceSli
         return matches, nil
     }
 
-    requestData := alloc.requestData[r]
     if requestData.class != nil {
         match, err := alloc.selectorsMatch(r, device, deviceID, requestData.class, requestData.class.Spec.Selectors)
         if err != nil {
@@ -684,8 +869,8 @@ func (alloc *allocator) isSelectable(r requestIndices, slice *draapi.ResourceSli
         }
     }
 
-    request := &alloc.claimsToAllocate[r.claimIndex].Spec.Devices.Requests[r.requestIndex]
-    match, err := alloc.selectorsMatch(r, device, deviceID, nil, request.Selectors)
+    request := requestData.request
+    match, err := alloc.selectorsMatch(r, device, deviceID, nil, request.selectors())
     if err != nil {
         return false, err
     }
@@ -752,26 +937,38 @@ func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDev
 // restore the previous state.
 func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, must bool) (bool, func(), error) {
     claim := alloc.claimsToAllocate[r.claimIndex]
-    request := &claim.Spec.Devices.Requests[r.requestIndex]
-    adminAccess := ptr.Deref(request.AdminAccess, false)
-    if !adminAccess && (alloc.allocatedDevices.Has(device.id) || alloc.allocatingDevices[device.id]) {
+    requestKey := requestIndices{claimIndex: r.claimIndex, requestIndex: r.requestIndex, subRequestIndex: r.subRequestIndex}
+    requestData := alloc.requestData[requestKey]
+    request := requestData.request
+    if !request.adminAccess() && (alloc.allocatedDevices.Has(device.id) || alloc.allocatingDevices[device.id]) {
         alloc.logger.V(7).Info("Device in use", "device", device.id)
         return false, nil, nil
     }
 
+    var parentRequestName string
+    var baseRequestName string
+    var subRequestName string
+    if requestData.parentRequest == nil {
+        baseRequestName = requestData.request.name()
+    } else {
+        parentRequestName = requestData.parentRequest.name()
+        baseRequestName = parentRequestName
+        subRequestName = requestData.request.name()
+    }
+
     // It's available. Now check constraints.
     for i, constraint := range alloc.constraints[r.claimIndex] {
-        added := constraint.add(request.Name, device.basic, device.id)
+        added := constraint.add(baseRequestName, subRequestName, device.basic, device.id)
         if !added {
             if must {
                 // It does not make sense to declare a claim where a constraint prevents getting
                 // all devices. Treat this as an error.
-                return false, nil, fmt.Errorf("claim %s, request %s: cannot add device %s because a claim constraint would not be satisfied", klog.KObj(claim), request.Name, device.id)
+                return false, nil, fmt.Errorf("claim %s, request %s: cannot add device %s because a claim constraint would not be satisfied", klog.KObj(claim), request.name(), device.id)
             }
 
             // Roll back for all previous constraints before we return.
             for e := 0; e < i; e++ {
-                alloc.constraints[r.claimIndex][e].remove(request.Name, device.basic, device.id)
+                alloc.constraints[r.claimIndex][e].remove(baseRequestName, subRequestName, device.basic, device.id)
             }
             return false, nil, nil
         }
@@ -780,25 +977,26 @@ func (alloc *allocator) allocateDevice(r deviceIndices, device deviceWithID, mus
     // All constraints satisfied. Mark as in use (unless we do admin access)
     // and record the result.
     alloc.logger.V(7).Info("Device allocated", "device", device.id)
-    if !adminAccess {
+    if !request.adminAccess() {
         alloc.allocatingDevices[device.id] = true
     }
     result := internalDeviceResult{
-        request: request.Name,
-        id:      device.id,
-        slice:   device.slice,
+        request:       request.name(),
+        parentRequest: parentRequestName,
+        id:            device.id,
+        slice:         device.slice,
     }
-    if adminAccess {
-        result.adminAccess = &adminAccess
+    if request.adminAccess() {
+        result.adminAccess = ptr.To(request.adminAccess())
     }
     previousNumResults := len(alloc.result[r.claimIndex].devices)
     alloc.result[r.claimIndex].devices = append(alloc.result[r.claimIndex].devices, result)
 
     return true, func() {
         for _, constraint := range alloc.constraints[r.claimIndex] {
-            constraint.remove(request.Name, device.basic, device.id)
+            constraint.remove(baseRequestName, subRequestName, device.basic, device.id)
         }
-        if !adminAccess {
+        if !request.adminAccess() {
             alloc.allocatingDevices[device.id] = false
         }
         // Truncate, but keep the underlying slice.
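allocateDevice hands its undo logic back as a closure, which the backtracking search calls when a tentative allocation must be taken back. A stripped-down sketch of this reserve/undo idiom, using simplified types rather than the allocator's own:

// reserve marks id as taken and returns a func that restores the
// previous state; the caller keeps it for use while backtracking.
func reserve(inUse map[string]bool, id string) (ok bool, undo func()) {
    if inUse[id] {
        return false, nil
    }
    inUse[id] = true
    return true, func() { inUse[id] = false }
}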
@@ -855,6 +1053,88 @@ func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.N
     return nil, nil
 }
 
+// requestAccessor is an interface for accessing either
+// DeviceRequests or DeviceSubRequests. It lets most
+// of the allocator code work with either DeviceRequests
+// or DeviceSubRequests.
+type requestAccessor interface {
+    name() string
+    deviceClassName() string
+    allocationMode() resourceapi.DeviceAllocationMode
+    count() int64
+    adminAccess() bool
+    hasAdminAccess() bool
+    selectors() []resourceapi.DeviceSelector
+}
+
+// deviceRequestAccessor is an implementation of the
+// requestAccessor interface for DeviceRequests.
+type deviceRequestAccessor struct {
+    request *resourceapi.DeviceRequest
+}
+
+func (d *deviceRequestAccessor) name() string {
+    return d.request.Name
+}
+
+func (d *deviceRequestAccessor) deviceClassName() string {
+    return d.request.DeviceClassName
+}
+
+func (d *deviceRequestAccessor) allocationMode() resourceapi.DeviceAllocationMode {
+    return d.request.AllocationMode
+}
+
+func (d *deviceRequestAccessor) count() int64 {
+    return d.request.Count
+}
+
+func (d *deviceRequestAccessor) adminAccess() bool {
+    return ptr.Deref(d.request.AdminAccess, false)
+}
+
+func (d *deviceRequestAccessor) hasAdminAccess() bool {
+    return d.request.AdminAccess != nil
+}
+
+func (d *deviceRequestAccessor) selectors() []resourceapi.DeviceSelector {
+    return d.request.Selectors
+}
+
+// deviceSubRequestAccessor is an implementation of the
+// requestAccessor interface for DeviceSubRequests.
+type deviceSubRequestAccessor struct {
+    subRequest *resourceapi.DeviceSubRequest
+}
+
+func (d *deviceSubRequestAccessor) name() string {
+    return d.subRequest.Name
+}
+
+func (d *deviceSubRequestAccessor) deviceClassName() string {
+    return d.subRequest.DeviceClassName
+}
+
+func (d *deviceSubRequestAccessor) allocationMode() resourceapi.DeviceAllocationMode {
+    return d.subRequest.AllocationMode
+}
+
+func (d *deviceSubRequestAccessor) count() int64 {
+    return d.subRequest.Count
+}
+
+func (d *deviceSubRequestAccessor) adminAccess() bool {
+    return false
+}
+
+func (d *deviceSubRequestAccessor) hasAdminAccess() bool {
+    return false
+}
+
+func (d *deviceSubRequestAccessor) selectors() []resourceapi.DeviceSelector {
+    return d.subRequest.Selectors
+}
+
 func addNewNodeSelectorRequirements(from []v1.NodeSelectorRequirement, to *[]v1.NodeSelectorRequirement) {
     for _, requirement := range from {
         if !containsNodeSelectorRequirement(*to, requirement) {
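These accessor types are what keep allocateOne, isSelectable and allocateDevice indifferent to whether they are handling a request or one of its subrequests. As a sketch of how they might be instantiated while gathering request data (accessorsFor is illustrative, not a helper from this diff):

// accessorsFor wraps a DeviceRequest, or each of its FirstAvailable
// subrequests, in the requestAccessor interface defined above.
func accessorsFor(request *resourceapi.DeviceRequest) []requestAccessor {
    if len(request.FirstAvailable) == 0 {
        return []requestAccessor{&deviceRequestAccessor{request: request}}
    }
    accessors := make([]requestAccessor, 0, len(request.FirstAvailable))
    for i := range request.FirstAvailable {
        accessors = append(accessors, &deviceSubRequestAccessor{subRequest: &request.FirstAvailable[i]})
    }
    return accessors
}

Note that deviceSubRequestAccessor hard-codes adminAccess to false: the DeviceSubRequest type has no AdminAccess field, so subrequests never grant it.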
@@ -41,29 +41,36 @@ import (
 )
 
 const (
     region1 = "region-1"
     region2 = "region-2"
     node1   = "node-1"
     node2   = "node-2"
     classA  = "class-a"
     classB  = "class-b"
     driverA = "driver-a"
     driverB = "driver-b"
     pool1   = "pool-1"
     pool2   = "pool-2"
     pool3   = "pool-3"
     pool4   = "pool-4"
     req0    = "req-0"
     req1    = "req-1"
     req2    = "req-2"
     req3    = "req-3"
+    subReq0     = "subReq-0"
+    subReq1     = "subReq-1"
+    req0SubReq0 = "req-0/subReq-0"
+    req0SubReq1 = "req-0/subReq-1"
+    req1SubReq0 = "req-1/subReq-0"
+    req1SubReq1 = "req-1/subReq-1"
     claim0  = "claim-0"
     claim1  = "claim-1"
     slice1  = "slice-1"
     slice2  = "slice-2"
     device1 = "device-1"
     device2 = "device-2"
     device3 = "device-3"
+    device4 = "device-4"
 )
 
 func init() {
@@ -165,6 +172,24 @@ func request(name, class string, count int64, selectors ...resourceapi.DeviceSel
     }
 }
 
+func subRequest(name, class string, count int64, selectors ...resourceapi.DeviceSelector) resourceapi.DeviceSubRequest {
+    return resourceapi.DeviceSubRequest{
+        Name:            name,
+        Count:           count,
+        AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
+        DeviceClassName: class,
+        Selectors:       selectors,
+    }
+}
+
+// generate a DeviceRequest with the given name and list of prioritized requests.
+func requestWithPrioritizedList(name string, prioritizedRequests ...resourceapi.DeviceSubRequest) resourceapi.DeviceRequest {
+    return resourceapi.DeviceRequest{
+        Name:           name,
+        FirstAvailable: prioritizedRequests,
+    }
+}
+
 // generate a ResourceClaim object with the given name, request and class.
 func claim(name, req, class string, constraints ...resourceapi.DeviceConstraint) *resourceapi.ResourceClaim {
     claim := claimWithRequests(name, constraints, request(req, class, 1))
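Together these helpers express the "prefer X, fall back to Y" shape used throughout the new test cases below, for example:

// A claim that asks for two class-A devices but accepts a single
// class-B device when the first alternative cannot be satisfied.
claim := claimWithRequests(claim0, nil,
    requestWithPrioritizedList(req0,
        subRequest(subReq0, classA, 2),
        subRequest(subReq1, classB, 1),
    ),
)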
@@ -183,6 +208,19 @@ func claimWithDeviceConfig(name, request, class, driver, attribute string) *reso
     return claim
 }
 
+func claimWithAll(name string, requests []resourceapi.DeviceRequest, constraints []resourceapi.DeviceConstraint, configs []resourceapi.DeviceClaimConfiguration) *resourceapi.ResourceClaim {
+    claim := claimWithRequests(name, constraints, requests...)
+    claim.Spec.Devices.Config = configs
+    return claim
+}
+
+func deviceClaimConfig(requests []string, deviceConfig resourceapi.DeviceConfiguration) resourceapi.DeviceClaimConfiguration {
+    return resourceapi.DeviceClaimConfiguration{
+        Requests:            requests,
+        DeviceConfiguration: deviceConfig,
+    }
+}
+
 // generate a Device object with the given name, capacity and attributes.
 func device(name string, capacity map[resourceapi.QualifiedName]resource.Quantity, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) resourceapi.Device {
     device := resourceapi.Device{
@@ -334,6 +372,16 @@ func allocationResultWithConfig(selector *v1.NodeSelector, driver string, source
     }
 }
 
+func allocationResultWithConfigs(selector *v1.NodeSelector, results []resourceapi.DeviceRequestAllocationResult, configs []resourceapi.DeviceAllocationConfiguration) resourceapi.AllocationResult {
+    return resourceapi.AllocationResult{
+        Devices: resourceapi.DeviceAllocationResult{
+            Results: results,
+            Config:  configs,
+        },
+        NodeSelector: selector,
+    }
+}
+
 // Helpers
 
 // convert a list of objects to a slice
@@ -351,6 +399,15 @@ func sliceWithOneDevice(name string, nodeSelection any, pool, driver string) *re
     return slice(name, nodeSelection, pool, driver, device(device1, nil, nil))
 }
 
+// generate a ResourceSlice object with the given parameters and the specified number of devices.
+func sliceWithMultipleDevices(name string, nodeSelection any, pool, driver string, count int) *resourceapi.ResourceSlice {
+    var devices []resourceapi.Device
+    for i := 0; i < count; i++ {
+        devices = append(devices, device(fmt.Sprintf("device-%d", i), nil, nil))
+    }
+    return slice(name, nodeSelection, pool, driver, devices...)
+}
+
 func TestAllocator(t *testing.T) {
     nonExistentAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "NonExistentAttribute")
     boolAttribute := resourceapi.FullyQualifiedName(driverA + "/" + "boolAttribute")
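One of the updated test cases below uses this helper to build a pool that is one device larger than a single claim may hold:

// resourceapi.AllocationResultsMaxSize+1 devices in one pool, so a
// request for all of them must fail with "exceeds the claim limit".
slices := objects(sliceWithMultipleDevices(slice1, node1, pool1, driverA, resourceapi.AllocationResultsMaxSize+1))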
@@ -360,6 +417,7 @@ func TestAllocator(t *testing.T) {
 
     testcases := map[string]struct {
         adminAccess      bool
+        prioritizedList  bool
         claimsToAllocate []*resourceapi.ResourceClaim
         allocatedDevices []DeviceID
         classes          []*resourceapi.DeviceClass
@@ -917,6 +975,86 @@ func TestAllocator(t *testing.T) {
                 deviceAllocationResult(req0, driverA, pool1, device2, true),
             )},
         },
+        "all-devices-slice-without-devices-prioritized-list": {
+            prioritizedList: true,
+            claimsToAllocate: objects(
+                func() *resourceapi.ResourceClaim {
+                    claim := claimWithRequests(claim0, nil,
+                        requestWithPrioritizedList(req0,
+                            subRequest(subReq0, classA, 1),
+                            subRequest(subReq1, classB, 1),
+                        ),
+                    )
+                    claim.Spec.Devices.Requests[0].FirstAvailable[0].AllocationMode = resourceapi.DeviceAllocationModeAll
+                    claim.Spec.Devices.Requests[0].FirstAvailable[0].Count = 0
+                    return claim
+                }(),
+            ),
+            classes: objects(class(classA, driverA), class(classB, driverB)),
+            slices: objects(
+                sliceWithNoDevices(slice1, node1, pool1, driverA),
+                sliceWithOneDevice(slice2, node1, pool2, driverB),
+            ),
+            node: node(node1, region1),
+            expectResults: []any{allocationResult(
+                localNodeSelector(node1),
+                deviceAllocationResult(req0SubReq1, driverB, pool2, device1, false),
+            )},
+        },
+        "all-devices-no-slices-prioritized-list": {
+            prioritizedList: true,
+            claimsToAllocate: objects(
+                func() *resourceapi.ResourceClaim {
+                    claim := claimWithRequests(claim0, nil,
+                        requestWithPrioritizedList(req0,
+                            subRequest(subReq0, classA, 1),
+                            subRequest(subReq1, classB, 1),
+                        ),
+                    )
+                    claim.Spec.Devices.Requests[0].FirstAvailable[0].AllocationMode = resourceapi.DeviceAllocationModeAll
+                    claim.Spec.Devices.Requests[0].FirstAvailable[0].Count = 0
+                    return claim
+                }(),
+            ),
+            classes: objects(class(classA, driverA), class(classB, driverB)),
+            slices: objects(
+                sliceWithOneDevice(slice2, node1, pool2, driverB),
+            ),
+            node: node(node1, region1),
+            expectResults: []any{allocationResult(
+                localNodeSelector(node1),
+                deviceAllocationResult(req0SubReq1, driverB, pool2, device1, false),
+            )},
+        },
+        "all-devices-some-allocated-prioritized-list": {
+            prioritizedList: true,
+            claimsToAllocate: objects(
+                func() *resourceapi.ResourceClaim {
+                    claim := claimWithRequests(claim0, nil,
+                        requestWithPrioritizedList(req0,
+                            subRequest(subReq0, classA, 1),
+                            subRequest(subReq1, classB, 1),
+                        ),
+                    )
+                    claim.Spec.Devices.Requests[0].FirstAvailable[0].AllocationMode = resourceapi.DeviceAllocationModeAll
+                    claim.Spec.Devices.Requests[0].FirstAvailable[0].Count = 0
+                    return claim
+                }(),
+            ),
+            allocatedDevices: []DeviceID{
+                MakeDeviceID(driverA, pool1, device1),
+            },
+            classes: objects(class(classA, driverA), class(classB, driverB)),
+            slices: objects(
+                slice(slice1, node1, pool1, driverA, device(device1, nil, nil), device(device2, nil, nil)),
+                sliceWithOneDevice(slice2, node1, pool2, driverB),
+            ),
+            node: node(node1, region1),
+            expectResults: []any{allocationResult(
+                localNodeSelector(node1),
+                deviceAllocationResult(req0SubReq1, driverB, pool2, device1, false),
+            )},
+        },
         "network-attached-device": {
             claimsToAllocate: objects(claim(claim0, req0, classA)),
             classes:          objects(class(classA, driverA)),
@@ -1417,6 +1555,8 @@ func TestAllocator(t *testing.T) {
                 ),
             ),
         classes: objects(class(classA, driverA)),
+        slices:  objects(sliceWithMultipleDevices(slice1, node1, pool1, driverA, resourceapi.AllocationResultsMaxSize+1)),
+        node:    node(node1, region1),
 
         expectError: gomega.MatchError(gomega.ContainSubstring("exceeds the claim limit")),
     },
@@ -1426,6 +1566,478 @@ func TestAllocator(t *testing.T) {
 
         expectError: gomega.MatchError(gomega.ContainSubstring("exceeds the claim limit")),
     },
+    "prioritized-list-first-unavailable": {
+        prioritizedList: true,
+        claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
+            subRequest(subReq0, classB, 1),
+            subRequest(subReq1, classA, 1),
+        ))),
+        classes: objects(class(classA, driverA), class(classB, driverB)),
+        slices:  objects(sliceWithOneDevice(slice1, node1, pool1, driverA)),
+        node:    node(node1, region1),
+
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0SubReq1, driverA, pool1, device1, false),
+        )},
+    },
+    "prioritized-list-non-available": {
+        prioritizedList: true,
+        claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
+            subRequest(subReq0, classB, 2),
+            subRequest(subReq1, classA, 2),
+        ))),
+        classes: objects(class(classA, driverA), class(classB, driverB)),
+        slices: objects(
+            sliceWithOneDevice(slice1, node1, pool1, driverA),
+            sliceWithOneDevice(slice2, node1, pool2, driverB),
+        ),
+        node: node(node1, region1),
+
+        expectResults: nil,
+    },
+    "prioritized-list-device-config": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            claimWithAll(claim0,
+                objects(
+                    requestWithPrioritizedList(req0,
+                        subRequest(subReq0, classA, 1),
+                        subRequest(subReq1, classB, 2),
+                    ),
+                ),
+                nil,
+                objects(
+                    deviceClaimConfig([]string{req0SubReq0}, deviceConfiguration(driverA, "foo")),
+                    deviceClaimConfig([]string{req0SubReq1}, deviceConfiguration(driverB, "bar")),
+                ),
+            ),
+        ),
+        classes: objects(class(classA, driverA), class(classB, driverB)),
+        slices: objects(slice(slice1, node1, pool1, driverB,
+            device(device1, nil, map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{}),
+            device(device2, nil, map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{}),
+        )),
+        node: node(node1, region1),
+
+        expectResults: []any{allocationResultWithConfigs(
+            localNodeSelector(node1),
+            objects(
+                deviceAllocationResult(req0SubReq1, driverB, pool1, device1, false),
+                deviceAllocationResult(req0SubReq1, driverB, pool1, device2, false),
+            ),
+            []resourceapi.DeviceAllocationConfiguration{
+                {
+                    Source: resourceapi.AllocationConfigSourceClaim,
+                    Requests: []string{
+                        req0SubReq1,
+                    },
+                    DeviceConfiguration: deviceConfiguration(driverB, "bar"),
+                },
+            },
+        )},
+    },
+    "prioritized-list-class-config": {
+        prioritizedList: true,
+        claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
+            subRequest(subReq0, classA, 2),
+            subRequest(subReq1, classB, 2),
+        ))),
+        classes: objects(
+            classWithConfig(classA, driverA, "foo"),
+            classWithConfig(classB, driverB, "bar"),
+        ),
+        slices: objects(
+            slice(slice1, node1, pool1, driverB,
+                device(device1, nil, nil),
+                device(device2, nil, nil),
+            ),
+            slice(slice2, node1, pool2, driverA,
+                device(device3, nil, nil),
+            ),
+        ),
+        node: node(node1, region1),
+
+        expectResults: []any{allocationResultWithConfigs(
+            localNodeSelector(node1),
+            objects(
+                deviceAllocationResult(req0SubReq1, driverB, pool1, device1, false),
+                deviceAllocationResult(req0SubReq1, driverB, pool1, device2, false),
+            ),
+            []resourceapi.DeviceAllocationConfiguration{
+                {
+                    Source:              resourceapi.AllocationConfigSourceClass,
+                    Requests:            nil,
+                    DeviceConfiguration: deviceConfiguration(driverB, "bar"),
+                },
+            },
+        )},
+    },
+    "prioritized-list-subrequests-with-expressions": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            claimWithRequests(claim0, nil,
+                request(req0, classA, 1, resourceapi.DeviceSelector{
+                    CEL: &resourceapi.CELDeviceSelector{
+                        Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("1Gi")) >= 0`, driverA),
+                    }},
+                ),
+                requestWithPrioritizedList(req1,
+                    subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
+                        }}),
+                    subRequest(subReq1, classA, 2, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("2Gi")) >= 0`, driverA),
+                        }}),
+                ),
+            ),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices: objects(slice(slice1, node1, pool1, driverA,
+            device(device1, map[resourceapi.QualifiedName]resource.Quantity{
+                "memory": resource.MustParse("2Gi"),
+            }, nil),
+            device(device2, map[resourceapi.QualifiedName]resource.Quantity{
+                "memory": resource.MustParse("2Gi"),
+            }, nil),
+            device(device3, map[resourceapi.QualifiedName]resource.Quantity{
+                "memory": resource.MustParse("1Gi"),
+            }, nil),
+        )),
+        node: node(node1, region1),
+
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0, driverA, pool1, device3, false),
+            deviceAllocationResult(req1SubReq1, driverA, pool1, device1, false),
+            deviceAllocationResult(req1SubReq1, driverA, pool1, device2, false),
+        )},
+    },
+    "prioritized-list-subrequests-with-constraints-ref-parent-request": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            claimWithRequests(claim0,
+                []resourceapi.DeviceConstraint{
+                    {
+                        Requests:       []string{req0, req1},
+                        MatchAttribute: &versionAttribute,
+                    },
+                },
+                request(req0, classA, 1, resourceapi.DeviceSelector{
+                    CEL: &resourceapi.CELDeviceSelector{
+                        Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("8Gi")) >= 0`, driverA),
+                    }},
+                ),
+                requestWithPrioritizedList(req1,
+                    subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("2Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                    subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("1Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                ),
+            ),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices: objects(
+            slice(slice1, node1, pool1, driverA,
+                device(device1,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("8Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
+                        "driverVersion": {VersionValue: ptr.To("1.0.0")},
+                    },
+                ),
+            ),
+            slice(slice2, node1, pool2, driverA,
+                device(device2,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("2Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
+                        "driverVersion": {VersionValue: ptr.To("2.0.0")},
+                    },
+                ),
+                device(device3,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("1Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
+                        "driverVersion": {VersionValue: ptr.To("1.0.0")},
+                    },
+                ),
+            ),
+        ),
+        node: node(node1, region1),
+
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0, driverA, pool1, device1, false),
+            deviceAllocationResult(req1SubReq1, driverA, pool2, device3, false),
+        )},
+    },
+    "prioritized-list-subrequests-with-constraints-ref-sub-request": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            claimWithRequests(claim0,
+                []resourceapi.DeviceConstraint{
+                    {
+                        Requests:       []string{req0, req1SubReq0},
+                        MatchAttribute: &versionAttribute,
+                    },
+                },
+                request(req0, classA, 1, resourceapi.DeviceSelector{
+                    CEL: &resourceapi.CELDeviceSelector{
+                        Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("8Gi")) >= 0`, driverA),
+                    }},
+                ),
+                requestWithPrioritizedList(req1,
+                    subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("2Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                    subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("1Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                ),
+            ),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices: objects(
+            slice(slice1, node1, pool1, driverA,
+                device(device1,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("8Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
+                        "driverVersion": {VersionValue: ptr.To("1.0.0")},
+                    },
+                ),
+            ),
+            slice(slice2, node1, pool2, driverA,
+                device(device2,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("2Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
+                        "driverVersion": {VersionValue: ptr.To("2.0.0")},
+                    },
+                ),
+            ),
+        ),
+        node: node(node1, region1),
+
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0, driverA, pool1, device1, false),
+            deviceAllocationResult(req1SubReq1, driverA, pool2, device2, false),
+        )},
+    },
+    "prioritized-list-subrequests-with-allocation-mode-all": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            func() *resourceapi.ResourceClaim {
+                claim := claimWithRequests(claim0, nil,
+                    requestWithPrioritizedList(req0,
+                        subRequest(subReq0, classA, 1),
+                        subRequest(subReq1, classA, 1),
+                    ),
+                )
+                claim.Spec.Devices.Requests[0].FirstAvailable[0].AllocationMode = resourceapi.DeviceAllocationModeAll
+                claim.Spec.Devices.Requests[0].FirstAvailable[0].Count = 0
+                return claim
+            }(),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices: objects(
+            slice(slice1, node1, pool1, driverA,
+                device(device1, nil, nil),
+                device(device2, nil, nil),
+            ),
+        ),
+        allocatedDevices: []DeviceID{
+            MakeDeviceID(driverA, pool1, device1),
+        },
+        node: node(node1, region1),
+
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0SubReq1, driverA, pool1, device2, false),
+        )},
+    },
+    "prioritized-list-allocation-mode-all-multiple-requests": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            claimWithRequests(claim0, nil,
+                request(req0, classA, 1),
+                requestWithPrioritizedList(req1,
+                    func() resourceapi.DeviceSubRequest {
+                        subReq := subRequest(subReq0, classA, 1)
+                        subReq.AllocationMode = resourceapi.DeviceAllocationModeAll
+                        subReq.Count = 0
+                        return subReq
+                    }(),
+                    subRequest(subReq1, classA, 1),
+                ),
+            ),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices: objects(
+            slice(slice1, node1, pool1, driverA,
+                device(device1, nil, nil),
+                device(device2, nil, nil),
+            ),
+        ),
+        node: node(node1, region1),
+
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0, driverA, pool1, device1, false),
+            deviceAllocationResult(req1SubReq1, driverA, pool1, device2, false),
+        )},
+    },
+    "prioritized-list-disabled": {
+        prioritizedList: false,
+        claimsToAllocate: objects(
+            func() *resourceapi.ResourceClaim {
+                claim := claimWithRequests(claim0, nil,
+                    requestWithPrioritizedList(req0,
+                        subRequest(subReq0, classA, 1),
+                    ),
+                )
+                return claim
+            }(),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices:  objects(sliceWithOneDevice(slice1, node1, pool1, driverA)),
+        node:    node(node1, region1),
+
+        expectResults: nil,
+        expectError:   gomega.MatchError(gomega.ContainSubstring("claim claim-0, request req-0: has subrequests, but the feature is disabled")),
+    },
+    "prioritized-list-multi-request": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            claimWithRequests(claim0, nil,
+                request(req1, classA, 1, resourceapi.DeviceSelector{
+                    CEL: &resourceapi.CELDeviceSelector{
+                        Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("8Gi")) >= 0`, driverA),
+                    }},
+                ),
+                requestWithPrioritizedList(req0,
+                    subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("8Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                    subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                ),
+            ),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices: objects(
+            slice(slice1, node1, pool1, driverA,
+                device(device1,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("8Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{},
+                ),
+            ),
+            slice(slice2, node1, pool2, driverA,
+                device(device2,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("4Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{},
+                ),
+            ),
+        ),
+        node: node(node1, region1),
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req1, driverA, pool1, device1, false),
+            deviceAllocationResult(req0SubReq1, driverA, pool2, device2, false),
+        )},
+    },
+    "prioritized-list-with-backtracking": {
+        prioritizedList: true,
+        claimsToAllocate: objects(
+            claimWithRequests(claim0, nil,
+                requestWithPrioritizedList(req0,
+                    subRequest(subReq0, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("8Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                    subRequest(subReq1, classA, 1, resourceapi.DeviceSelector{
+                        CEL: &resourceapi.CELDeviceSelector{
+                            Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("4Gi")) >= 0`, driverA),
+                        }},
+                    ),
+                ),
+                request(req1, classA, 1, resourceapi.DeviceSelector{
+                    CEL: &resourceapi.CELDeviceSelector{
+                        Expression: fmt.Sprintf(`device.capacity["%s"].memory.compareTo(quantity("8Gi")) >= 0`, driverA),
+                    }},
+                ),
+            ),
+        ),
+        classes: objects(class(classA, driverA)),
+        slices: objects(
+            slice(slice1, node1, pool1, driverA,
+                device(device1,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("8Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{},
+                ),
+            ),
+            slice(slice2, node1, pool2, driverA,
+                device(device2,
+                    map[resourceapi.QualifiedName]resource.Quantity{
+                        "memory": resource.MustParse("4Gi"),
+                    },
+                    map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{},
+                ),
+            ),
+        ),
+        node: node(node1, region1),
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0SubReq1, driverA, pool2, device2, false),
+            deviceAllocationResult(req1, driverA, pool1, device1, false),
+        )},
+    },
+    "prioritized-list-too-many-in-first-subrequest": {
+        prioritizedList: true,
+        claimsToAllocate: objects(claimWithRequests(claim0, nil, requestWithPrioritizedList(req0,
+            subRequest(subReq0, classB, 500),
+            subRequest(subReq1, classA, 1),
+        ))),
+        classes: objects(class(classA, driverA), class(classB, driverB)),
+        slices:  objects(sliceWithOneDevice(slice1, node1, pool1, driverA)),
+        node:    node(node1, region1),
+
+        expectResults: []any{allocationResult(
+            localNodeSelector(node1),
+            deviceAllocationResult(req0SubReq1, driverA, pool1, device1, false),
+        )},
+    },
     }
 
     for name, tc := range testcases {
@@ -1444,7 +2056,7 @@ func TestAllocator(t *testing.T) {
         allocatedDevices := slices.Clone(tc.allocatedDevices)
         slices := slices.Clone(tc.slices)
 
-        allocator, err := NewAllocator(ctx, tc.adminAccess, claimsToAllocate, sets.New(allocatedDevices...), classLister, slices, cel.NewCache(1))
+        allocator, err := NewAllocator(ctx, tc.adminAccess, tc.prioritizedList, claimsToAllocate, sets.New(allocatedDevices...), classLister, slices, cel.NewCache(1))
         g.Expect(err).ToNot(gomega.HaveOccurred())
 
         results, err := allocator.Allocate(ctx, tc.node)
@@ -321,7 +321,8 @@ claims:
         }
     }
 
-    allocator, err := structured.NewAllocator(tCtx, utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess), []*resourceapi.ResourceClaim{claim}, allocatedDevices, draManager.DeviceClasses(), slices, celCache)
+    allocator, err := structured.NewAllocator(tCtx, utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess), utilfeature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList),
+        []*resourceapi.ResourceClaim{claim}, allocatedDevices, draManager.DeviceClasses(), slices, celCache)
     tCtx.ExpectNoError(err, "create allocator")
 
     rand.Shuffle(len(nodes), func(i, j int) {