Mirror of https://github.com/k3s-io/kubernetes.git
dra controller: support structured parameters
When allocation was done by the scheduler (structured parameters), the resource claim controller needs to do the deallocation itself, because there is no control-plane controller that could react to "DeallocationRequested".
Commit 3de376ecf6 (parent 6f1ddfcd2e)
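In short, the controller now looks for the built-in finalizer (resourcev1alpha2.Finalizer) to tell the two allocation paths apart. Below is a condensed, hypothetical sketch of that decision for a claim with no remaining valid consumers; the helper name and the sample claim are made up for illustration, and the real syncClaim (shown in the diff that follows) mutates the claim and calls UpdateStatus instead of returning a string.

// A condensed sketch of the new decision, not the controller code itself:
// given a claim with no remaining valid consumers, who deallocates it?
package main

import (
	"fmt"
	"slices"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
)

// deallocationAction is a hypothetical helper used only for this sketch.
func deallocationAction(claim *resourcev1alpha2.ResourceClaim) string {
	// The built-in finalizer marks claims that were allocated by the scheduler
	// through structured parameters.
	builtinControllerFinalizer := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer)
	switch {
	case builtinControllerFinalizer >= 0 &&
		(claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer ||
			claim.DeletionTimestamp != nil):
		return "clear claim.Status.Allocation ourselves"
	case builtinControllerFinalizer < 0 &&
		claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer:
		return "set claim.Status.DeallocationRequested for the DRA driver controller"
	default:
		return "keep the claim allocated (e.g. immediate allocation with a control-plane controller)"
	}
}

func main() {
	claim := &resourcev1alpha2.ResourceClaim{}
	claim.Finalizers = []string{resourcev1alpha2.Finalizer}
	claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer
	fmt.Println(deallocationAction(claim)) // clear claim.Status.Allocation ourselves
}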
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"strings"
 	"time"
@@ -832,9 +833,11 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) error {
 		return fmt.Errorf("unsupported ReservedFor entry: %v", reservedFor)
 	}
-	logger.V(5).Info("claim reserved for counts", "currentCount", len(claim.Status.ReservedFor), "claim", klog.KRef(namespace, name), "updatedCount", len(valid))
+	builtinControllerFinalizer := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer)
+	logger.V(5).Info("claim reserved for counts", "currentCount", len(claim.Status.ReservedFor), "claim", klog.KRef(namespace, name), "updatedCount", len(valid), "builtinController", builtinControllerFinalizer >= 0)
 	if len(valid) < len(claim.Status.ReservedFor) {
 		// TODO (#113700): patch
 		// This is not using a patch because we want the update to fail if anything
 		// changed in the meantime.
 		claim := claim.DeepCopy()
 		claim.Status.ReservedFor = valid
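The new builtinControllerFinalizer index comes from the standard slices package (Go 1.21+), which the next hunk also uses to drop the finalizer. A tiny standalone example of those two helpers; the finalizer string below is a placeholder, the controller uses resourcev1alpha2.Finalizer:

// Standalone illustration of the two slices helpers used by this change:
// slices.Index to find the built-in finalizer, slices.Delete to remove it.
package main

import (
	"fmt"
	"slices"
)

func main() {
	const builtinFinalizer = "example.k8s.io/builtin-controller" // placeholder value
	finalizers := []string{"example.com/other", builtinFinalizer}

	// Index returns the position of the first match, or -1 if it is absent.
	idx := slices.Index(finalizers, builtinFinalizer)
	fmt.Println("index:", idx) // index: 1

	if idx >= 0 {
		// Delete removes the half-open range [idx, idx+1), i.e. exactly one element.
		finalizers = slices.Delete(finalizers, idx, idx+1)
	}
	fmt.Println(finalizers) // [example.com/other]
}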
@@ -854,12 +857,53 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) error {
 		// those, the resource claim controller will trigger deletion when the
 		// pod is done. However, it doesn't hurt to also trigger deallocation
 		// for such claims and not checking for them keeps this code simpler.
-		if len(valid) == 0 &&
-			claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
-			claim.Status.DeallocationRequested = true
-		}
+		if len(valid) == 0 {
+			if builtinControllerFinalizer >= 0 {
+				if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer ||
+					claim.DeletionTimestamp != nil {
+					// Allocated by scheduler with structured parameters. We can "deallocate"
+					// by clearing the allocation.
+					claim.Status.Allocation = nil
+				}
+			} else if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
+				// DRA driver controller in the control plane
+				// needs to do the deallocation.
+				claim.Status.DeallocationRequested = true
+			}
+			// In all other cases, we keep the claim allocated, in particular for immediate allocation
+			// with a control plane controller.
+		}

-		_, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+		claim, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
 		if err != nil {
 			return err
 		}
+
+		// Now also remove the finalizer if it is not needed anymore.
+		// Note that the index may have changed as a result of the UpdateStatus call.
+		builtinControllerFinalizer := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer)
+		if builtinControllerFinalizer >= 0 && claim.Status.Allocation == nil {
+			claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
+			if _, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}); err != nil {
+				return err
+			}
+		}
+	} else if builtinControllerFinalizer >= 0 && claim.DeletionTimestamp != nil && len(valid) == 0 {
+		claim := claim.DeepCopy()
+		if claim.Status.Allocation != nil {
+			// This can happen when a claim with immediate allocation
+			// stopped being used, remained allocated, and then got
+			// deleted. As above we then need to clear the allocation.
+			claim.Status.Allocation = nil
+			var err error
+			claim, err = ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+			if err != nil {
+				return err
+			}
+		}
+		// Whether it was allocated or not, remove the finalizer to unblock removal.
+		claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
+		_, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
+		if err != nil {
+			return err
+		}
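The second branch above handles claims that are being deleted: clear the allocation first (a write to the status subresource), then remove the built-in finalizer (a write to the main resource) so the API server can finish the delete. A rough, hypothetical walk-through of that order using client-go's fake clientset follows; it needs a client-go release that still ships the resource.k8s.io/v1alpha2 client, and the sample claim is made up. This is not the controller's code.

// Hypothetical walk-through of the deletion path: UpdateStatus first, then
// Update to drop the built-in finalizer and unblock removal.
package main

import (
	"context"
	"fmt"
	"slices"

	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	claim := &resourcev1alpha2.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "example-claim",
			Namespace:  "default",
			Finalizers: []string{resourcev1alpha2.Finalizer},
		},
		Status: resourcev1alpha2.ResourceClaimStatus{
			Allocation: &resourcev1alpha2.AllocationResult{},
		},
	}
	client := fake.NewSimpleClientset(claim)
	ctx := context.Background()

	// Step 1: "deallocate" by clearing the allocation via the status subresource.
	claim.Status.Allocation = nil
	claim, err := client.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
	if err != nil {
		panic(err)
	}

	// Step 2: drop the built-in finalizer on the main resource so deletion can proceed.
	if idx := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer); idx >= 0 {
		claim.Finalizers = slices.Delete(claim.Finalizers, idx, idx+1)
	}
	claim, err = client.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("remaining finalizers:", claim.Finalizers)
}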
@@ -214,7 +214,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
 			rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
 			rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(resourceGroup).Resources("resourceclaims").RuleOrDie(),
 			rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
-			rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(),
+			rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims", "resourceclaims/status").RuleOrDie(),
 			rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
 			eventsRule(),
 		},
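The RBAC change follows from the controller code above: removing the built-in finalizer is an Update on the resourceclaims main resource (the allocation itself is cleared via the status subresource), so the resource-claim-controller role now needs update/patch on resourceclaims as well as resourceclaims/status. Roughly the equivalent rule expressed with the public rbac/v1 types, assuming resourceGroup expands to the DRA group resource.k8s.io; the repo builds it with its internal rbacv1helpers package instead:

// Rough equivalent of the updated rule, written against k8s.io/api/rbac/v1.
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	rule := rbacv1.PolicyRule{
		Verbs:     []string{"update", "patch"},
		APIGroups: []string{"resource.k8s.io"},
		// "resourceclaims" is now listed alongside "resourceclaims/status":
		// removing the built-in finalizer is an update of the main resource.
		Resources: []string{"resourceclaims", "resourceclaims/status"},
	}
	fmt.Printf("%+v\n", rule)
}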