DRA admin access: add feature gate
The new DRAAdminAccess feature gate has the following effects:

- If disabled in the apiserver, the spec.devices.requests[*].adminAccess field gets cleared, and the same happens in the status. In both cases there is one special scenario: if the field was already set and a claim or claim template gets updated, the field is not cleared. Allocating a claim with admin access is also allowed regardless of the feature gate, and the field is not cleared; in practice, the scheduler will not do that.
- If disabled in the resource claim controller, creating ResourceClaims with the field set gets rejected. This prevents running workloads which depend on admin access.
- If disabled in the scheduler, claims with admin access don't get allocated. The effect is the same.

The alternative would have been to ignore the fields in the claim controller and scheduler. That is bad because a monitoring workload would then run, blocking resources that probably were meant for production workloads.
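For illustration, a minimal Go sketch of the kind of ResourceClaimTemplate a monitoring workload would use, based on the resource.k8s.io/v1alpha3 types as of this commit. The object name, namespace, request name, and device class are made up, not part of the API. With DRAAdminAccess disabled, the apiserver clears the adminAccess field on create and the resource claim controller refuses to create claims from a template that still carries it.

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// monitoringClaimTemplate builds a ResourceClaimTemplate whose single request
// asks for admin access to all devices of a (hypothetical) device class.
// Whether the adminAccess field survives depends on the DRAAdminAccess gate.
func monitoringClaimTemplate() *resourceapi.ResourceClaimTemplate {
	return &resourceapi.ResourceClaimTemplate{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "gpu-monitoring", // illustrative name
			Namespace: "monitoring",     // illustrative namespace
		},
		Spec: resourceapi.ResourceClaimTemplateSpec{
			Spec: resourceapi.ResourceClaimSpec{
				Devices: resourceapi.DeviceClaim{
					Requests: []resourceapi.DeviceRequest{{
						Name:            "all-gpus",          // illustrative request name
						DeviceClassName: "example.com-gpu",   // illustrative device class
						AllocationMode:  resourceapi.DeviceAllocationModeAll,
						AdminAccess:     true, // cleared by the apiserver when the gate is off
					}},
				},
			},
		},
	}
}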
This commit is contained in: parent f3fef01e79, commit 9a7e4ccab2

api/openapi-spec/swagger.json (generated, 4 changed lines)
@@ -15524,7 +15524,7 @@
 "description": "DeviceRequest is a request for devices required for a claim. This is typically a request for a single resource like a device, but can also ask for several identical devices.\n\nA DeviceClassName is currently required. Clients must check that it is indeed set. It's absence indicates that something changed in a way that is not supported by the client yet, in which case it must refuse to handle the request.",
 "properties": {
 "adminAccess": {
-"description": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.",
+"description": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 "type": "boolean"
 },
 "allocationMode": {
@@ -15563,7 +15563,7 @@
 "description": "DeviceRequestAllocationResult contains the allocation result for one request.",
 "properties": {
 "adminAccess": {
-"description": "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.",
+"description": "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set when the DRAAdminAccess feature gate is enabled. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 "type": "boolean"
 },
 "device": {

@@ -481,7 +481,7 @@
 "properties": {
 "adminAccess": {
 "default": false,
-"description": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.",
+"description": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 "type": "boolean"
 },
 "allocationMode": {
@@ -527,7 +527,7 @@
 "description": "DeviceRequestAllocationResult contains the allocation result for one request.",
 "properties": {
 "adminAccess": {
-"description": "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.",
+"description": "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set when the DRAAdminAccess feature gate is enabled. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 "type": "boolean"
 },
 "device": {
@@ -407,6 +407,7 @@ func newResourceClaimControllerDescriptor() *ControllerDescriptor {
 func startResourceClaimController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
 	ephemeralController, err := resourceclaim.NewController(
 		klog.FromContext(ctx),
+		utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
 		controllerContext.ClientBuilder.ClientOrDie("resource-claim-controller"),
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.InformerFactory.Resource().V1alpha3().ResourceClaims(),

@@ -442,8 +442,12 @@ type DeviceRequest struct {
 	// all ordinary claims to the device with respect to access modes and
 	// any resource allocations.
 	//
+	// This is an alpha field and requires enabling the DRAAdminAccess
+	// feature gate.
+	//
 	// +optional
 	// +default=false
+	// +featureGate=DRAAdminAccess
 	AdminAccess bool
 }
 
@@ -786,12 +790,17 @@ type DeviceRequestAllocationResult struct {
 	// AdminAccess is a copy of the AdminAccess value in the
 	// request which caused this device to be allocated.
 	//
-	// New allocations are required to have this set. Old allocations made
+	// New allocations are required to have this set when the DRAAdminAccess
+	// feature gate is enabled. Old allocations made
 	// by Kubernetes 1.31 do not have it yet. Clients which want to
 	// support Kubernetes 1.31 need to look up the request and retrieve
 	// the value from there if this field is not set.
 	//
+	// This is an alpha field and requires enabling the DRAAdminAccess
+	// feature gate.
+	//
 	// +required
+	// +featureGate=DRAAdminAccess
 	AdminAccess *bool
 }
 
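The field comment above spells out a compatibility rule for consumers of an allocation result. A minimal Go sketch of that lookup, based on the v1alpha3 types as of this commit; the helper name is illustrative, not part of the Kubernetes API:

package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
)

// adminAccessForResult reports whether a device allocation result grants admin
// access. New allocations carry the value directly; results written by
// Kubernetes 1.31 may not have AdminAccess set, in which case the value is
// taken from the originating request in the claim spec, as the documentation
// above requires.
func adminAccessForResult(claim *resourceapi.ResourceClaim, result resourceapi.DeviceRequestAllocationResult) bool {
	if result.AdminAccess != nil {
		// New allocations copy the value from the request into the result.
		return *result.AdminAccess
	}
	// 1.31-era allocations: fall back to the request named by the result.
	for _, req := range claim.Spec.Devices.Requests {
		if req.Name == result.Request {
			return req.AdminAccess
		}
	}
	return false
}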
@@ -32,9 +32,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/apiserver/pkg/cel"
 	"k8s.io/apiserver/pkg/cel/environment"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	dracel "k8s.io/dynamic-resource-allocation/cel"
 	corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
 	"k8s.io/kubernetes/pkg/apis/resource"
+	"k8s.io/kubernetes/pkg/features"
 )
 
 var (
@@ -343,7 +345,7 @@ func validateDeviceRequestAllocationResult(result resource.DeviceRequestAllocati
 	allErrs = append(allErrs, validateDriverName(result.Driver, fldPath.Child("driver"))...)
 	allErrs = append(allErrs, validatePoolName(result.Pool, fldPath.Child("pool"))...)
 	allErrs = append(allErrs, validateDeviceName(result.Device, fldPath.Child("device"))...)
-	if result.AdminAccess == nil {
+	if result.AdminAccess == nil && utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess) {
 		allErrs = append(allErrs, field.Required(fldPath.Child("adminAccess"), ""))
 	}
 	return allErrs
@@ -27,8 +27,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/validation/field"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/resource"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/utils/pointer"
 	"k8s.io/utils/ptr"
 )
@@ -421,6 +424,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 	validAllocatedClaimOld.Status.Allocation.Devices.Results[0].AdminAccess = nil // Not required in 1.31.
 
 	scenarios := map[string]struct {
+		adminAccess  bool
 		oldClaim     *resource.ResourceClaim
 		update       func(claim *resource.ResourceClaim) *resource.ResourceClaim
 		wantFailures field.ErrorList
@@ -475,6 +479,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 			},
 		},
 		"invalid-add-allocation-missing-admin-access": {
+			adminAccess: true,
 			wantFailures: field.ErrorList{
 				field.Required(field.NewPath("status", "allocation", "devices", "results").Index(0).Child("adminAccess"), ""),
 			},
@@ -494,6 +499,24 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 				return claim
 			},
 		},
+		"okay-add-allocation-missing-admin-access": {
+			adminAccess: false,
+			oldClaim:    validClaim,
+			update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
+				claim.Status.Allocation = &resource.AllocationResult{
+					Devices: resource.DeviceAllocationResult{
+						Results: []resource.DeviceRequestAllocationResult{{
+							Request:     goodName,
+							Driver:      goodName,
+							Pool:        goodName,
+							Device:      goodName,
+							AdminAccess: nil, // Intentionally not set.
+						}},
+					},
+				}
+				return claim
+			},
+		},
 		"invalid-node-selector": {
 			wantFailures: field.ErrorList{field.Required(field.NewPath("status", "allocation", "nodeSelector", "nodeSelectorTerms"), "must have at least one node selector term")},
 			oldClaim: validClaim,
@@ -691,6 +714,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 
 	for name, scenario := range scenarios {
 		t.Run(name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAAdminAccess, scenario.adminAccess)
 			scenario.oldClaim.ResourceVersion = "1"
 			errs := ValidateResourceClaimStatusUpdate(scenario.update(scenario.oldClaim.DeepCopy()), scenario.oldClaim)
 			assert.Equal(t, scenario.wantFailures, errs)
@@ -71,6 +71,9 @@ const (
 
 // Controller creates ResourceClaims for ResourceClaimTemplates in a pod spec.
 type Controller struct {
+	// adminAccessEnabled matches the DRAAdminAccess feature gate state.
+	adminAccessEnabled bool
+
 	// kubeClient is the kube API client used to communicate with the API
 	// server.
 	kubeClient clientset.Interface
@@ -118,12 +121,14 @@ const (
 // NewController creates a ResourceClaim controller.
 func NewController(
 	logger klog.Logger,
+	adminAccessEnabled bool,
 	kubeClient clientset.Interface,
 	podInformer v1informers.PodInformer,
 	claimInformer resourceinformers.ResourceClaimInformer,
 	templateInformer resourceinformers.ResourceClaimTemplateInformer) (*Controller, error) {
 
 	ec := &Controller{
+		adminAccessEnabled: adminAccessEnabled,
 		kubeClient: kubeClient,
 		podLister:  podInformer.Lister(),
 		podIndexer: podInformer.Informer().GetIndexer(),
@@ -612,6 +617,10 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
 		return fmt.Errorf("resource claim template %q: %v", *templateName, err)
 	}
 
+	if !ec.adminAccessEnabled && needsAdminAccess(template) {
+		return errors.New("admin access is requested, but the feature is disabled")
+	}
+
 	// Create the ResourceClaim with pod as owner, with a generated name that uses
 	// <pod>-<claim name> as base.
 	isTrue := true
@@ -670,6 +679,15 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
 	return nil
 }
 
+func needsAdminAccess(claimTemplate *resourceapi.ResourceClaimTemplate) bool {
+	for _, request := range claimTemplate.Spec.Spec.Devices.Requests {
+		if request.AdminAccess {
+			return true
+		}
+	}
+	return false
+}
+
 // findPodResourceClaim looks for an existing ResourceClaim with the right
 // annotation (ties it to the pod claim) and the right ownership (ties it to
 // the pod).
@@ -37,7 +37,6 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	"k8s.io/component-base/metrics/testutil"
-	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/resourceclaim/metrics"
 	"k8s.io/kubernetes/test/utils/ktesting"
@@ -85,6 +84,7 @@ func TestSyncHandler(t *testing.T) {
 	tests := []struct {
 		name               string
 		key                string
+		adminAccessEnabled bool
 		claims             []*resourceapi.ResourceClaim
 		claimsInCache      []*resourceapi.ResourceClaim
 		pods               []*v1.Pod
@@ -390,7 +390,7 @@ func TestSyncHandler(t *testing.T) {
 			claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
 			templateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
 
-			ec, err := NewController(klog.FromContext(tCtx), fakeKubeClient, podInformer, claimInformer, templateInformer)
+			ec, err := NewController(tCtx.Logger(), tc.adminAccessEnabled, fakeKubeClient, podInformer, claimInformer, templateInformer)
 			if err != nil {
 				t.Fatalf("error creating ephemeral controller : %v", err)
 			}
@@ -465,7 +465,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
 	templateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
 	claimClient := fakeKubeClient.ResourceV1alpha3().ResourceClaims(testNamespace)
 
-	_, err := NewController(tCtx.Logger(), fakeKubeClient, podInformer, claimInformer, templateInformer)
+	_, err := NewController(tCtx.Logger(), false /* admin access */, fakeKubeClient, podInformer, claimInformer, templateInformer)
 	tCtx.ExpectNoError(err, "creating ephemeral controller")
 
 	informerFactory.Start(tCtx.Done())
@@ -77,6 +77,13 @@ func validateNodeSelectorAuthorizationFeature() []error {
 	return nil
 }
 
+func validateDRAAdminAccessFeature() []error {
+	if utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess) && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
+		return []error{fmt.Errorf("DRAAdminAccess feature requires DynamicResourceAllocation feature to be enabled")}
+	}
+	return nil
+}
+
 func validateUnknownVersionInteroperabilityProxyFeature() []error {
 	if utilfeature.DefaultFeatureGate.Enabled(features.UnknownVersionInteroperabilityProxy) {
 		if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StorageVersionAPI) {
@@ -121,6 +128,7 @@ func (s *Options) Validate() []error {
 	errs = append(errs, validateUnknownVersionInteroperabilityProxyFeature()...)
 	errs = append(errs, validateUnknownVersionInteroperabilityProxyFlags(s)...)
 	errs = append(errs, validateNodeSelectorAuthorizationFeature()...)
+	errs = append(errs, validateDRAAdminAccessFeature()...)
 
 	return errs
 }
@@ -203,6 +203,18 @@ const (
 	// DisableNodeKubeProxyVersion disable the status.nodeInfo.kubeProxyVersion field of v1.Node
 	DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion"
 
+	// owner: @pohly
+	// kep: http://kep.k8s.io/4381
+	//
+	// Enables support for requesting admin access in a ResourceClaim.
+	// Admin access is granted even if a device is already in use and,
+	// depending on the DRA driver, may enable additional permissions
+	// when a container uses the allocated device.
+	//
+	// This feature gate is currently defined in KEP #4381. The intent
+	// is to move it into a separate KEP.
+	DRAAdminAccess featuregate.Feature = "DRAAdminAccess"
+
 	// owner: @pohly
 	// kep: http://kep.k8s.io/4381
 	//
@@ -173,6 +173,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
 	},
 
+	DRAAdminAccess: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
 	DynamicResourceAllocation: {
 		{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
 	},
pkg/generated/openapi/zz_generated.openapi.go (generated, 4 changed lines)
@@ -46504,7 +46504,7 @@ func schema_k8sio_api_resource_v1alpha3_DeviceRequest(ref common.ReferenceCallba
 			},
 			"adminAccess": {
 				SchemaProps: spec.SchemaProps{
-					Description: "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.",
+					Description: "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 					Default:     false,
 					Type:        []string{"boolean"},
 					Format:      "",
@@ -46560,7 +46560,7 @@ func schema_k8sio_api_resource_v1alpha3_DeviceRequestAllocationResult(ref common
 			},
 			"adminAccess": {
 				SchemaProps: spec.SchemaProps{
-					Description: "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.",
+					Description: "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set when the DRAAdminAccess feature gate is enabled. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 					Type:   []string{"boolean"},
 					Format: "",
 				},
 			},
@@ -28,9 +28,11 @@ import (
 	"k8s.io/apiserver/pkg/registry/generic"
 	"k8s.io/apiserver/pkg/storage"
 	"k8s.io/apiserver/pkg/storage/names"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/resource"
 	"k8s.io/kubernetes/pkg/apis/resource/validation"
+	"k8s.io/kubernetes/pkg/features"
 	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 )
 
@@ -172,4 +174,38 @@ func toSelectableFields(claim *resource.ResourceClaim) fields.Set {
 
 // dropDisabledFields removes fields which are covered by a feature gate.
 func dropDisabledFields(newClaim, oldClaim *resource.ResourceClaim) {
+	dropDisabledDRAAdminAccessFields(newClaim, oldClaim)
+}
+
+func dropDisabledDRAAdminAccessFields(newClaim, oldClaim *resource.ResourceClaim) {
+	if utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess) {
+		// No need to drop anything.
+		return
+	}
+
+	for i := range newClaim.Spec.Devices.Requests {
+		if oldClaim == nil || i >= len(oldClaim.Spec.Devices.Requests) || !oldClaim.Spec.Devices.Requests[i].AdminAccess {
+			newClaim.Spec.Devices.Requests[i].AdminAccess = false
+		}
+	}
+
+	if newClaim.Status.Allocation == nil {
+		return
+	}
+	for i := range newClaim.Status.Allocation.Devices.Results {
+		if newClaim.Status.Allocation.Devices.Results[i].AdminAccess != nil &&
+			(oldClaim == nil || oldClaim.Status.Allocation == nil || i >= len(oldClaim.Status.Allocation.Devices.Results) || oldClaim.Status.Allocation.Devices.Results[i].AdminAccess == nil) &&
+			!requestHasAdminAccess(newClaim, newClaim.Status.Allocation.Devices.Results[i].Request) {
+			newClaim.Status.Allocation.Devices.Results[i].AdminAccess = nil
+		}
+	}
+}
+
+func requestHasAdminAccess(claim *resource.ResourceClaim, requestName string) bool {
+	for _, request := range claim.Spec.Devices.Requests {
+		if request.Name == requestName && request.AdminAccess {
+			return true
+		}
+	}
+	return false
 }
@@ -23,7 +23,11 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/apis/resource"
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/utils/ptr"
 )
 
 var obj = &resource.ResourceClaim{
@@ -31,6 +35,102 @@ var obj = &resource.ResourceClaim{
 		Name:      "valid-claim",
 		Namespace: "default",
 	},
+	Spec: resource.ResourceClaimSpec{
+		Devices: resource.DeviceClaim{
+			Requests: []resource.DeviceRequest{
+				{
+					Name:            "req-0",
+					DeviceClassName: "class",
+					AllocationMode:  resource.DeviceAllocationModeAll,
+				},
+			},
+		},
+	},
+}
+
+var objWithStatus = &resource.ResourceClaim{
+	ObjectMeta: metav1.ObjectMeta{
+		Name:      "valid-claim",
+		Namespace: "default",
+	},
+	Spec: resource.ResourceClaimSpec{
+		Devices: resource.DeviceClaim{
+			Requests: []resource.DeviceRequest{
+				{
+					Name:            "req-0",
+					DeviceClassName: "class",
+					AllocationMode:  resource.DeviceAllocationModeAll,
+				},
+			},
+		},
+	},
+	Status: resource.ResourceClaimStatus{
+		Allocation: &resource.AllocationResult{
+			Devices: resource.DeviceAllocationResult{
+				Results: []resource.DeviceRequestAllocationResult{
+					{
+						Request: "req-0",
+						Driver:  "dra.example.com",
+						Pool:    "pool-0",
+						Device:  "device-0",
+					},
+				},
+			},
+		},
+	},
+}
+
+var objWithAdminAccess = &resource.ResourceClaim{
+	ObjectMeta: metav1.ObjectMeta{
+		Name:      "valid-claim",
+		Namespace: "default",
+	},
+	Spec: resource.ResourceClaimSpec{
+		Devices: resource.DeviceClaim{
+			Requests: []resource.DeviceRequest{
+				{
+					Name:            "req-0",
+					DeviceClassName: "class",
+					AllocationMode:  resource.DeviceAllocationModeAll,
+					AdminAccess:     true,
+				},
+			},
+		},
+	},
+}
+
+var objWithAdminAccessStatus = &resource.ResourceClaim{
+	ObjectMeta: metav1.ObjectMeta{
+		Name:      "valid-claim",
+		Namespace: "default",
+	},
+	Spec: resource.ResourceClaimSpec{
+		Devices: resource.DeviceClaim{
+			Requests: []resource.DeviceRequest{
+				{
+					Name:            "req-0",
+					DeviceClassName: "class",
+					AllocationMode:  resource.DeviceAllocationModeAll,
+					AdminAccess:     true,
+				},
+			},
+		},
+	},
+	Status: resource.ResourceClaimStatus{
+		Allocation: &resource.AllocationResult{
+			Devices: resource.DeviceAllocationResult{
+				Results: []resource.DeviceRequestAllocationResult{
+					{
+						Request:     "req-0",
+						Driver:      "dra.example.com",
+						Pool:        "pool-0",
+						Device:      "device-0",
+						AdminAccess: ptr.To(true),
+					},
+				},
+			},
+		},
+	},
 }
 
 func TestStrategy(t *testing.T) {
@@ -47,6 +147,7 @@ func TestStrategyCreate(t *testing.T) {
 
 	testcases := map[string]struct {
 		obj                   *resource.ResourceClaim
+		adminAccess           bool
 		expectValidationError bool
 		expectObj             *resource.ResourceClaim
 	}{
@@ -62,10 +163,22 @@ func TestStrategyCreate(t *testing.T) {
 			}(),
 			expectValidationError: true,
 		},
+		"drop-fields-admin-access": {
+			obj:         objWithAdminAccess,
+			adminAccess: false,
+			expectObj:   obj,
+		},
+		"keep-fields-admin-access": {
+			obj:         objWithAdminAccess,
+			adminAccess: true,
+			expectObj:   objWithAdminAccess,
+		},
 	}
 
 	for name, tc := range testcases {
 		t.Run(name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAAdminAccess, tc.adminAccess)
+
 			obj := tc.obj.DeepCopy()
 			Strategy.PrepareForCreate(ctx, obj)
 			if errs := Strategy.Validate(ctx, obj); len(errs) != 0 {
@@ -91,6 +204,7 @@ func TestStrategyUpdate(t *testing.T) {
 	testcases := map[string]struct {
 		oldObj                *resource.ResourceClaim
 		newObj                *resource.ResourceClaim
+		adminAccess           bool
 		expectValidationError bool
 		expectObj             *resource.ResourceClaim
 	}{
@@ -108,10 +222,30 @@ func TestStrategyUpdate(t *testing.T) {
 			}(),
 			expectValidationError: true,
 		},
+		"drop-fields-admin-access": {
+			oldObj:      obj,
+			newObj:      objWithAdminAccess,
+			adminAccess: false,
+			expectObj:   obj,
+		},
+		"keep-fields-admin-access": {
+			oldObj:                obj,
+			newObj:                objWithAdminAccess,
+			adminAccess:           true,
+			expectValidationError: true, // Spec is immutable.
+		},
+		"keep-existing-fields-admin-access": {
+			oldObj:      objWithAdminAccess,
+			newObj:      objWithAdminAccess,
+			adminAccess: true,
+			expectObj:   objWithAdminAccess,
+		},
 	}
 
 	for name, tc := range testcases {
 		t.Run(name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAAdminAccess, tc.adminAccess)
+
 			oldObj := tc.oldObj.DeepCopy()
 			newObj := tc.newObj.DeepCopy()
 			newObj.ResourceVersion = "4"
@@ -143,6 +277,7 @@ func TestStatusStrategyUpdate(t *testing.T) {
 	testcases := map[string]struct {
 		oldObj                *resource.ResourceClaim
 		newObj                *resource.ResourceClaim
+		adminAccess           bool
 		expectValidationError bool
 		expectObj             *resource.ResourceClaim
 	}{
@@ -172,10 +307,52 @@ func TestStatusStrategyUpdate(t *testing.T) {
 			}(),
 			expectObj: obj,
 		},
+		"drop-fields-admin-access": {
+			oldObj:      obj,
+			newObj:      objWithAdminAccessStatus,
+			adminAccess: false,
+			expectObj:   objWithStatus,
+		},
+		"keep-fields-admin-access": {
+			oldObj:      obj,
+			newObj:      objWithAdminAccessStatus,
+			adminAccess: true,
+			expectObj: func() *resource.ResourceClaim {
+				expectObj := objWithAdminAccessStatus.DeepCopy()
+				// Spec remains unchanged.
+				expectObj.Spec = obj.Spec
+				return expectObj
+			}(),
+		},
+		"keep-fields-admin-access-because-of-spec": {
+			oldObj:      objWithAdminAccess,
+			newObj:      objWithAdminAccessStatus,
+			adminAccess: false,
+			expectObj:   objWithAdminAccessStatus,
+		},
+		// Normally a claim without admin access in the spec shouldn't
+		// have one in the status either, but it's not invalid and thus
+		// let's test this.
+		"keep-fields-admin-access-because-of-status": {
+			oldObj: func() *resource.ResourceClaim {
+				oldObj := objWithAdminAccessStatus.DeepCopy()
+				oldObj.Spec.Devices.Requests[0].AdminAccess = false
+				return oldObj
+			}(),
+			newObj:      objWithAdminAccessStatus,
+			adminAccess: false,
+			expectObj: func() *resource.ResourceClaim {
+				oldObj := objWithAdminAccessStatus.DeepCopy()
+				oldObj.Spec.Devices.Requests[0].AdminAccess = false
+				return oldObj
+			}(),
+		},
 	}
 
 	for name, tc := range testcases {
 		t.Run(name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAAdminAccess, tc.adminAccess)
+
 			oldObj := tc.oldObj.DeepCopy()
 			newObj := tc.newObj.DeepCopy()
 			newObj.ResourceVersion = "4"
@@ -26,9 +26,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/apiserver/pkg/registry/generic"
 	"k8s.io/apiserver/pkg/storage/names"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/resource"
 	"k8s.io/kubernetes/pkg/apis/resource/validation"
+	"k8s.io/kubernetes/pkg/features"
 )
 
 // resourceClaimTemplateStrategy implements behavior for ResourceClaimTemplate objects
@@ -44,6 +46,8 @@ func (resourceClaimTemplateStrategy) NamespaceScoped() bool {
 }
 
 func (resourceClaimTemplateStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
+	claimTemplate := obj.(*resource.ResourceClaimTemplate)
+	dropDisabledFields(claimTemplate, nil)
 }
 
 func (resourceClaimTemplateStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
@@ -63,6 +67,8 @@ func (resourceClaimTemplateStrategy) AllowCreateOnUpdate() bool {
 }
 
 func (resourceClaimTemplateStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
+	claimTemplate, oldClaimTemplate := obj.(*resource.ResourceClaimTemplate), old.(*resource.ResourceClaimTemplate)
+	dropDisabledFields(claimTemplate, oldClaimTemplate)
 }
 
 func (resourceClaimTemplateStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
@@ -92,3 +98,20 @@ func toSelectableFields(template *resource.ResourceClaimTemplate) fields.Set {
 	fields := generic.ObjectMetaFieldsSet(&template.ObjectMeta, true)
 	return fields
 }
+
+func dropDisabledFields(newClaimTemplate, oldClaimTemplate *resource.ResourceClaimTemplate) {
+	dropDisabledDRAAdminAccessFields(newClaimTemplate, oldClaimTemplate)
+}
+
+func dropDisabledDRAAdminAccessFields(newClaimTemplate, oldClaimTemplate *resource.ResourceClaimTemplate) {
+	if utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess) {
+		// No need to drop anything.
+		return
+	}
+
+	for i := range newClaimTemplate.Spec.Spec.Devices.Requests {
+		if oldClaimTemplate == nil || i >= len(oldClaimTemplate.Spec.Spec.Devices.Requests) || !oldClaimTemplate.Spec.Spec.Devices.Requests[i].AdminAccess {
+			newClaimTemplate.Spec.Spec.Devices.Requests[i].AdminAccess = false
+		}
+	}
+}
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
|
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||||
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
|
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||||
"k8s.io/kubernetes/pkg/apis/resource"
|
"k8s.io/kubernetes/pkg/apis/resource"
|
||||||
|
"k8s.io/kubernetes/pkg/features"
|
||||||
)
|
)
|
||||||
|
|
||||||
var resourceClaimTemplate = &resource.ResourceClaimTemplate{
|
var obj = &resource.ResourceClaimTemplate{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: "valid-claim-template",
|
Name: "valid-claim-template",
|
||||||
Namespace: "default",
|
Namespace: "default",
|
||||||
},
|
},
|
||||||
|
Spec: resource.ResourceClaimTemplateSpec{
|
||||||
|
Spec: resource.ResourceClaimSpec{
|
||||||
|
Devices: resource.DeviceClaim{
|
||||||
|
Requests: []resource.DeviceRequest{
|
||||||
|
{
|
||||||
|
Name: "req-0",
|
||||||
|
DeviceClassName: "class",
|
||||||
|
AllocationMode: resource.DeviceAllocationModeAll,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var objWithAdminAccess = &resource.ResourceClaimTemplate{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "valid-claim-template",
|
||||||
|
Namespace: "default",
|
||||||
|
},
|
||||||
|
Spec: resource.ResourceClaimTemplateSpec{
|
||||||
|
Spec: resource.ResourceClaimSpec{
|
||||||
|
Devices: resource.DeviceClaim{
|
||||||
|
Requests: []resource.DeviceRequest{
|
||||||
|
{
|
||||||
|
Name: "req-0",
|
||||||
|
DeviceClassName: "class",
|
||||||
|
AllocationMode: resource.DeviceAllocationModeAll,
|
||||||
|
AdminAccess: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClaimTemplateStrategy(t *testing.T) {
|
func TestClaimTemplateStrategy(t *testing.T) {
|
||||||
@ -42,19 +81,64 @@ func TestClaimTemplateStrategy(t *testing.T) {
|
|||||||
|
|
||||||
func TestClaimTemplateStrategyCreate(t *testing.T) {
|
func TestClaimTemplateStrategyCreate(t *testing.T) {
|
||||||
ctx := genericapirequest.NewDefaultContext()
|
ctx := genericapirequest.NewDefaultContext()
|
||||||
resourceClaimTemplate := resourceClaimTemplate.DeepCopy()
|
|
||||||
|
|
||||||
Strategy.PrepareForCreate(ctx, resourceClaimTemplate)
|
testcases := map[string]struct {
|
||||||
errs := Strategy.Validate(ctx, resourceClaimTemplate)
|
obj *resource.ResourceClaimTemplate
|
||||||
if len(errs) != 0 {
|
adminAccess bool
|
||||||
t.Errorf("unexpected error validating for create %v", errs)
|
expectValidationError bool
|
||||||
|
expectObj *resource.ResourceClaimTemplate
|
||||||
|
}{
|
||||||
|
"simple": {
|
||||||
|
obj: obj,
|
||||||
|
expectObj: obj,
|
||||||
|
},
|
||||||
|
"validation-error": {
|
||||||
|
obj: func() *resource.ResourceClaimTemplate {
|
||||||
|
obj := obj.DeepCopy()
|
||||||
|
obj.Name = "%#@$%$"
|
||||||
|
return obj
|
||||||
|
}(),
|
||||||
|
expectValidationError: true,
|
||||||
|
},
|
||||||
|
"drop-fields-admin-access": {
|
||||||
|
obj: objWithAdminAccess,
|
||||||
|
adminAccess: false,
|
||||||
|
expectObj: obj,
|
||||||
|
},
|
||||||
|
"keep-fields-admin-access": {
|
||||||
|
obj: objWithAdminAccess,
|
||||||
|
adminAccess: true,
|
||||||
|
expectObj: objWithAdminAccess,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, tc := range testcases {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAAdminAccess, tc.adminAccess)
|
||||||
|
|
||||||
|
obj := tc.obj.DeepCopy()
|
||||||
|
Strategy.PrepareForCreate(ctx, obj)
|
||||||
|
if errs := Strategy.Validate(ctx, obj); len(errs) != 0 {
|
||||||
|
if !tc.expectValidationError {
|
||||||
|
t.Fatalf("unexpected validation errors: %q", errs)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
} else if tc.expectValidationError {
|
||||||
|
t.Fatal("expected validation error(s), got none")
|
||||||
|
}
|
||||||
|
if warnings := Strategy.WarningsOnCreate(ctx, obj); len(warnings) != 0 {
|
||||||
|
t.Fatalf("unexpected warnings: %q", warnings)
|
||||||
|
}
|
||||||
|
Strategy.Canonicalize(obj)
|
||||||
|
assert.Equal(t, tc.expectObj, obj)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClaimTemplateStrategyUpdate(t *testing.T) {
|
func TestClaimTemplateStrategyUpdate(t *testing.T) {
|
||||||
t.Run("no-changes-okay", func(t *testing.T) {
|
t.Run("no-changes-okay", func(t *testing.T) {
|
||||||
ctx := genericapirequest.NewDefaultContext()
|
ctx := genericapirequest.NewDefaultContext()
|
||||||
resourceClaimTemplate := resourceClaimTemplate.DeepCopy()
|
resourceClaimTemplate := obj.DeepCopy()
|
||||||
newClaimTemplate := resourceClaimTemplate.DeepCopy()
|
newClaimTemplate := resourceClaimTemplate.DeepCopy()
|
||||||
newClaimTemplate.ResourceVersion = "4"
|
newClaimTemplate.ResourceVersion = "4"
|
||||||
|
|
||||||
@ -67,7 +151,7 @@ func TestClaimTemplateStrategyUpdate(t *testing.T) {
|
|||||||
|
|
||||||
t.Run("name-change-not-allowed", func(t *testing.T) {
|
t.Run("name-change-not-allowed", func(t *testing.T) {
|
||||||
ctx := genericapirequest.NewDefaultContext()
|
ctx := genericapirequest.NewDefaultContext()
|
||||||
resourceClaimTemplate := resourceClaimTemplate.DeepCopy()
|
resourceClaimTemplate := obj.DeepCopy()
|
||||||
newClaimTemplate := resourceClaimTemplate.DeepCopy()
|
newClaimTemplate := resourceClaimTemplate.DeepCopy()
|
||||||
newClaimTemplate.Name = "valid-class-2"
|
newClaimTemplate.Name = "valid-class-2"
|
||||||
newClaimTemplate.ResourceVersion = "4"
|
newClaimTemplate.ResourceVersion = "4"
|
||||||
|
@@ -103,6 +103,7 @@ type informationForClaim struct {
 // dynamicResources is a plugin that ensures that ResourceClaims are allocated.
 type dynamicResources struct {
 	enabled                   bool
+	enableAdminAccess         bool
 	enableSchedulingQueueHint bool
 
 	fh framework.Handle
@@ -175,6 +176,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
 
 	pl := &dynamicResources{
 		enabled:                   true,
+		enableAdminAccess:         fts.EnableDRAAdminAccess,
 		enableSchedulingQueueHint: fts.EnableSchedulingQueueHint,
 
 		fh: fh,
@@ -527,7 +529,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 	//
 	// Claims are treated as "allocated" if they are in the assume cache
 	// or currently their allocation is in-flight.
-	allocator, err := structured.NewAllocator(ctx, allocateClaims, &claimListerForAssumeCache{assumeCache: pl.claimAssumeCache, inFlightAllocations: &pl.inFlightAllocations}, pl.classLister, pl.sliceLister)
+	allocator, err := structured.NewAllocator(ctx, pl.enableAdminAccess, allocateClaims, &claimListerForAssumeCache{assumeCache: pl.claimAssumeCache, inFlightAllocations: &pl.inFlightAllocations}, pl.classLister, pl.sliceLister)
 	if err != nil {
 		return nil, statusError(logger, err)
 	}
@@ -20,6 +20,7 @@ package feature
 // This struct allows us to break the dependency of the plugins on
 // the internal k8s features pkg.
 type Features struct {
+	EnableDRAAdminAccess                         bool
 	EnableDynamicResourceAllocation              bool
 	EnableVolumeCapacityPriority                 bool
 	EnableNodeInclusionPolicyInPodTopologySpread bool
@@ -46,6 +46,7 @@ import (
 // through the WithFrameworkOutOfTreeRegistry option.
 func NewInTreeRegistry() runtime.Registry {
 	fts := plfeature.Features{
+		EnableDRAAdminAccess:                         feature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
 		EnableDynamicResourceAllocation:              feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation),
 		EnableVolumeCapacityPriority:                 feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
 		EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
@@ -415,8 +415,12 @@ message DeviceRequest {
   // all ordinary claims to the device with respect to access modes and
   // any resource allocations.
   //
+  // This is an alpha field and requires enabling the DRAAdminAccess
+  // feature gate.
+  //
   // +optional
   // +default=false
+  // +featureGate=DRAAdminAccess
   optional bool adminAccess = 6;
 }
 
@@ -457,12 +461,17 @@ message DeviceRequestAllocationResult {
   // AdminAccess is a copy of the AdminAccess value in the
   // request which caused this device to be allocated.
   //
-  // New allocations are required to have this set. Old allocations made
+  // New allocations are required to have this set when the DRAAdminAccess
+  // feature gate is enabled. Old allocations made
   // by Kubernetes 1.31 do not have it yet. Clients which want to
   // support Kubernetes 1.31 need to look up the request and retrieve
   // the value from there if this field is not set.
   //
+  // This is an alpha field and requires enabling the DRAAdminAccess
+  // feature gate.
+  //
   // +required
+  // +featureGate=DRAAdminAccess
   optional bool adminAccess = 5;
 }
 
@ -448,8 +448,12 @@ type DeviceRequest struct {
|
|||||||
// all ordinary claims to the device with respect to access modes and
|
// all ordinary claims to the device with respect to access modes and
|
||||||
// any resource allocations.
|
// any resource allocations.
|
||||||
//
|
//
|
||||||
|
// This is an alpha field and requires enabling the DRAAdminAccess
|
||||||
|
// feature gate.
|
||||||
|
//
|
||||||
// +optional
|
// +optional
|
||||||
// +default=false
|
// +default=false
|
||||||
|
// +featureGate=DRAAdminAccess
|
||||||
AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"`
|
AdminAccess bool `json:"adminAccess,omitempty" protobuf:"bytes,6,opt,name=adminAccess"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -792,12 +796,17 @@ type DeviceRequestAllocationResult struct {
    // AdminAccess is a copy of the AdminAccess value in the
    // request which caused this device to be allocated.
    //
-   // New allocations are required to have this set. Old allocations made
+   // New allocations are required to have this set when the DRAAdminAccess
+   // feature gate is enabled. Old allocations made
    // by Kubernetes 1.31 do not have it yet. Clients which want to
    // support Kubernetes 1.31 need to look up the request and retrieve
    // the value from there if this field is not set.
    //
+   // This is an alpha field and requires enabling the DRAAdminAccess
+   // feature gate.
+   //
    // +required
+   // +featureGate=DRAAdminAccess
    AdminAccess *bool `json:"adminAccess" protobuf:"bytes,5,name=adminAccess"`
 }
 
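The fallback described in the comment above can be sketched as follows. This is an illustrative helper, not code from this commit; it reuses the resourceapi alias for k8s.io/api/resource/v1alpha3 from the snippet further up and assumes the allocation result's Request field names the originating spec request:

// Resolve the effective admin-access value for one allocation result,
// falling back to the spec request for allocations made by Kubernetes 1.31
// that predate the status field.
func adminAccessFor(claim *resourceapi.ResourceClaim, result resourceapi.DeviceRequestAllocationResult) bool {
	if result.AdminAccess != nil {
		return *result.AdminAccess
	}
	// Old allocation: look up the request which caused this allocation.
	for _, req := range claim.Spec.Devices.Requests {
		if req.Name == result.Request {
			return req.AdminAccess
		}
	}
	return false
}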
@@ -182,7 +182,7 @@ var map_DeviceRequest = map[string]string{
 	"selectors": "Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.",
 	"allocationMode": "AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are:\n\n- ExactCount: This request is for a specific number of devices.\n This is the default. The exact number is provided in the\n count field.\n\n- All: This request is for all of the matching devices in a pool.\n Allocation will fail if some devices are already allocated,\n unless adminAccess is requested.\n\nIf AlloctionMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field.\n\nMore modes may get added in the future. Clients must refuse to handle requests with unknown modes.",
 	"count": "Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.",
-	"adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.",
+	"adminAccess": "AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 }
 
 func (DeviceRequest) SwaggerDoc() map[string]string {
@@ -195,7 +195,7 @@ var map_DeviceRequestAllocationResult = map[string]string{
 	"driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
 	"pool": "This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
 	"device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
-	"adminAccess": "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.",
+	"adminAccess": "AdminAccess is a copy of the AdminAccess value in the request which caused this device to be allocated.\n\nNew allocations are required to have this set when the DRAAdminAccess feature gate is enabled. Old allocations made by Kubernetes 1.31 do not have it yet. Clients which want to support Kubernetes 1.31 need to look up the request and retrieve the value from there if this field is not set.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate.",
 }
 
 func (DeviceRequestAllocationResult) SwaggerDoc() map[string]string {
@@ -46,6 +46,7 @@ type ClaimLister interface {
 // available and the current state of the cluster (claims, classes, resource
 // slices).
 type Allocator struct {
+	adminAccessEnabled bool
 	claimsToAllocate []*resourceapi.ResourceClaim
 	claimLister ClaimLister
 	classLister resourcelisters.DeviceClassLister
@@ -55,12 +56,14 @@ type Allocator struct {
 // NewAllocator returns an allocator for a certain set of claims or an error if
 // some problem was detected which makes it impossible to allocate claims.
 func NewAllocator(ctx context.Context,
+	adminAccessEnabled bool,
 	claimsToAllocate []*resourceapi.ResourceClaim,
 	claimLister ClaimLister,
 	classLister resourcelisters.DeviceClassLister,
 	sliceLister resourcelisters.ResourceSliceLister,
 ) (*Allocator, error) {
 	return &Allocator{
+		adminAccessEnabled: adminAccessEnabled,
 		claimsToAllocate: claimsToAllocate,
 		claimLister: claimLister,
 		classLister: classLister,
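Since the allocator no longer consults feature gates itself, each caller passes the decision in. A rough sketch of that wiring, not taken from this commit, although the scheduler test change further down in this diff does essentially the same (utilfeature, features, and structured refer to the imports added there; claims and listers come from the caller's context):

// Derive the flag from the gate and hand it to the allocator.
adminAccess := utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess)
allocator, err := structured.NewAllocator(ctx, adminAccess,
	claimsToAllocate, claimLister, classLister, sliceLister)
if err != nil {
	return fmt.Errorf("create allocator: %w", err)
}
_ = allocator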
@@ -160,6 +163,10 @@ func (a *Allocator) Allocate(ctx context.Context, node *v1.Node) (finalResult []
 			}
 		}
 
+		if !a.adminAccessEnabled && request.AdminAccess {
+			return nil, fmt.Errorf("claim %s, request %s: admin access is requested, but the feature is disabled", klog.KObj(claim), request.Name)
+		}
+
 		// Should be set. If it isn't, something changed and we should refuse to proceed.
 		if request.DeviceClassName == "" {
 			return nil, fmt.Errorf("claim %s, request %s: missing device class name (unsupported request type?)", klog.KObj(claim), request.Name)
@@ -358,6 +358,7 @@ func TestAllocator(t *testing.T) {
 	intAttribute := resourceapi.FullyQualifiedName("numa")
 
 	testcases := map[string]struct {
+		adminAccess bool
 		claimsToAllocate []*resourceapi.ResourceClaim
 		allocatedClaims []*resourceapi.ResourceClaim
 		classes []*resourceapi.DeviceClass
@@ -930,7 +931,22 @@ func TestAllocator(t *testing.T) {
 
 			expectResults: nil,
 		},
-		"admin-access": {
+		"admin-access-disabled": {
+			adminAccess: false,
+			claimsToAllocate: func() []*resourceapi.ResourceClaim {
+				c := claim(claim0, req0, classA)
+				c.Spec.Devices.Requests[0].AdminAccess = true
+				return []*resourceapi.ResourceClaim{c}
+			}(),
+			classes: objects(class(classA, driverA)),
+			slices: objects(sliceWithOneDevice(slice1, node1, pool1, driverA)),
+			node: node(node1, region1),
+
+			expectResults: nil,
+			expectError: gomega.MatchError(gomega.ContainSubstring("claim claim-0, request req-0: admin access is requested, but the feature is disabled")),
+		},
+		"admin-access-enabled": {
+			adminAccess: true,
 			claimsToAllocate: func() []*resourceapi.ResourceClaim {
 				c := claim(claim0, req0, classA)
 				c.Spec.Devices.Requests[0].AdminAccess = true
@@ -1373,7 +1389,7 @@ func TestAllocator(t *testing.T) {
 				classLister.objs = append(classLister.objs, class.DeepCopy())
 			}
 
-			allocator, err := NewAllocator(ctx, toAllocate.claims, allocated, classLister, sliceLister)
+			allocator, err := NewAllocator(ctx, tc.adminAccess, toAllocate.claims, allocated, classLister, sliceLister)
 			g.Expect(err).ToNot(gomega.HaveOccurred())
 
 			results, err := allocator.Allocate(ctx, tc.node)
@@ -33,6 +33,7 @@ import (
 	"github.com/onsi/gomega/types"
 
 	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	resourceapi "k8s.io/api/resource/v1alpha3"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -44,6 +45,7 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	admissionapi "k8s.io/pod-security-admission/api"
 	"k8s.io/utils/ptr"
@@ -732,7 +734,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 		driver := NewDriver(f, nodes, networkResources)
 		b := newBuilder(f, driver)
 
-		ginkgo.It("support validating admission policy for admin access", func(ctx context.Context) {
+		f.It("support validating admission policy for admin access", feature.DRAAdminAccess, func(ctx context.Context) {
 			// Create VAP, after making it unique to the current test.
 			adminAccessPolicyYAML := strings.ReplaceAll(adminAccessPolicyYAML, "dra.example.com", b.f.UniqueName)
 			driver.createFromYAML(ctx, []byte(adminAccessPolicyYAML), "")
@@ -844,6 +846,42 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 			_, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Create(ctx, claim2, metav1.CreateOptions{})
 			gomega.Expect(err).Should(gomega.MatchError(gomega.ContainSubstring("exceeded quota: object-count, requested: count/resourceclaims.resource.k8s.io=1, used: count/resourceclaims.resource.k8s.io=1, limited: count/resourceclaims.resource.k8s.io=1")), "creating second claim not allowed")
 		})
 
+		f.It("DaemonSet with admin access", feature.DRAAdminAccess, func(ctx context.Context) {
+			pod, template := b.podInline()
+			template.Spec.Spec.Devices.Requests[0].AdminAccess = true
+			// Limit the daemon set to the one node where we have the driver.
+			nodeName := nodes.NodeNames[0]
+			pod.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": nodeName}
+			pod.Spec.RestartPolicy = v1.RestartPolicyAlways
+			daemonSet := &appsv1.DaemonSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "monitoring-ds",
+				},
+				Spec: appsv1.DaemonSetSpec{
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"app": "monitoring"},
+					},
+					Template: v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{"app": "monitoring"},
+						},
+						Spec: pod.Spec,
+					},
+				},
+			}
+
+			created := b.create(ctx, template, daemonSet)
+			if !created[0].(*resourceapi.ResourceClaimTemplate).Spec.Spec.Devices.Requests[0].AdminAccess {
+				framework.Fail("AdminAccess field was cleared. This test depends on the DRAAdminAccess feature.")
+			}
+			ds := created[1].(*appsv1.DaemonSet)
+
+			gomega.Eventually(ctx, func(ctx context.Context) (bool, error) {
+				return e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{nodeName})(ctx)
+			}).WithTimeout(f.Timeouts.PodStart).Should(gomega.BeTrueBecause("DaemonSet pod should be running on node %s but isn't", nodeName))
+			framework.ExpectNoError(e2edaemonset.CheckDaemonStatus(ctx, f, daemonSet.Name))
+		})
 	})
 
 	ginkgo.Context("cluster", func() {
@@ -1317,6 +1355,13 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) []klog.KMe
 				err := b.f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
 				framework.ExpectNoError(err, "delete node resource slice")
 			})
+		case *appsv1.DaemonSet:
+			createdObj, err = b.f.ClientSet.AppsV1().DaemonSets(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
+			// Cleanup not really needed, but speeds up namespace shutdown.
+			ginkgo.DeferCleanup(func(ctx context.Context) {
+				err := b.f.ClientSet.AppsV1().DaemonSets(b.f.Namespace.Name).Delete(ctx, obj.Name, metav1.DeleteOptions{})
+				framework.ExpectNoError(err, "delete daemonset")
+			})
 		default:
 			framework.Fail(fmt.Sprintf("internal error, unsupported type %T", obj), 1)
 		}
@@ -91,6 +91,18 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	Downgrade = framework.WithFeature(framework.ValidFeatures.Add("Downgrade"))
 
+	// owning-sig: sig-node
+	// kep: https://kep.k8s.io/4381
+	// test-infra jobs:
+	// - "dra-alpha" in https://testgrid.k8s.io/sig-node-dynamic-resource-allocation
+	//
+	// This label is used for tests which need:
+	// - the DynamicResourceAllocation *and* DRAAdminAccess feature gates
+	// - the resource.k8s.io API group
+	// - a container runtime where support for CDI (https://github.com/cncf-tags/container-device-interface)
+	//   is enabled such that passing CDI device IDs through CRI fields is supported
+	DRAAdminAccess = framework.WithFeature(framework.ValidFeatures.Add("DRAAdminAccess"))
+
 	// owning-sig: sig-node
 	// kep: https://kep.k8s.io/4381
 	// test-infra jobs:
@@ -388,6 +388,12 @@
     lockToDefault: false
     preRelease: Deprecated
     version: "1.31"
+- name: DRAAdminAccess
+  versionedSpecs:
+  - default: false
+    lockToDefault: false
+    preRelease: Alpha
+    version: "1.32"
 - name: DynamicResourceAllocation
   versionedSpecs:
   - default: false
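As the entry above records, the new gate is alpha and off by default in 1.32. For manual testing it therefore has to be enabled explicitly on every component that consults it, for example with a flag setting along the lines of --feature-gates=DynamicResourceAllocation=true,DRAAdminAccess=true (an illustrative invocation, not part of this commit).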
@@ -30,9 +30,11 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/dynamic-resource-allocation/structured"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
 	"k8s.io/kubernetes/test/utils/ktesting"
 	"k8s.io/utils/ptr"
@@ -304,7 +306,7 @@ claims:
 			continue
 		}
 
-		allocator, err := structured.NewAllocator(tCtx, []*resourceapi.ResourceClaim{claim}, claimLister, classLister, sliceLister)
+		allocator, err := structured.NewAllocator(tCtx, utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess), []*resourceapi.ResourceClaim{claim}, claimLister, classLister, sliceLister)
 		tCtx.ExpectNoError(err, "create allocator")
 
 		rand.Shuffle(len(nodes), func(i, j int) {
@@ -132,7 +132,7 @@ func CreateResourceClaimController(ctx context.Context, tb ktesting.TB, clientSe
 	podInformer := informerFactory.Core().V1().Pods()
 	claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
 	claimTemplateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
-	claimController, err := resourceclaim.NewController(klog.FromContext(ctx), clientSet, podInformer, claimInformer, claimTemplateInformer)
+	claimController, err := resourceclaim.NewController(klog.FromContext(ctx), true /* admin access */, clientSet, podInformer, claimInformer, claimTemplateInformer)
 	if err != nil {
 		tb.Fatalf("Error creating claim controller: %v", err)
 	}
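The test helper above simply hard-codes true for the new admin-access argument; a gate-aware caller would derive the value instead, roughly like this (a sketch only, reusing the utilfeature and features imports shown earlier in this diff; the NewController signature is the one introduced by this commit):

// Pass the current gate value instead of a constant.
adminAccessEnabled := utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess)
claimController, err := resourceclaim.NewController(klog.FromContext(ctx),
	adminAccessEnabled, clientSet, podInformer, claimInformer, claimTemplateInformer)
if err != nil {
	tb.Fatalf("Error creating claim controller: %v", err)
}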