Merge pull request #115354 from pohly/dra-reserved-for-list-type
dynamic resource allocation: avoid apiserver complaint about list content
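
In short: ResourceClaimStatus.ReservedFor had been declared with `+listType=set`, and the apiserver complains about such a list because its elements are structs rather than scalars. This PR re-declares the list as `+listType=map` keyed by `uid` and makes the validation code enforce UID uniqueness itself. A minimal sketch of what the marker change looks like on an API type (hypothetical ExampleStatus/ConsumerReference names; only the markers mirror the real change below):

package example

// ConsumerReference is a toy stand-in for the real ResourceClaimConsumerReference.
type ConsumerReference struct {
	Name string `json:"name"`
	UID  string `json:"uid"`
}

type ExampleStatus struct {
	// Before: the list was a "set", i.e. items must be unique as whole values
	// (and are expected to be scalar-like):
	//   +listType=set
	// After: the list is a "map" keyed by the uid field, so server-side apply
	// can merge and track entries per key.
	// +listType=map
	// +listMapKey=uid
	Consumers []ConsumerReference `json:"consumers,omitempty"`
}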
This commit is contained in c829397f7a.

api/openapi-spec/swagger.json (generated)
@@ -13136,7 +13136,10 @@
             "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference"
           },
           "type": "array",
-          "x-kubernetes-list-type": "set"
+          "x-kubernetes-list-map-keys": [
+            "uid"
+          ],
+          "x-kubernetes-list-type": "map"
         }
       },
       "type": "object"
@@ -466,7 +466,10 @@
             "default": {}
           },
           "type": "array",
-          "x-kubernetes-list-type": "set"
+          "x-kubernetes-list-map-keys": [
+            "uid"
+          ],
+          "x-kubernetes-list-type": "map"
         }
       },
       "type": "object"
@@ -18,6 +18,7 @@ package validation

 import (
 	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
@@ -131,8 +132,7 @@ func ValidateClaimStatusUpdate(resourceClaim, oldClaim *resource.ResourceClaim)
 	}

 	allErrs = append(allErrs, validateAllocationResult(resourceClaim.Status.Allocation, fldPath.Child("allocation"))...)
-	allErrs = append(allErrs, validateSliceIsASet(resourceClaim.Status.ReservedFor, resource.ResourceClaimReservedForMaxSize,
-		validateResourceClaimUserReference, fldPath.Child("reservedFor"))...)
+	allErrs = append(allErrs, validateResourceClaimConsumers(resourceClaim.Status.ReservedFor, resource.ResourceClaimReservedForMaxSize, fldPath.Child("reservedFor"))...)

 	// Now check for invariants that must be valid for a ResourceClaim.
 	if len(resourceClaim.Status.ReservedFor) > 0 {
@@ -231,6 +231,28 @@ func validateSliceIsASet[T comparable](slice []T, maxSize int, validateItem func
 	return allErrs
 }

+// validateResourceClaimConsumers ensures that the slice contains no duplicate UIDs and does not exceed a certain maximum size.
+func validateResourceClaimConsumers(consumers []resource.ResourceClaimConsumerReference, maxSize int, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	allUIDs := sets.New[types.UID]()
+	for i, consumer := range consumers {
+		idxPath := fldPath.Index(i)
+		if allUIDs.Has(consumer.UID) {
+			allErrs = append(allErrs, field.Duplicate(idxPath.Child("uid"), consumer.UID))
+		} else {
+			allErrs = append(allErrs, validateResourceClaimUserReference(consumer, idxPath)...)
+			allUIDs.Insert(consumer.UID)
+		}
+	}
+	if len(consumers) > maxSize {
+		// Dumping the entire field into the error message is likely to be too long,
+		// in particular when it is already beyond the maximum size. Instead this
+		// just shows the number of entries.
+		allErrs = append(allErrs, field.TooLongMaxLength(fldPath, len(consumers), maxSize))
+	}
+	return allErrs
+}
+
 // ValidatePodScheduling validates a PodScheduling.
 func ValidatePodScheduling(resourceClaim *resource.PodScheduling) field.ErrorList {
 	allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
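As an aside, the duplicate-detection pattern in the new validateResourceClaimConsumers can be exercised standalone. A self-contained sketch (the toy consumer type and duplicateUIDs helper are hypothetical; the sets and types packages are the real ones used above):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
)

// consumer is a toy stand-in for resource.ResourceClaimConsumerReference.
type consumer struct {
	Name string
	UID  types.UID
}

// duplicateUIDs returns the UIDs that appear more than once, mirroring the
// duplicate check in validateResourceClaimConsumers.
func duplicateUIDs(consumers []consumer) []types.UID {
	seen := sets.New[types.UID]()
	var dups []types.UID
	for _, c := range consumers {
		if seen.Has(c.UID) {
			dups = append(dups, c.UID)
			continue
		}
		seen.Insert(c.UID)
	}
	return dups
}

func main() {
	cs := []consumer{
		{Name: "foo-0", UID: "1"},
		{Name: "foo-1", UID: "1"}, // same UID as foo-0 -> rejected by validation
	}
	fmt.Println(duplicateUIDs(cs)) // [1]
}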
@@ -24,6 +24,7 @@ import (
 	"github.com/stretchr/testify/assert"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/resource"
@@ -395,7 +396,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 				resource.ResourceClaimConsumerReference{
 					Resource: "pods",
 					Name:     fmt.Sprintf("foo-%d", i),
-					UID:      "1",
+					UID:      types.UID(fmt.Sprintf("%d", i)),
 				})
 			}
 			return claim
@@ -410,7 +411,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 				resource.ResourceClaimConsumerReference{
 					Resource: "pods",
 					Name:     fmt.Sprintf("foo-%d", i),
-					UID:      "1",
+					UID:      types.UID(fmt.Sprintf("%d", i)),
 				})
 			}
 			return claim
@@ -425,19 +426,15 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 				resource.ResourceClaimConsumerReference{
 					Resource: "pods",
 					Name:     fmt.Sprintf("foo-%d", i),
-					UID:      "1",
+					UID:      types.UID(fmt.Sprintf("%d", i)),
 				})
 			}
 			return claim
 		},
 	},
 	"invalid-reserved-for-duplicate": {
-		wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "reservedFor").Index(1), resource.ResourceClaimConsumerReference{
-			Resource: "pods",
-			Name:     "foo",
-			UID:      "1",
-		})},
-		oldClaim: validAllocatedClaim,
+		wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "reservedFor").Index(1).Child("uid"), types.UID("1"))},
+		oldClaim:     validAllocatedClaim,
 		update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
 			for i := 0; i < 2; i++ {
 				claim.Status.ReservedFor = append(claim.Status.ReservedFor,
@@ -463,7 +460,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 				resource.ResourceClaimConsumerReference{
 					Resource: "pods",
 					Name:     fmt.Sprintf("foo-%d", i),
-					UID:      "1",
+					UID:      types.UID(fmt.Sprintf("%d", i)),
 				})
 			}
 			return claim
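The expected failure in the duplicate test now points at the uid child of the offending entry instead of dumping the whole struct. A quick sketch of how such an error renders (field and types are the real apimachinery packages; the values are illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// Build the same path the updated test expects: status.reservedFor[1].uid
	p := field.NewPath("status", "reservedFor").Index(1).Child("uid")
	err := field.Duplicate(p, types.UID("1"))
	// Prints something like: status.reservedFor[1].uid: Duplicate value: "1"
	fmt.Println(err)
}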
pkg/generated/openapi/zz_generated.openapi.go (generated)
@@ -41280,7 +41280,10 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimStatus(ref common.Reference
 			"reservedFor": {
 				VendorExtensible: spec.VendorExtensible{
 					Extensions: spec.Extensions{
-						"x-kubernetes-list-type": "set",
+						"x-kubernetes-list-map-keys": []interface{}{
+							"uid",
+						},
+						"x-kubernetes-list-type": "map",
 					},
 				},
 				SchemaProps: spec.SchemaProps{
@@ -248,7 +248,8 @@ message ResourceClaimStatus {
   // There can be at most 32 such reservations. This may get increased in
   // the future, but not reduced.
   //
-  // +listType=set
+  // +listType=map
+  // +listMapKey=uid
   // +optional
   repeated ResourceClaimConsumerReference reservedFor = 3;

@@ -112,7 +112,8 @@ type ResourceClaimStatus struct {
 	// There can be at most 32 such reservations. This may get increased in
 	// the future, but not reduced.
 	//
-	// +listType=set
+	// +listType=map
+	// +listMapKey=uid
 	// +optional
 	ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor"`

@@ -11415,6 +11415,8 @@ var schemaYAML = typed.YAMLObject(`types:
           elementType:
             namedType: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference
           elementRelationship: associative
+          keys:
+          - uid
     - name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplate
       map:
         fields:
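For intuition about what "elementRelationship: associative" with "keys: [uid]" buys during server-side apply: entries from different appliers are matched by uid rather than the list being replaced wholesale. A toy merge by key (plain Go; the real merge logic lives in sigs.k8s.io/structured-merge-diff):

package main

import "fmt"

type ref struct {
	Name string
	UID  string
}

// mergeByUID updates or appends entries from patch into base, matching on UID.
// This is only a model of how an associative list keyed by "uid" merges.
func mergeByUID(base, patch []ref) []ref {
	index := make(map[string]int, len(base))
	for i, r := range base {
		index[r.UID] = i
	}
	for _, r := range patch {
		if i, ok := index[r.UID]; ok {
			base[i] = r // same key: entry is updated in place
		} else {
			base = append(base, r) // new key: entry is appended
		}
	}
	return base
}

func main() {
	base := []ref{{Name: "pod-a", UID: "1"}}
	patch := []ref{{Name: "pod-a-renamed", UID: "1"}, {Name: "pod-b", UID: "2"}}
	fmt.Println(mergeByUID(base, patch)) // [{pod-a-renamed 1} {pod-b 2}]
}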