dynamic resource allocation: avoid apiserver complaint about list content

This fixes the following warning (logged as an error) in the apiserver:

E0126 18:10:38.665239   16370 fieldmanager.go:210] "[SHOULD NOT HAPPEN] failed to update managedFields" err="failed to convert new object (test/claim-84; resource.k8s.io/v1alpha1, Kind=ResourceClaim) to smd typed: .status.reservedFor: element 0: associative list without keys has an element that's a map type" VersionKind="/, Kind=" namespace="test" name="claim-84"

The root cause is the same as in e50e8a0c91:
nothing in Kubernetes outright complains about a list of items where the item
type is comparable in Go, but not a simple type. This nonetheless isn't
supposed to be done in the API and can cause problems elsewhere.
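
For reference, the element type behind that error message is a struct: it is
comparable in Go, so the generic set-style validation compiled and ran, but for
structured-merge-diff a +listType=set list of such elements is an "associative
list without keys" whose elements are map types, which is exactly the complaint
quoted above. A trimmed sketch of the element type (only the fields that appear
in the diffs below are shown; the real v1alpha1 type carries more documentation
and fields):

    // Trimmed sketch of the list element; fields not relevant to this commit
    // are omitted.
    type ResourceClaimConsumerReference struct {
        // Resource is the lowercase plural resource name, for example "pods".
        Resource string
        // Name is the name of the referenced object.
        Name string
        // UID uniquely identifies the consumer, which is why it can serve as
        // the list-map key below.
        UID types.UID
    }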

For the ReservedFor field, everything seems to work okay except for the
warning. However, it's better to follow conventions and use a map. This is
possible in this case because UID is guaranteed to be a unique key.
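
Concretely, "use a map" means declaring the field as an associative list keyed
by uid, not turning it into a Go map. Condensed from the type change further
down (the surrounding doc comment stays the same):

    // Before:
    // +listType=set
    // +optional
    ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor"`

    // After: an associative list whose entries are identified by their uid.
    // +listType=map
    // +listMapKey=uid
    // +optional
    ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor"`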

Validation is now stricter than before, which is a good thing: previously,
two entries with the same UID were allowed as long as some other field was
different, a situation that should never have been allowed.
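
As an illustration (hypothetical claim, values mirroring the updated test
below), a status like the following used to pass because the two entries differ
in Name, but is now rejected with a duplicate error on reservedFor[1].uid:

    claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
        {Resource: "pods", Name: "foo-0", UID: "1"},
        {Resource: "pods", Name: "foo-1", UID: "1"}, // same UID, different name: now rejected
    }
    // validateResourceClaimConsumers reports:
    // field.Duplicate(field.NewPath("status", "reservedFor").Index(1).Child("uid"), types.UID("1"))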

Author: Patrick Ohly
Date: 2023-01-26 20:37:00 +01:00
parent d29e3bd7aa
commit 508cd60760
8 changed files with 49 additions and 17 deletions

View File

@@ -13136,7 +13136,10 @@
         "$ref": "#/definitions/io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference"
       },
       "type": "array",
-      "x-kubernetes-list-type": "set"
+      "x-kubernetes-list-map-keys": [
+        "uid"
+      ],
+      "x-kubernetes-list-type": "map"
     }
   },
   "type": "object"

View File

@@ -466,7 +466,10 @@
         "default": {}
       },
       "type": "array",
-      "x-kubernetes-list-type": "set"
+      "x-kubernetes-list-map-keys": [
+        "uid"
+      ],
+      "x-kubernetes-list-type": "map"
     }
   },
   "type": "object"

View File

@@ -18,6 +18,7 @@ package validation
 import (
 	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
@@ -131,8 +132,7 @@ func ValidateClaimStatusUpdate(resourceClaim, oldClaim *resource.ResourceClaim)
 	}
 	allErrs = append(allErrs, validateAllocationResult(resourceClaim.Status.Allocation, fldPath.Child("allocation"))...)
-	allErrs = append(allErrs, validateSliceIsASet(resourceClaim.Status.ReservedFor, resource.ResourceClaimReservedForMaxSize,
-		validateResourceClaimUserReference, fldPath.Child("reservedFor"))...)
+	allErrs = append(allErrs, validateResourceClaimConsumers(resourceClaim.Status.ReservedFor, resource.ResourceClaimReservedForMaxSize, fldPath.Child("reservedFor"))...)
 	// Now check for invariants that must be valid for a ResourceClaim.
 	if len(resourceClaim.Status.ReservedFor) > 0 {
@@ -231,6 +231,28 @@ func validateSliceIsASet[T comparable](slice []T, maxSize int, validateItem func
 	return allErrs
 }

+// validateResourceClaimConsumers ensures that the slice contains no duplicate UIDs and does not exceed a certain maximum size.
+func validateResourceClaimConsumers(consumers []resource.ResourceClaimConsumerReference, maxSize int, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	allUIDs := sets.New[types.UID]()
+	for i, consumer := range consumers {
+		idxPath := fldPath.Index(i)
+		if allUIDs.Has(consumer.UID) {
+			allErrs = append(allErrs, field.Duplicate(idxPath.Child("uid"), consumer.UID))
+		} else {
+			allErrs = append(allErrs, validateResourceClaimUserReference(consumer, idxPath)...)
+			allUIDs.Insert(consumer.UID)
+		}
+	}
+	if len(consumers) > maxSize {
+		// Dumping the entire field into the error message is likely to be too long,
+		// in particular when it is already beyond the maximum size. Instead this
+		// just shows the number of entries.
+		allErrs = append(allErrs, field.TooLongMaxLength(fldPath, len(consumers), maxSize))
+	}
+	return allErrs
+}
+
 // ValidatePodScheduling validates a PodScheduling.
 func ValidatePodScheduling(resourceClaim *resource.PodScheduling) field.ErrorList {
 	allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))

View File

@@ -24,6 +24,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/resource"
@@ -395,7 +396,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 						resource.ResourceClaimConsumerReference{
 							Resource: "pods",
 							Name:     fmt.Sprintf("foo-%d", i),
-							UID:      "1",
+							UID:      types.UID(fmt.Sprintf("%d", i)),
 						})
 				}
 				return claim
@@ -410,7 +411,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 						resource.ResourceClaimConsumerReference{
 							Resource: "pods",
 							Name:     fmt.Sprintf("foo-%d", i),
-							UID:      "1",
+							UID:      types.UID(fmt.Sprintf("%d", i)),
 						})
 				}
 				return claim
@@ -425,19 +426,15 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 						resource.ResourceClaimConsumerReference{
 							Resource: "pods",
 							Name:     fmt.Sprintf("foo-%d", i),
-							UID:      "1",
+							UID:      types.UID(fmt.Sprintf("%d", i)),
 						})
 				}
 				return claim
 			},
 		},
 		"invalid-reserved-for-duplicate": {
-			wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "reservedFor").Index(1), resource.ResourceClaimConsumerReference{
-				Resource: "pods",
-				Name:     "foo",
-				UID:      "1",
-			})},
-			oldClaim: validAllocatedClaim,
+			wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "reservedFor").Index(1).Child("uid"), types.UID("1"))},
+			oldClaim:     validAllocatedClaim,
 			update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
 				for i := 0; i < 2; i++ {
 					claim.Status.ReservedFor = append(claim.Status.ReservedFor,
@@ -463,7 +460,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
 						resource.ResourceClaimConsumerReference{
 							Resource: "pods",
 							Name:     fmt.Sprintf("foo-%d", i),
-							UID:      "1",
+							UID:      types.UID(fmt.Sprintf("%d", i)),
 						})
 				}
 				return claim

View File

@@ -41280,7 +41280,10 @@ func schema_k8sio_api_resource_v1alpha1_ResourceClaimStatus(ref common.Reference
 			"reservedFor": {
 				VendorExtensible: spec.VendorExtensible{
 					Extensions: spec.Extensions{
-						"x-kubernetes-list-type": "set",
+						"x-kubernetes-list-map-keys": []interface{}{
+							"uid",
+						},
+						"x-kubernetes-list-type": "map",
 					},
 				},
 				SchemaProps: spec.SchemaProps{

View File

@@ -248,7 +248,8 @@ message ResourceClaimStatus {
   // There can be at most 32 such reservations. This may get increased in
   // the future, but not reduced.
   //
-  // +listType=set
+  // +listType=map
+  // +listMapKey=uid
   // +optional
   repeated ResourceClaimConsumerReference reservedFor = 3;

View File

@@ -112,7 +112,8 @@ type ResourceClaimStatus struct {
 	// There can be at most 32 such reservations. This may get increased in
 	// the future, but not reduced.
 	//
-	// +listType=set
+	// +listType=map
+	// +listMapKey=uid
 	// +optional
 	ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor"`

View File

@@ -11415,6 +11415,8 @@ var schemaYAML = typed.YAMLObject(`types:
           elementType:
             namedType: io.k8s.api.resource.v1alpha1.ResourceClaimConsumerReference
           elementRelationship: associative
+          keys:
+          - uid
 - name: io.k8s.api.resource.v1alpha1.ResourceClaimTemplate
   map:
     fields: