mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-24 12:15:52 +00:00
DRA quota: add ResourceClaim v1.ResourceQuota limits
Dynamic resource allocation is similar to storage in the sense that users create ResourceClaim objects to request resources, same as with persistent volume claims. The actual resource usage is only known when allocating claims, but some limits can already be enforced at admission time: - "count/resourceclaims.resource.k8s.io" limits the number of ResourceClaim objects in a namespace; this is a generic feature that is already supported also without this commit. - "resourceclaims" is *not* an alias - use "count/resourceclaims.resource.k8s.io" instead. - <device-class-name>.deviceclass.resource.k8s.io/devices limits the number of ResourceClaim objects in a namespace such that the number of devices requested through those objects with that class does not exceed the limit. A single request may cause the allocation of multiple devices. For exact counts, the quota limit is based on the sum of those exact counts. For requests asking for "all" matching devices, the maximum number of allocated devices per claim is used as a worst-case upper bound. Requests asking for "admin access" contribute to the quota.

DRA quota: remove admin mode exception
This commit is contained in:
parent
1f43a80b3c
commit
299ecde5cc
@ -5799,6 +5799,8 @@ const (
|
||||
ResourceLimitsMemory ResourceName = "limits.memory"
|
||||
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
||||
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
||||
// resource.k8s.io devices requested with a certain DeviceClass, number
|
||||
ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
|
||||
)
|
||||
|
||||
// The following identify resource prefix for Kubernetes object types
|
||||
|
@ -21,6 +21,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
quota "k8s.io/apiserver/pkg/quota/v1"
|
||||
"k8s.io/apiserver/pkg/quota/v1/generic"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/utils/clock"
|
||||
)
|
||||
|
||||
@ -40,6 +42,10 @@ func NewEvaluators(f quota.ListerForResourceFunc) []quota.Evaluator {
|
||||
NewServiceEvaluator(f),
|
||||
NewPersistentVolumeClaimEvaluator(f),
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
|
||||
result = append(result, NewResourceClaimEvaluator(f))
|
||||
}
|
||||
|
||||
// these evaluators require an alias for backwards compatibility
|
||||
for gvr, alias := range legacyObjectCountAliases {
|
||||
result = append(result,
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -21,75 +21,53 @@ import (
|
||||
"strings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
resourceapi "k8s.io/api/resource/v1alpha3"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
quota "k8s.io/apiserver/pkg/quota/v1"
|
||||
"k8s.io/apiserver/pkg/quota/v1/generic"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
storagehelpers "k8s.io/component-helpers/storage/volume"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
k8sfeatures "k8s.io/kubernetes/pkg/features"
|
||||
resourceinternal "k8s.io/kubernetes/pkg/apis/resource"
|
||||
resourceversioned "k8s.io/kubernetes/pkg/apis/resource/v1alpha3"
|
||||
)
|
||||
|
||||
// the name used for object count quota
|
||||
var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource())
|
||||
// The name used for object count quota. This evaluator takes over counting
|
||||
// those because of its GroupResource, so it has to implement this
|
||||
// count.
|
||||
var ClaimObjectCountName = generic.ObjectCountQuotaResourceNameFor(resourceapi.SchemeGroupVersion.WithResource("resourceclaims").GroupResource())
|
||||
|
||||
// pvcResources are the set of static resources managed by quota associated with pvcs.
|
||||
// for each resource in this list, it may be refined dynamically based on storage class.
|
||||
var pvcResources = []corev1.ResourceName{
|
||||
corev1.ResourcePersistentVolumeClaims,
|
||||
corev1.ResourceRequestsStorage,
|
||||
// V1ResourceByDeviceClass returns a quota resource name by device class.
|
||||
func V1ResourceByDeviceClass(className string) corev1.ResourceName {
|
||||
return corev1.ResourceName(className + corev1.ResourceClaimsPerClass)
|
||||
}
|
||||
|
||||
// storageClassSuffix is the suffix to the qualified portion of storage class resource name.
|
||||
// For example, if you want to quota storage by storage class, you would have a declaration
|
||||
// that follows <storage-class>.storageclass.storage.k8s.io/<resource>.
|
||||
// For example:
|
||||
// * gold.storageclass.storage.k8s.io/: 500Gi
|
||||
// * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi
|
||||
const storageClassSuffix string = ".storageclass.storage.k8s.io/"
|
||||
|
||||
/* TODO: prune?
|
||||
// ResourceByStorageClass returns a quota resource name by storage class.
|
||||
func ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName {
|
||||
return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
|
||||
}
|
||||
*/
|
||||
|
||||
// V1ResourceByStorageClass returns a quota resource name by storage class.
|
||||
func V1ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName {
|
||||
return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
|
||||
// NewResourceClaimEvaluator returns an evaluator that can evaluate resource claims
|
||||
func NewResourceClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator {
|
||||
listFuncByNamespace := generic.ListResourceUsingListerFunc(f, resourceapi.SchemeGroupVersion.WithResource("resourceclaims"))
|
||||
claimEvaluator := &claimEvaluator{listFuncByNamespace: listFuncByNamespace}
|
||||
return claimEvaluator
|
||||
}
|
||||
|
||||
// NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims
|
||||
func NewPersistentVolumeClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator {
|
||||
listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims"))
|
||||
pvcEvaluator := &pvcEvaluator{listFuncByNamespace: listFuncByNamespace}
|
||||
return pvcEvaluator
|
||||
}
|
||||
|
||||
// pvcEvaluator knows how to evaluate quota usage for persistent volume claims
|
||||
type pvcEvaluator struct {
|
||||
// listFuncByNamespace knows how to list pvc claims
|
||||
// claimEvaluator knows how to evaluate quota usage for resource claims
|
||||
type claimEvaluator struct {
|
||||
// listFuncByNamespace knows how to list resource claims
|
||||
listFuncByNamespace generic.ListFuncByNamespace
|
||||
}
|
||||
|
||||
// Constraints verifies that all required resources are present on the item.
|
||||
func (p *pvcEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
|
||||
// no-op for persistent volume claims
|
||||
func (p *claimEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
|
||||
// no-op for resource claims
|
||||
return nil
|
||||
}
|
||||
|
||||
// GroupResource that this evaluator tracks
|
||||
func (p *pvcEvaluator) GroupResource() schema.GroupResource {
|
||||
return corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()
|
||||
func (p *claimEvaluator) GroupResource() schema.GroupResource {
|
||||
return resourceapi.SchemeGroupVersion.WithResource("resourceclaims").GroupResource()
|
||||
}
|
||||
|
||||
// Handles returns true if the evaluator should handle the specified operation.
|
||||
func (p *pvcEvaluator) Handles(a admission.Attributes) bool {
|
||||
func (p *claimEvaluator) Handles(a admission.Attributes) bool {
|
||||
op := a.GetOperation()
|
||||
if op == admission.Create {
|
||||
return true
|
||||
@ -101,137 +79,84 @@ func (p *pvcEvaluator) Handles(a admission.Attributes) bool {
|
||||
}
|
||||
|
||||
// Matches returns true if the evaluator matches the specified quota with the provided input item
|
||||
func (p *pvcEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
|
||||
func (p *claimEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
|
||||
return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
|
||||
}
|
||||
|
||||
// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
|
||||
func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
|
||||
func (p *claimEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
|
||||
return []corev1.ScopedResourceSelectorRequirement{}, nil
|
||||
}
|
||||
|
||||
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
|
||||
// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
|
||||
func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
|
||||
func (p *claimEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
|
||||
return []corev1.ScopedResourceSelectorRequirement{}, nil
|
||||
}
|
||||
|
||||
// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
|
||||
func (p *pvcEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName {
|
||||
func (p *claimEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName {
|
||||
result := []corev1.ResourceName{}
|
||||
for _, item := range items {
|
||||
// match object count quota fields
|
||||
if quota.Contains([]corev1.ResourceName{pvcObjectCountName}, item) {
|
||||
if item == ClaimObjectCountName /* object count quota fields */ ||
|
||||
strings.HasSuffix(string(item), corev1.ResourceClaimsPerClass /* by device class */) {
|
||||
result = append(result, item)
|
||||
continue
|
||||
}
|
||||
// match pvc resources
|
||||
if quota.Contains(pvcResources, item) {
|
||||
result = append(result, item)
|
||||
continue
|
||||
}
|
||||
// match pvc resources scoped by storage class (<storage-class-name>.storageclass.storage.k8s.io/<resource>)
|
||||
for _, resource := range pvcResources {
|
||||
byStorageClass := storageClassSuffix + string(resource)
|
||||
if strings.HasSuffix(string(item), byStorageClass) {
|
||||
result = append(result, item)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Usage knows how to measure usage associated with item.
|
||||
func (p *pvcEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
|
||||
func (p *claimEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
|
||||
result := corev1.ResourceList{}
|
||||
pvc, err := toExternalPersistentVolumeClaimOrError(item)
|
||||
claim, err := toExternalResourceClaimOrError(item)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
// charge for claim
|
||||
result[corev1.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI))
|
||||
result[pvcObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
|
||||
storageClassRef := storagehelpers.GetPersistentVolumeClaimClass(pvc)
|
||||
if len(storageClassRef) > 0 {
|
||||
storageClassClaim := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourcePersistentVolumeClaims))
|
||||
result[storageClassClaim] = *(resource.NewQuantity(1, resource.DecimalSI))
|
||||
}
|
||||
|
||||
requestedStorage := p.getStorageUsage(pvc)
|
||||
if requestedStorage != nil {
|
||||
result[corev1.ResourceRequestsStorage] = *requestedStorage
|
||||
// charge usage to the storage class (if present)
|
||||
if len(storageClassRef) > 0 {
|
||||
storageClassStorage := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourceRequestsStorage))
|
||||
result[storageClassStorage] = *requestedStorage
|
||||
result[ClaimObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
|
||||
for _, request := range claim.Spec.Devices.Requests {
|
||||
deviceClassClaim := V1ResourceByDeviceClass(request.DeviceClassName)
|
||||
var numDevices int64
|
||||
switch request.AllocationMode {
|
||||
case resourceapi.DeviceAllocationModeExactCount:
|
||||
numDevices = request.Count
|
||||
case resourceapi.DeviceAllocationModeAll:
|
||||
// Worst case...
|
||||
numDevices = resourceapi.AllocationResultsMaxSize
|
||||
default:
|
||||
// Could happen after a downgrade. Unknown modes
|
||||
// don't count towards the quota and users shouldn't
|
||||
// expect that when downgrading.
|
||||
}
|
||||
quantity := result[deviceClassClaim]
|
||||
quantity.Add(*(resource.NewQuantity(numDevices, resource.DecimalSI)))
|
||||
result[deviceClassClaim] = quantity
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *pvcEvaluator) getStorageUsage(pvc *corev1.PersistentVolumeClaim) *resource.Quantity {
|
||||
var result *resource.Quantity
|
||||
roundUpFunc := func(i *resource.Quantity) *resource.Quantity {
|
||||
roundedRequest := i.DeepCopy()
|
||||
if !roundedRequest.RoundUp(0) {
|
||||
// Ensure storage requests are counted as whole byte values, to pass resourcequota validation.
|
||||
// See https://issue.k8s.io/94313
|
||||
return &roundedRequest
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
if userRequest, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok {
|
||||
result = roundUpFunc(&userRequest)
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(k8sfeatures.RecoverVolumeExpansionFailure) && result != nil {
|
||||
if len(pvc.Status.AllocatedResources) == 0 {
|
||||
return result
|
||||
}
|
||||
|
||||
// if AllocatedResources is set and is greater than user request, we should use it.
|
||||
if allocatedRequest, ok := pvc.Status.AllocatedResources[corev1.ResourceStorage]; ok {
|
||||
if allocatedRequest.Cmp(*result) > 0 {
|
||||
result = roundUpFunc(&allocatedRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// UsageStats calculates aggregate usage for the object.
|
||||
func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
|
||||
func (p *claimEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
|
||||
return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
|
||||
}
|
||||
|
||||
// ensure we implement required interface
|
||||
var _ quota.Evaluator = &pvcEvaluator{}
|
||||
var _ quota.Evaluator = &claimEvaluator{}
|
||||
|
||||
func toExternalPersistentVolumeClaimOrError(obj runtime.Object) (*corev1.PersistentVolumeClaim, error) {
|
||||
pvc := &corev1.PersistentVolumeClaim{}
|
||||
func toExternalResourceClaimOrError(obj runtime.Object) (*resourceapi.ResourceClaim, error) {
|
||||
claim := &resourceapi.ResourceClaim{}
|
||||
switch t := obj.(type) {
|
||||
case *corev1.PersistentVolumeClaim:
|
||||
pvc = t
|
||||
case *api.PersistentVolumeClaim:
|
||||
if err := k8s_api_v1.Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(t, pvc, nil); err != nil {
|
||||
case *resourceapi.ResourceClaim:
|
||||
claim = t
|
||||
case *resourceinternal.ResourceClaim:
|
||||
if err := resourceversioned.Convert_resource_ResourceClaim_To_v1alpha3_ResourceClaim(t, claim, nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("expect *api.PersistentVolumeClaim or *v1.PersistentVolumeClaim, got %v", t)
|
||||
return nil, fmt.Errorf("expect *resource.ResourceClaim or *v1alpha2.ResourceClaim, got %v", t)
|
||||
}
|
||||
return pvc, nil
|
||||
}
|
||||
|
||||
// RequiresQuotaReplenish enables quota monitoring for PVCs.
|
||||
func RequiresQuotaReplenish(pvc, oldPVC *corev1.PersistentVolumeClaim) bool {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(k8sfeatures.RecoverVolumeExpansionFailure) {
|
||||
if oldPVC.Status.AllocatedResources.Storage() != pvc.Status.AllocatedResources.Storage() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return claim, nil
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -17,209 +17,156 @@ limitations under the License.
|
||||
package core
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
quota "k8s.io/apiserver/pkg/quota/v1"
|
||||
"k8s.io/apiserver/pkg/quota/v1/generic"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
api "k8s.io/kubernetes/pkg/apis/resource"
|
||||
)
|
||||
|
||||
func testVolumeClaim(name string, namespace string, spec core.PersistentVolumeClaimSpec) *core.PersistentVolumeClaim {
|
||||
return &core.PersistentVolumeClaim{
|
||||
func testResourceClaim(name string, namespace string, spec api.ResourceClaimSpec) *api.ResourceClaim {
|
||||
return &api.ResourceClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
|
||||
Spec: spec,
|
||||
}
|
||||
}
|
||||
|
||||
func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) {
|
||||
classGold := "gold"
|
||||
validClaim := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: "Exists",
|
||||
},
|
||||
},
|
||||
},
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
core.ReadOnlyMany,
|
||||
},
|
||||
Resources: core.VolumeResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse("10Gi"),
|
||||
},
|
||||
},
|
||||
})
|
||||
validClaimByStorageClass := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: "Exists",
|
||||
},
|
||||
},
|
||||
},
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
core.ReadOnlyMany,
|
||||
},
|
||||
Resources: core.VolumeResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse("10Gi"),
|
||||
},
|
||||
},
|
||||
StorageClassName: &classGold,
|
||||
})
|
||||
func TestResourceClaimEvaluatorUsage(t *testing.T) {
|
||||
classGpu := "gpu"
|
||||
validClaim := testResourceClaim("foo", "ns", api.ResourceClaimSpec{Devices: api.DeviceClaim{Requests: []api.DeviceRequest{{Name: "req-0", DeviceClassName: classGpu, AllocationMode: api.DeviceAllocationModeExactCount, Count: 1}}}})
|
||||
|
||||
validClaimWithNonIntegerStorage := validClaim.DeepCopy()
|
||||
validClaimWithNonIntegerStorage.Spec.Resources.Requests[core.ResourceName(core.ResourceStorage)] = resource.MustParse("1001m")
|
||||
|
||||
validClaimByStorageClassWithNonIntegerStorage := validClaimByStorageClass.DeepCopy()
|
||||
validClaimByStorageClassWithNonIntegerStorage.Spec.Resources.Requests[core.ResourceName(core.ResourceStorage)] = resource.MustParse("1001m")
|
||||
|
||||
evaluator := NewPersistentVolumeClaimEvaluator(nil)
|
||||
evaluator := NewResourceClaimEvaluator(nil)
|
||||
testCases := map[string]struct {
|
||||
pvc *core.PersistentVolumeClaim
|
||||
usage corev1.ResourceList
|
||||
enableRecoverFromExpansion bool
|
||||
claim *api.ResourceClaim
|
||||
usage corev1.ResourceList
|
||||
errMsg string
|
||||
}{
|
||||
"pvc-usage": {
|
||||
pvc: validClaim,
|
||||
"simple": {
|
||||
claim: validClaim,
|
||||
usage: corev1.ResourceList{
|
||||
corev1.ResourceRequestsStorage: resource.MustParse("10Gi"),
|
||||
corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"),
|
||||
"count/resourceclaims.resource.k8s.io": resource.MustParse("1"),
|
||||
"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
|
||||
},
|
||||
enableRecoverFromExpansion: true,
|
||||
},
|
||||
"pvc-usage-by-class": {
|
||||
pvc: validClaimByStorageClass,
|
||||
"many-requests": {
|
||||
claim: func() *api.ResourceClaim {
|
||||
claim := validClaim.DeepCopy()
|
||||
for i := 0; i < 4; i++ {
|
||||
claim.Spec.Devices.Requests = append(claim.Spec.Devices.Requests, claim.Spec.Devices.Requests[0])
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
usage: corev1.ResourceList{
|
||||
corev1.ResourceRequestsStorage: resource.MustParse("10Gi"),
|
||||
corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
|
||||
V1ResourceByStorageClass(classGold, corev1.ResourceRequestsStorage): resource.MustParse("10Gi"),
|
||||
V1ResourceByStorageClass(classGold, corev1.ResourcePersistentVolumeClaims): resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"),
|
||||
"count/resourceclaims.resource.k8s.io": resource.MustParse("1"),
|
||||
"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("5"),
|
||||
},
|
||||
enableRecoverFromExpansion: true,
|
||||
},
|
||||
|
||||
"pvc-usage-rounded": {
|
||||
pvc: validClaimWithNonIntegerStorage,
|
||||
"count": {
|
||||
claim: func() *api.ResourceClaim {
|
||||
claim := validClaim.DeepCopy()
|
||||
claim.Spec.Devices.Requests[0].Count = 5
|
||||
return claim
|
||||
}(),
|
||||
usage: corev1.ResourceList{
|
||||
corev1.ResourceRequestsStorage: resource.MustParse("2"), // 1001m -> 2
|
||||
corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"),
|
||||
"count/resourceclaims.resource.k8s.io": resource.MustParse("1"),
|
||||
"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("5"),
|
||||
},
|
||||
enableRecoverFromExpansion: true,
|
||||
},
|
||||
"pvc-usage-by-class-rounded": {
|
||||
pvc: validClaimByStorageClassWithNonIntegerStorage,
|
||||
"all": {
|
||||
claim: func() *api.ResourceClaim {
|
||||
claim := validClaim.DeepCopy()
|
||||
claim.Spec.Devices.Requests[0].AllocationMode = api.DeviceAllocationModeAll
|
||||
return claim
|
||||
}(),
|
||||
usage: corev1.ResourceList{
|
||||
corev1.ResourceRequestsStorage: resource.MustParse("2"), // 1001m -> 2
|
||||
corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
|
||||
V1ResourceByStorageClass(classGold, corev1.ResourceRequestsStorage): resource.MustParse("2"), // 1001m -> 2
|
||||
V1ResourceByStorageClass(classGold, corev1.ResourcePersistentVolumeClaims): resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"),
|
||||
"count/resourceclaims.resource.k8s.io": resource.MustParse("1"),
|
||||
"gpu.deviceclass.resource.k8s.io/devices": *resource.NewQuantity(api.AllocationResultsMaxSize, resource.DecimalSI),
|
||||
},
|
||||
enableRecoverFromExpansion: true,
|
||||
},
|
||||
"pvc-usage-higher-allocated-resource": {
|
||||
pvc: getPVCWithAllocatedResource("5G", "10G"),
|
||||
"unknown-count-mode": {
|
||||
claim: func() *api.ResourceClaim {
|
||||
claim := validClaim.DeepCopy()
|
||||
claim.Spec.Devices.Requests[0].AllocationMode = "future-mode"
|
||||
return claim
|
||||
}(),
|
||||
usage: corev1.ResourceList{
|
||||
corev1.ResourceRequestsStorage: resource.MustParse("10G"),
|
||||
corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"),
|
||||
"count/resourceclaims.resource.k8s.io": resource.MustParse("1"),
|
||||
"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("0"),
|
||||
},
|
||||
enableRecoverFromExpansion: true,
|
||||
},
|
||||
"pvc-usage-lower-allocated-resource": {
|
||||
pvc: getPVCWithAllocatedResource("10G", "5G"),
|
||||
"admin": {
|
||||
claim: func() *api.ResourceClaim {
|
||||
claim := validClaim.DeepCopy()
|
||||
// Admins are *not* exempt from quota.
|
||||
claim.Spec.Devices.Requests[0].AdminAccess = true
|
||||
return claim
|
||||
}(),
|
||||
usage: corev1.ResourceList{
|
||||
corev1.ResourceRequestsStorage: resource.MustParse("10G"),
|
||||
corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"),
|
||||
"count/resourceclaims.resource.k8s.io": resource.MustParse("1"),
|
||||
"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
|
||||
},
|
||||
enableRecoverFromExpansion: true,
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RecoverVolumeExpansionFailure, testCase.enableRecoverFromExpansion)()
|
||||
actual, err := evaluator.Usage(testCase.pvc)
|
||||
actual, err := evaluator.Usage(testCase.claim)
|
||||
if err != nil {
|
||||
t.Errorf("%s unexpected error: %v", testName, err)
|
||||
if testCase.errMsg == "" {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
if !strings.Contains(err.Error(), testCase.errMsg) {
|
||||
t.Fatalf("Expected error %q, got error: %v", testCase.errMsg, err.Error())
|
||||
}
|
||||
}
|
||||
if !quota.Equals(testCase.usage, actual) {
|
||||
t.Errorf("%s expected:\n%v\n, actual:\n%v", testName, testCase.usage, actual)
|
||||
if err == nil && testCase.errMsg != "" {
|
||||
t.Fatalf("Expected error %q, got none", testCase.errMsg)
|
||||
}
|
||||
if diff := cmp.Diff(testCase.usage, actual); diff != "" {
|
||||
t.Errorf("Unexpected usage (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func getPVCWithAllocatedResource(pvcSize, allocatedSize string) *core.PersistentVolumeClaim {
|
||||
validPVCWithAllocatedResources := testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
Resources: core.VolumeResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceStorage: resource.MustParse(pvcSize),
|
||||
},
|
||||
},
|
||||
})
|
||||
validPVCWithAllocatedResources.Status.AllocatedResources = core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse(allocatedSize),
|
||||
}
|
||||
return validPVCWithAllocatedResources
|
||||
}
|
||||
|
||||
func TestPersistentVolumeClaimEvaluatorMatchingResources(t *testing.T) {
|
||||
evaluator := NewPersistentVolumeClaimEvaluator(nil)
|
||||
func TestResourceClaimEvaluatorMatchingResources(t *testing.T) {
|
||||
evaluator := NewResourceClaimEvaluator(nil)
|
||||
testCases := map[string]struct {
|
||||
items []corev1.ResourceName
|
||||
want []corev1.ResourceName
|
||||
}{
|
||||
"supported-resources": {
|
||||
items: []corev1.ResourceName{
|
||||
"count/persistentvolumeclaims",
|
||||
"requests.storage",
|
||||
"persistentvolumeclaims",
|
||||
"gold.storageclass.storage.k8s.io/requests.storage",
|
||||
"gold.storageclass.storage.k8s.io/persistentvolumeclaims",
|
||||
"count/resourceclaims.resource.k8s.io",
|
||||
"gpu.deviceclass.resource.k8s.io/devices",
|
||||
},
|
||||
|
||||
want: []corev1.ResourceName{
|
||||
"count/persistentvolumeclaims",
|
||||
"requests.storage",
|
||||
"persistentvolumeclaims",
|
||||
"gold.storageclass.storage.k8s.io/requests.storage",
|
||||
"gold.storageclass.storage.k8s.io/persistentvolumeclaims",
|
||||
"count/resourceclaims.resource.k8s.io",
|
||||
"gpu.deviceclass.resource.k8s.io/devices",
|
||||
},
|
||||
},
|
||||
"unsupported-resources": {
|
||||
items: []corev1.ResourceName{
|
||||
"resourceclaims", // no such alias
|
||||
"storage",
|
||||
"ephemeral-storage",
|
||||
"bronze.storageclass.storage.k8s.io/storage",
|
||||
"gold.storage.k8s.io/requests.storage",
|
||||
"bronze.deviceclass.resource.k8s.io/storage",
|
||||
"gpu.storage.k8s.io/requests.storage",
|
||||
},
|
||||
want: []corev1.ResourceName{},
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
actual := evaluator.MatchingResources(testCase.items)
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
actual := evaluator.MatchingResources(testCase.items)
|
||||
|
||||
if !reflect.DeepEqual(testCase.want, actual) {
|
||||
t.Errorf("%s expected:\n%v\n, actual:\n%v", testName, testCase.want, actual)
|
||||
}
|
||||
if diff := cmp.Diff(testCase.want, actual); diff != "" {
|
||||
t.Errorf("Unexpected response (-want, +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -6979,6 +6979,8 @@ const (
|
||||
ResourceLimitsMemory ResourceName = "limits.memory"
|
||||
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
||||
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
||||
// resource.k8s.io devices requested with a certain DeviceClass, number
|
||||
ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
|
||||
)
|
||||
|
||||
// The following identify resource prefix for Kubernetes object types
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
resourceapi "k8s.io/api/resource/v1alpha3"
|
||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -489,6 +490,56 @@ var _ = SIGDescribe("ResourceQuota", func() {
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
	/*
		Release: v1.31
		Testname: ResourceQuota, object count quota, ResourceClaim
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match to expected used and total allowed resource quota count within namespace.
		Create ResourceClaim. Creation MUST be successful and resource usage count against the ResourceClaim object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the ResourceClaim. Deletion MUST succeed and resource usage count against the ResourceClaim object MUST be released from ResourceQuotaStatus of the ResourceQuota.
		[NotConformancePromotable] alpha feature
	*/
	f.It("should create a ResourceQuota and capture the life of a ResourceClaim", feature.DynamicResourceAllocation, func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		// Other quotas may already exist in the namespace; the quota-object
		// count asserted below is relative to this baseline.
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuotaDRA(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		// Before any claim exists, both the claim object count and the
		// per-device-class device count must be reported as zero.
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[core.ClaimObjectCountName] = resource.MustParse("0")
		usedResources[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceClaim")
		claim := newTestResourceClaimForQuota("test-claim")
		claim, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures resource claim creation")
		// One claim with a single request against classGold: both counters go to 1.
		usedResources = v1.ResourceList{}
		usedResources[core.ClaimObjectCountName] = resource.MustParse("1")
		usedResources[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a ResourceClaim")
		err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		// Reuses the previous map, overwriting both entries back to zero.
		usedResources[core.ClaimObjectCountName] = resource.MustParse("0")
		usedResources[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})
|
||||
|
||||
/*
|
||||
Release: v1.16
|
||||
Testname: ResourceQuota, object count quota, pvc
|
||||
@ -1906,6 +1957,14 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
|
||||
}
|
||||
}
|
||||
|
||||
// newTestResourceQuotaDRA returns a quota that includes hard limits for ResourceClaim objects.
|
||||
func newTestResourceQuotaDRA(name string) *v1.ResourceQuota {
|
||||
quota := newTestResourceQuota(name)
|
||||
quota.Spec.Hard[core.ClaimObjectCountName] = resource.MustParse("1")
|
||||
quota.Spec.Hard[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("1")
|
||||
return quota
|
||||
}
|
||||
|
||||
// newTestPodForQuota returns a pod that has the specified requests and limits
|
||||
func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
@ -2003,6 +2062,23 @@ func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim
|
||||
}
|
||||
}
|
||||
|
||||
// newTestResourceClaimForQuota returns a simple resource claim
|
||||
func newTestResourceClaimForQuota(name string) *resourceapi.ResourceClaim {
|
||||
return &resourceapi.ResourceClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: resourceapi.ResourceClaimSpec{
|
||||
Devices: resourceapi.DeviceClaim{
|
||||
Requests: []resourceapi.DeviceRequest{{
|
||||
Name: "req-0",
|
||||
DeviceClassName: classGold,
|
||||
}},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newTestReplicationControllerForQuota returns a simple replication controller
|
||||
func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
|
||||
return &v1.ReplicationController{
|
||||
|
Loading…
Reference in New Issue
Block a user