Mirror of https://github.com/k3s-io/kubernetes.git

Merge pull request #120611 from pohly/dra-resource-quotas

DRA: resource quotas

Commit 05bb5f71f8
@@ -5799,6 +5799,8 @@ const (
	ResourceLimitsMemory ResourceName = "limits.memory"
	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
	// resource.k8s.io devices requested with a certain DeviceClass, number
	ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
)

// The following identify resource prefix for Kubernetes object types
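The new suffix composes per-class quota resource names: prefixing a DeviceClass name yields the key under which a ResourceQuota limits devices of that class. A minimal, self-contained sketch of the composition (the class name "gold" is purely illustrative):

package main

import "fmt"

// Mirrors the suffix constant added above.
const ResourceClaimsPerClass = ".deviceclass.resource.k8s.io/devices"

func main() {
	// Per-class quota resource name for a DeviceClass named "gold".
	fmt.Println("gold" + ResourceClaimsPerClass)
	// Output: gold.deviceclass.resource.k8s.io/devices
}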
@@ -19344,6 +19344,10 @@ func TestValidateResourceQuota(t *testing.T) {
			core.ResourceQuotas:     resource.MustParse("10"),
			core.ResourceConfigMaps: resource.MustParse("10"),
			core.ResourceSecrets:    resource.MustParse("10"),

			// These are unknown and not enforced unless DRA is enabled, but not invalid.
			"count/resourceclaims.resource.k8s.io":     resource.MustParse("1"),
			"gold.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
		},
	}
@@ -21,6 +21,8 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	quota "k8s.io/apiserver/pkg/quota/v1"
	"k8s.io/apiserver/pkg/quota/v1/generic"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/utils/clock"
)

@@ -40,6 +42,10 @@ func NewEvaluators(f quota.ListerForResourceFunc) []quota.Evaluator {
		NewServiceEvaluator(f),
		NewPersistentVolumeClaimEvaluator(f),
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
		result = append(result, NewResourceClaimEvaluator(f))
	}

	// these evaluators require an alias for backwards compatibility
	for gvr, alias := range legacyObjectCountAliases {
		result = append(result,
pkg/quota/v1/evaluator/core/resource_claims.go (new file, 162 lines)
@@ -0,0 +1,162 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1alpha3"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/admission"
	quota "k8s.io/apiserver/pkg/quota/v1"
	"k8s.io/apiserver/pkg/quota/v1/generic"
	resourceinternal "k8s.io/kubernetes/pkg/apis/resource"
	resourceversioned "k8s.io/kubernetes/pkg/apis/resource/v1alpha3"
)
// ClaimObjectCountName is the name used for object count quota. This
// evaluator takes over counting those objects because of its GroupResource,
// so it has to implement that count itself.
var ClaimObjectCountName = generic.ObjectCountQuotaResourceNameFor(resourceapi.SchemeGroupVersion.WithResource("resourceclaims").GroupResource())
// V1ResourceByDeviceClass returns a quota resource name by device class.
func V1ResourceByDeviceClass(className string) corev1.ResourceName {
	return corev1.ResourceName(className + corev1.ResourceClaimsPerClass)
}

// NewResourceClaimEvaluator returns an evaluator that can evaluate resource claims.
func NewResourceClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator {
	listFuncByNamespace := generic.ListResourceUsingListerFunc(f, resourceapi.SchemeGroupVersion.WithResource("resourceclaims"))
	claimEvaluator := &claimEvaluator{listFuncByNamespace: listFuncByNamespace}
	return claimEvaluator
}

// claimEvaluator knows how to evaluate quota usage for resource claims.
type claimEvaluator struct {
	// listFuncByNamespace knows how to list resource claims
	listFuncByNamespace generic.ListFuncByNamespace
}

// Constraints verifies that all required resources are present on the item.
func (p *claimEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
	// no-op for resource claims
	return nil
}

// GroupResource that this evaluator tracks.
func (p *claimEvaluator) GroupResource() schema.GroupResource {
	return resourceapi.SchemeGroupVersion.WithResource("resourceclaims").GroupResource()
}
// Handles returns true if the evaluator should handle the specified operation.
func (p *claimEvaluator) Handles(a admission.Attributes) bool {
	op := a.GetOperation()
	if op == admission.Create {
		return true
	}
	if op == admission.Update {
		return true
	}
	return false
}
// Matches returns true if the evaluator matches the specified quota with the provided input item.
func (p *claimEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
	return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
}

// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
func (p *claimEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope.
func (p *claimEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
	return []corev1.ScopedResourceSelectorRequirement{}, nil
}

// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *claimEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName {
	result := []corev1.ResourceName{}
	for _, item := range items {
		if item == ClaimObjectCountName /* object count quota fields */ ||
			strings.HasSuffix(string(item), corev1.ResourceClaimsPerClass /* by device class */) {
			result = append(result, item)
		}
	}
	return result
}
// Usage knows how to measure usage associated with item.
func (p *claimEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
	result := corev1.ResourceList{}
	claim, err := toExternalResourceClaimOrError(item)
	if err != nil {
		return result, err
	}

	// charge for claim
	result[ClaimObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
	for _, request := range claim.Spec.Devices.Requests {
		deviceClassClaim := V1ResourceByDeviceClass(request.DeviceClassName)
		var numDevices int64
		switch request.AllocationMode {
		case resourceapi.DeviceAllocationModeExactCount:
			numDevices = request.Count
		case resourceapi.DeviceAllocationModeAll:
			// Worst case...
			numDevices = resourceapi.AllocationResultsMaxSize
		default:
			// Could happen after a downgrade. Unknown modes
			// don't count towards the quota and users shouldn't
			// expect that when downgrading.
		}
		quantity := result[deviceClassClaim]
		quantity.Add(*(resource.NewQuantity(numDevices, resource.DecimalSI)))
		result[deviceClassClaim] = quantity
	}

	return result, nil
}
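The charging rule in short: every claim costs one object count, and each request adds its device count to the per-class total, with DeviceAllocationModeAll charged at the worst case. A hedged sketch of the arithmetic, in comment form, for a hypothetical two-request claim (not taken from this PR):

// Hypothetical claim with two requests for class "gold":
//   req-0: DeviceAllocationModeExactCount, Count = 2
//   req-1: DeviceAllocationModeAll
// Usage returned by the evaluator:
//   count/resourceclaims.resource.k8s.io     = 1
//   gold.deviceclass.resource.k8s.io/devices = 2 + AllocationResultsMaxSize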
// UsageStats calculates aggregate usage for the object.
func (p *claimEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
}

// ensure we implement required interface
var _ quota.Evaluator = &claimEvaluator{}
func toExternalResourceClaimOrError(obj runtime.Object) (*resourceapi.ResourceClaim, error) {
	claim := &resourceapi.ResourceClaim{}
	switch t := obj.(type) {
	case *resourceapi.ResourceClaim:
		claim = t
	case *resourceinternal.ResourceClaim:
		if err := resourceversioned.Convert_resource_ResourceClaim_To_v1alpha3_ResourceClaim(t, claim, nil); err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("expect *resource.ResourceClaim or *v1alpha3.ResourceClaim, got %v", t)
	}
	return claim, nil
}
pkg/quota/v1/evaluator/core/resource_claims_test.go (new file, 172 lines)
@@ -0,0 +1,172 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/apis/resource"
)
func testResourceClaim(name string, namespace string, spec api.ResourceClaimSpec) *api.ResourceClaim {
	return &api.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec:       spec,
	}
}

func TestResourceClaimEvaluatorUsage(t *testing.T) {
	classGpu := "gpu"
	validClaim := testResourceClaim("foo", "ns", api.ResourceClaimSpec{Devices: api.DeviceClaim{Requests: []api.DeviceRequest{{Name: "req-0", DeviceClassName: classGpu, AllocationMode: api.DeviceAllocationModeExactCount, Count: 1}}}})

	evaluator := NewResourceClaimEvaluator(nil)
	testCases := map[string]struct {
		claim  *api.ResourceClaim
		usage  corev1.ResourceList
		errMsg string
	}{
		"simple": {
			claim: validClaim,
			usage: corev1.ResourceList{
				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
			},
		},
		"many-requests": {
			claim: func() *api.ResourceClaim {
				claim := validClaim.DeepCopy()
				for i := 0; i < 4; i++ {
					claim.Spec.Devices.Requests = append(claim.Spec.Devices.Requests, claim.Spec.Devices.Requests[0])
				}
				return claim
			}(),
			usage: corev1.ResourceList{
				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("5"),
			},
		},
		"count": {
			claim: func() *api.ResourceClaim {
				claim := validClaim.DeepCopy()
				claim.Spec.Devices.Requests[0].Count = 5
				return claim
			}(),
			usage: corev1.ResourceList{
				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("5"),
			},
		},
		"all": {
			claim: func() *api.ResourceClaim {
				claim := validClaim.DeepCopy()
				claim.Spec.Devices.Requests[0].AllocationMode = api.DeviceAllocationModeAll
				return claim
			}(),
			usage: corev1.ResourceList{
				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
				"gpu.deviceclass.resource.k8s.io/devices": *resource.NewQuantity(api.AllocationResultsMaxSize, resource.DecimalSI),
			},
		},
		"unknown-count-mode": {
			claim: func() *api.ResourceClaim {
				claim := validClaim.DeepCopy()
				claim.Spec.Devices.Requests[0].AllocationMode = "future-mode"
				return claim
			}(),
			usage: corev1.ResourceList{
				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("0"),
			},
		},
		"admin": {
			claim: func() *api.ResourceClaim {
				claim := validClaim.DeepCopy()
				// Admins are *not* exempt from quota.
				claim.Spec.Devices.Requests[0].AdminAccess = true
				return claim
			}(),
			usage: corev1.ResourceList{
				"count/resourceclaims.resource.k8s.io":    resource.MustParse("1"),
				"gpu.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
			},
		},
	}
	for testName, testCase := range testCases {
		t.Run(testName, func(t *testing.T) {
			actual, err := evaluator.Usage(testCase.claim)
			if err != nil {
				if testCase.errMsg == "" {
					t.Fatalf("Unexpected error: %v", err)
				}
				if !strings.Contains(err.Error(), testCase.errMsg) {
					t.Fatalf("Expected error %q, got error: %v", testCase.errMsg, err.Error())
				}
			}
			if err == nil && testCase.errMsg != "" {
				t.Fatalf("Expected error %q, got none", testCase.errMsg)
			}
			if diff := cmp.Diff(testCase.usage, actual); diff != "" {
				t.Errorf("Unexpected usage (-want, +got):\n%s", diff)
			}
		})
	}
}
func TestResourceClaimEvaluatorMatchingResources(t *testing.T) {
	evaluator := NewResourceClaimEvaluator(nil)
	testCases := map[string]struct {
		items []corev1.ResourceName
		want  []corev1.ResourceName
	}{
		"supported-resources": {
			items: []corev1.ResourceName{
				"count/resourceclaims.resource.k8s.io",
				"gpu.deviceclass.resource.k8s.io/devices",
			},

			want: []corev1.ResourceName{
				"count/resourceclaims.resource.k8s.io",
				"gpu.deviceclass.resource.k8s.io/devices",
			},
		},
		"unsupported-resources": {
			items: []corev1.ResourceName{
				"resourceclaims", // no such alias
				"storage",
				"ephemeral-storage",
				"bronze.deviceclass.resource.k8s.io/storage",
				"gpu.storage.k8s.io/requests.storage",
			},
			want: []corev1.ResourceName{},
		},
	}
	for testName, testCase := range testCases {
		t.Run(testName, func(t *testing.T) {
			actual := evaluator.MatchingResources(testCase.items)

			if diff := cmp.Diff(testCase.want, actual); diff != "" {
				t.Errorf("Unexpected response (-want, +got):\n%s", diff)
			}
		})
	}
}
@@ -6979,6 +6979,8 @@ const (
	ResourceLimitsMemory ResourceName = "limits.memory"
	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
	// resource.k8s.io devices requested with a certain DeviceClass, number
	ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
)

// The following identify resource prefix for Kubernetes object types
@@ -25,6 +25,7 @@ import (
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1alpha3"
	schedulingv1 "k8s.io/api/scheduling/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -489,6 +490,56 @@ var _ = SIGDescribe("ResourceQuota", func() {
		framework.ExpectNoError(err)
	})

	/*
		Release: v1.31
		Testname: ResourceQuota, object count quota, ResourceClaim
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a ResourceClaim. Creation MUST be successful and resource usage count against the ResourceClaim object MUST be captured in the ResourceQuotaStatus of the ResourceQuota.
		Delete the ResourceClaim. Deletion MUST succeed and resource usage count against the ResourceClaim object MUST be released from the ResourceQuotaStatus of the ResourceQuota.
		[NotConformancePromotable] alpha feature
	*/
	f.It("should create a ResourceQuota and capture the life of a ResourceClaim", feature.DynamicResourceAllocation, func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuotaDRA(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[core.ClaimObjectCountName] = resource.MustParse("0")
		usedResources[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceClaim")
		claim := newTestResourceClaimForQuota("test-claim")
		claim, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures resource claim creation")
		usedResources = v1.ResourceList{}
		usedResources[core.ClaimObjectCountName] = resource.MustParse("1")
		usedResources[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a ResourceClaim")
		err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[core.ClaimObjectCountName] = resource.MustParse("0")
		usedResources[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, pvc
@@ -1906,6 +1957,14 @@ func newTestResourceQuota(name string) *v1.ResourceQuota {
	}
}

// newTestResourceQuotaDRA returns a quota that includes hard limits for ResourceClaim objects.
func newTestResourceQuotaDRA(name string) *v1.ResourceQuota {
	quota := newTestResourceQuota(name)
	quota.Spec.Hard[core.ClaimObjectCountName] = resource.MustParse("1")
	quota.Spec.Hard[core.V1ResourceByDeviceClass(classGold)] = resource.MustParse("1")
	return quota
}

// newTestPodForQuota returns a pod that has the specified requests and limits
func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
	return &v1.Pod{

@@ -2003,6 +2062,23 @@ func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim
	}
}

// newTestResourceClaimForQuota returns a simple resource claim
func newTestResourceClaimForQuota(name string) *resourceapi.ResourceClaim {
	return &resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: resourceapi.ResourceClaimSpec{
			Devices: resourceapi.DeviceClaim{
				Requests: []resourceapi.DeviceRequest{{
					Name:            "req-0",
					DeviceClassName: classGold,
				}},
			},
		},
	}
}

// newTestReplicationControllerForQuota returns a simple replication controller
func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
	return &v1.ReplicationController{
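Outside the e2e framework, a cluster operator would express the same limits directly on a ResourceQuota object. A hedged client-go sketch mirroring newTestResourceQuotaDRA above (the namespace "ns", quota name "dra-quota", class name "gold", and the clientset parameter are assumptions for illustration, not part of this PR):

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createDRAQuota caps the "ns" namespace at one ResourceClaim object and
// one device of class "gold" summed across all claims in the namespace.
func createDRAQuota(ctx context.Context, cs kubernetes.Interface) error {
	q := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: "dra-quota", Namespace: "ns"},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				"count/resourceclaims.resource.k8s.io":     resource.MustParse("1"),
				"gold.deviceclass.resource.k8s.io/devices": resource.MustParse("1"),
			},
		},
	}
	_, err := cs.CoreV1().ResourceQuotas("ns").Create(ctx, q, metav1.CreateOptions{})
	return err
}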
@@ -1051,7 +1051,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
		b.testPod(ctx, f.ClientSet, pod)
	})

-	ginkgo.It("supports count/resourceclaim.resource ResourceQuota", func(ctx context.Context) {
+	ginkgo.It("supports count/resourceclaims.resource.k8s.io ResourceQuota", func(ctx context.Context) {
		claim := &resourceapi.ResourceClaim{
			ObjectMeta: metav1.ObjectMeta{
				Name: "claim-0",