Fix incorrect calculation for ResourceQuota with PriorityClass as its scope

This commit is contained in:
Wei Huang 2023-04-30 15:30:12 -07:00
parent f9340f3cf1
commit edd032e22b
No known key found for this signature in database
GPG Key ID: 17AFE05D01EA77B2
2 changed files with 187 additions and 0 deletions

View File

@ -328,6 +328,10 @@ func podMatchesScopeFunc(selector corev1.ScopedResourceSelectorRequirement, obje
case corev1.ResourceQuotaScopeNotBestEffort:
return !isBestEffort(pod), nil
case corev1.ResourceQuotaScopePriorityClass:
if len(selector.Operator) == 0 && selector.Values == nil {
// this is just checking for existence of a priorityClass on the pod
return len(pod.Spec.PriorityClassName) != 0, nil
}
return podMatchesSelector(pod, selector)
case corev1.ResourceQuotaScopeCrossNamespacePodAffinity:
return usesCrossNamespacePodAffinity(pod), nil

View File

@ -17,6 +17,7 @@ limitations under the License.
package core
import (
"fmt"
"testing"
"time"
@ -24,10 +25,12 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
quota "k8s.io/apiserver/pkg/quota/v1"
"k8s.io/apiserver/pkg/quota/v1/generic"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
featuregatetesting "k8s.io/component-base/featuregate/testing"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
@ -540,6 +543,146 @@ func TestPodEvaluatorUsage(t *testing.T) {
}
}
// TestPodEvaluatorUsageStats verifies that the pod evaluator's UsageStats
// honors quota scopes and scope selectors:
//   - pods in a terminal phase (Succeeded/Failed) stop counting toward
//     compute resources but still count toward count/pods;
//   - pods that do not match the ScopeSelector contribute nothing;
//   - when explicit Scopes are given, only the resources tracked by those
//     scopes are reported.
func TestPodEvaluatorUsageStats(t *testing.T) {
	cpu1 := api.ResourceList{api.ResourceCPU: resource.MustParse("1")}
	tests := []struct {
		name               string
		objs               []runtime.Object
		quotaScopes        []corev1.ResourceQuotaScope
		quotaScopeSelector *corev1.ScopeSelector
		want               corev1.ResourceList
	}{
		{
			name: "nil case",
		},
		{
			name: "all pods in running state",
			objs: []runtime.Object{
				makePod("p1", "", cpu1, api.PodRunning),
				makePod("p2", "", cpu1, api.PodRunning),
			},
			want: corev1.ResourceList{
				corev1.ResourcePods:               resource.MustParse("2"),
				corev1.ResourceName("count/pods"): resource.MustParse("2"),
				corev1.ResourceCPU:                resource.MustParse("2"),
				corev1.ResourceRequestsCPU:        resource.MustParse("2"),
				corev1.ResourceLimitsCPU:          resource.MustParse("2"),
			},
		},
		{
			// The terminal (Succeeded) pod frees its compute resources but
			// still counts toward the count/pods object-count quota.
			name: "one pod in terminal state",
			objs: []runtime.Object{
				makePod("p1", "", cpu1, api.PodRunning),
				makePod("p2", "", cpu1, api.PodSucceeded),
			},
			want: corev1.ResourceList{
				corev1.ResourcePods:               resource.MustParse("1"),
				corev1.ResourceName("count/pods"): resource.MustParse("2"),
				corev1.ResourceCPU:                resource.MustParse("1"),
				corev1.ResourceRequestsCPU:        resource.MustParse("1"),
				corev1.ResourceLimitsCPU:          resource.MustParse("1"),
			},
		},
		{
			// Only the two high-priority pods match the selector; of those,
			// the running one counts toward compute, both toward count/pods.
			name: "partial pods matching quotaScopeSelector",
			objs: []runtime.Object{
				makePod("p1", "high-priority", cpu1, api.PodRunning),
				makePod("p2", "high-priority", cpu1, api.PodSucceeded),
				makePod("p3", "low-priority", cpu1, api.PodRunning),
			},
			quotaScopeSelector: &corev1.ScopeSelector{
				MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
					{
						ScopeName: corev1.ResourceQuotaScopePriorityClass,
						Operator:  corev1.ScopeSelectorOpIn,
						Values:    []string{"high-priority"},
					},
				},
			},
			want: corev1.ResourceList{
				corev1.ResourcePods:               resource.MustParse("1"),
				corev1.ResourceName("count/pods"): resource.MustParse("2"),
				corev1.ResourceCPU:                resource.MustParse("1"),
				corev1.ResourceRequestsCPU:        resource.MustParse("1"),
				corev1.ResourceLimitsCPU:          resource.MustParse("1"),
			},
		},
		{
			// Same as above but with the PriorityClass scope spelled out in
			// Scopes as well as the selector; the result must not change.
			name: "partial pods matching quotaScopeSelector - w/ scopeName specified",
			objs: []runtime.Object{
				makePod("p1", "high-priority", cpu1, api.PodRunning),
				makePod("p2", "high-priority", cpu1, api.PodSucceeded),
				makePod("p3", "low-priority", cpu1, api.PodRunning),
			},
			quotaScopes: []corev1.ResourceQuotaScope{
				corev1.ResourceQuotaScopePriorityClass,
			},
			quotaScopeSelector: &corev1.ScopeSelector{
				MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
					{
						ScopeName: corev1.ResourceQuotaScopePriorityClass,
						Operator:  corev1.ScopeSelectorOpIn,
						Values:    []string{"high-priority"},
					},
				},
			},
			want: corev1.ResourceList{
				corev1.ResourcePods:               resource.MustParse("1"),
				corev1.ResourceName("count/pods"): resource.MustParse("2"),
				corev1.ResourceCPU:                resource.MustParse("1"),
				corev1.ResourceRequestsCPU:        resource.MustParse("1"),
				corev1.ResourceLimitsCPU:          resource.MustParse("1"),
			},
		},
		{
			// With both PriorityClass and BestEffort scopes, only the
			// best-effort high-priority pod (p4) matches every scope, and a
			// terminal best-effort pod contributes only to count/pods.
			name: "partial pods matching quotaScopeSelector - w/ multiple scopeNames specified",
			objs: []runtime.Object{
				makePod("p1", "high-priority", cpu1, api.PodRunning),
				makePod("p2", "high-priority", cpu1, api.PodSucceeded),
				makePod("p3", "low-priority", cpu1, api.PodRunning),
				makePod("p4", "high-priority", nil, api.PodFailed),
			},
			quotaScopes: []corev1.ResourceQuotaScope{
				corev1.ResourceQuotaScopePriorityClass,
				corev1.ResourceQuotaScopeBestEffort,
			},
			quotaScopeSelector: &corev1.ScopeSelector{
				MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
					{
						ScopeName: corev1.ResourceQuotaScopePriorityClass,
						Operator:  corev1.ScopeSelectorOpIn,
						Values:    []string{"high-priority"},
					},
				},
			},
			want: corev1.ResourceList{
				corev1.ResourceName("count/pods"): resource.MustParse("1"),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gvr := corev1.SchemeGroupVersion.WithResource("pods")
			listerForPod := map[schema.GroupVersionResource]cache.GenericLister{
				gvr: newGenericLister(gvr.GroupResource(), tt.objs),
			}
			evaluator := NewPodEvaluator(mockListerForResourceFunc(listerForPod), testingclock.NewFakeClock(time.Now()))
			usageStatsOption := quota.UsageStatsOptions{
				Scopes:        tt.quotaScopes,
				ScopeSelector: tt.quotaScopeSelector,
			}
			actual, err := evaluator.UsageStats(usageStatsOption)
			if err != nil {
				// Fail fast: comparing a zero-valued actual.Used below would
				// only produce a second, misleading failure message.
				t.Fatalf("unexpected error: %v", err)
			}
			if !quota.Equals(tt.want, actual.Used) {
				t.Errorf("expected: %v, actual: %v", tt.want, actual.Used)
			}
		})
	}
}
func TestPodEvaluatorMatchingScopes(t *testing.T) {
fakeClock := testingclock.NewFakeClock(time.Now())
evaluator := NewPodEvaluator(nil, fakeClock)
@ -961,3 +1104,43 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
}
}
}
// mockListerForResourceFunc adapts a fixed GVR-to-lister map into the
// quota.ListerForResourceFunc callback expected by NewPodEvaluator.
// Lookups for a GVR absent from the map yield an error.
func mockListerForResourceFunc(listers map[schema.GroupVersionResource]cache.GenericLister) quota.ListerForResourceFunc {
	return func(resource schema.GroupVersionResource) (cache.GenericLister, error) {
		if lister, ok := listers[resource]; ok {
			return lister, nil
		}
		return nil, fmt.Errorf("no lister found for resource %v", resource)
	}
}
// newGenericLister builds a GenericLister over an in-memory indexer
// pre-populated with the given objects, keyed/indexed by namespace.
func newGenericLister(groupResource schema.GroupResource, items []runtime.Object) cache.GenericLister {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	for i := range items {
		// Add cannot fail for objects with valid metadata; error ignored as
		// in typical test fixtures.
		indexer.Add(items[i])
	}
	return cache.NewGenericLister(indexer, groupResource)
}
// makePod returns a single-container pod fixture with the given name,
// priority class, resource list (used for both requests and limits), and
// status phase. A nil resList produces a best-effort pod.
func makePod(name, pcName string, resList api.ResourceList, phase api.PodPhase) *api.Pod {
	container := api.Container{
		Resources: api.ResourceRequirements{
			Requests: resList,
			Limits:   resList,
		},
	}
	pod := api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: api.PodSpec{
			PriorityClassName: pcName,
			Containers:        []api.Container{container},
		},
		Status: api.PodStatus{Phase: phase},
	}
	return &pod
}