Introduce priority class in the resource quota
committed by vikaschoudhary16
parent 4c13f5fdf5
commit 3cfe6412c7
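
For orientation before the diff: the change pairs an admission-plugin configuration that marks a scope as "limited" with a ResourceQuota whose scopeSelector covers that scope. The sketch below is illustrative only and not part of the commit; it reuses the package aliases and import paths visible in the test file and BUILD deps of this diff (api = pkg/apis/core, resourcequotaapi = the plugin's config API) and is only meaningful when built inside the kubernetes source tree.

// Minimal sketch, assuming the import paths listed in the BUILD deps below.
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/apis/core"
	resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
)

// The admission configuration: pods using the "cluster-services" priority
// class are a limited resource, so a covering quota must exist for them.
var config = &resourcequotaapi.Configuration{
	LimitedResources: []resourcequotaapi.LimitedResource{{
		Resource: "pods",
		MatchScopes: []api.ScopedResourceSelectorRequirement{{
			ScopeName: api.ResourceQuotaScopePriorityClass,
			Operator:  api.ScopeSelectorOpIn,
			Values:    []string{"cluster-services"},
		}},
	}},
}

// The covering quota: its scopeSelector matches the same scope, so matching
// pods are admitted (and charged against this quota) instead of rejected.
var coveringQuota = &api.ResourceQuota{
	ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test"},
	Spec: api.ResourceQuotaSpec{
		ScopeSelector: &api.ScopeSelector{
			MatchExpressions: []api.ScopedResourceSelectorRequirement{{
				ScopeName: api.ResourceQuotaScopePriorityClass,
				Operator:  api.ScopeSelectorOpIn,
				Values:    []string{"cluster-services"},
			}},
		},
	},
}

func main() {}

The tests added below construct exactly these two kinds of objects and assert which combinations admit a pod and which return "insufficient quota to match these scopes".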
@@ -17,6 +17,7 @@ limitations under the License.
package resourcequota

import (
	"fmt"
	"strconv"
	"strings"
	"testing"
@@ -73,6 +74,14 @@ func validPod(name string, numContainers int, resources api.ResourceRequirements
	return pod
}

func validPodWithPriority(name string, numContainers int, resources api.ResourceRequirements, priorityClass string) *api.Pod {
	pod := validPod(name, numContainers, resources)
	if priorityClass != "" {
		pod.Spec.PriorityClassName = priorityClass
	}
	return pod
}

func validPersistentVolumeClaim(name string, resources api.ResourceRequirements) *api.PersistentVolumeClaim {
	return &api.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
@@ -1445,3 +1454,672 @@ func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) {
		t.Fatalf("Expected an error since the quota did not cover cpu")
	}
}

// TestAdmitLimitedScopeWithCoverQuota verifies that if a limited scope is configured, a quota covering that scope must exist.
func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) {
	testCases := []struct {
		description  string
		testPod      *api.Pod
		quota        *api.ResourceQuota
		anotherQuota *api.ResourceQuota
		config       *resourcequotaapi.Configuration
		expErr       string
	}{
		{
			description: "Covering quota exists for configured limited scope PriorityClassNameExists.",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
				},
			},
			expErr: "",
		},
		{
			description: "configured limited scope PriorityClassNameExists and limited cpu resource. No covering quota for cpu and pod admit fails.",
			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
						MatchContains: []string{"requests.cpu"}, // match on "requests.cpu" only
					},
				},
			},
			expErr: "insufficient quota to consume: requests.cpu",
		},
		{
			description: "Covering quota does not exist for configured limited scope PriorityClassNameExists.",
			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
			quota:       &api.ResourceQuota{},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{PriorityClass Exists []}]",
		},
		{
			description: "Covering quota does not exist for configured limited scope resourceQuotaBestEffort",
			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
			quota:       &api.ResourceQuota{},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
		},
		{
			description: "Covering quota exist for configured limited scope resourceQuotaBestEffort",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
				},
				Status: api.ResourceQuotaStatus{
					Hard: api.ResourceList{
						api.ResourcePods: resource.MustParse("5"),
					},
					Used: api.ResourceList{
						api.ResourcePods: resource.MustParse("3"),
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
				},
			},
			expErr: "",
		},
		{
			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Neither matches pod. Pod allowed",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "fake-priority"),
			quota:       &api.ResourceQuota{},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "",
		},
		{
			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only BestEffort scope matches pod. Pod admit fails because covering quota is missing for BestEffort scope",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
			quota:       &api.ResourceQuota{},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
		},
		{
			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only PriorityClass scope matches pod. Pod admit fails because covering quota is missing for PriorityClass scope",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "cluster-services"),
			quota:       &api.ResourceQuota{},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{PriorityClass In [cluster-services]}]",
		},
		{
			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Pod admit fails because covering quota is missing for PriorityClass scope and BestEffort scope",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
			quota:       &api.ResourceQuota{},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []} {PriorityClass In [cluster-services]}]",
		},
		{
			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for BestEffort scope. Pod admit fails because covering quota is missing for PriorityClass scope",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
				},
				Status: api.ResourceQuotaStatus{
					Hard: api.ResourceList{
						api.ResourcePods: resource.MustParse("5"),
					},
					Used: api.ResourceList{
						api.ResourcePods: resource.MustParse("3"),
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{PriorityClass In [cluster-services]}]",
		},
		{
			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for PriorityClass scope. Pod admit fails because covering quota is missing for BestEffort scope",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
		},
		{
			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for both the scopes. Pod admit success. No Error",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
				},
				Status: api.ResourceQuotaStatus{
					Hard: api.ResourceList{
						api.ResourcePods: resource.MustParse("5"),
					},
					Used: api.ResourceList{
						api.ResourcePods: resource.MustParse("3"),
					},
				},
			},
			anotherQuota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopeBestEffort,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "",
		},
		{
			description: "Pod allowed with priorityclass if limited scope PriorityClassNameExists not configured.",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
			quota:       &api.ResourceQuota{},
			config:      &resourcequotaapi.Configuration{},
			expErr:      "",
		},
		{
			description: "quota fails, though covering quota for configured limited scope PriorityClassNameExists exists.",
			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "20Gi"), getResourceList("", "")), "fake-priority"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists},
						},
					},
				},
				Status: api.ResourceQuotaStatus{
					Hard: api.ResourceList{
						api.ResourceMemory: resource.MustParse("10Gi"),
					},
					Used: api.ResourceList{
						api.ResourceMemory: resource.MustParse("1Gi"),
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists,
							},
						},
					},
				},
			},
			expErr: "forbidden: exceeded quota: quota, requested: memory=20Gi, used: memory=1Gi, limited: memory=10Gi",
		},
		{
			description: "Pod has different priorityclass than configured limited. Covering quota exists for configured limited scope PriorityClassIn.",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			expErr: "",
		},
		{
			description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn.",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"cluster-services"},
							},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"another-priorityclass-name", "cluster-services"},
							},
						},
					},
				},
			},
			expErr: "",
		},
		{
			description: "Pod has limited priorityclass. Covering quota does not exist for configured limited scope PriorityClassIn.",
			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"another-priorityclass-name"},
							},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"another-priorityclass-name", "cluster-services"},
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{PriorityClass In [another-priorityclass-name cluster-services]}]",
		},
		{
			description: "From the above test case, just changing pod priority from cluster-services to another-priorityclass-name. expecting no error",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "another-priorityclass-name"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"another-priorityclass-name"},
							},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"another-priorityclass-name", "cluster-services"},
							},
						},
					},
				},
			},
			expErr: "",
		},
		{
			description: "Pod has limited priorityclass. Covering quota does NOT exists for configured limited scope PriorityClassIn.",
			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
			quota:       &api.ResourceQuota{},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"another-priorityclass-name", "cluster-services"},
							},
						},
					},
				},
			},
			expErr: "insufficient quota to match these scopes: [{PriorityClass In [another-priorityclass-name cluster-services]}]",
		},
		{
			description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn through PriorityClassNameExists",
			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
			quota: &api.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
				Spec: api.ResourceQuotaSpec{
					ScopeSelector: &api.ScopeSelector{
						MatchExpressions: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpExists},
						},
					},
				},
			},
			config: &resourcequotaapi.Configuration{
				LimitedResources: []resourcequotaapi.LimitedResource{
					{
						Resource: "pods",
						MatchScopes: []api.ScopedResourceSelectorRequirement{
							{
								ScopeName: api.ResourceQuotaScopePriorityClass,
								Operator:  api.ScopeSelectorOpIn,
								Values:    []string{"another-priorityclass-name", "cluster-services"},
							},
						},
					},
				},
			},
			expErr: "",
		},
	}

	for _, testCase := range testCases {
		newPod := testCase.testPod
		config := testCase.config
		resourceQuota := testCase.quota
		kubeClient := fake.NewSimpleClientset(resourceQuota)
		if testCase.anotherQuota != nil {
			kubeClient = fake.NewSimpleClientset(resourceQuota, testCase.anotherQuota)
		}
		indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
		stopCh := make(chan struct{})
		defer close(stopCh)

		informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
		quotaAccessor, _ := newQuotaAccessor()
		quotaAccessor.client = kubeClient
		quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister()

		quotaConfiguration := install.NewQuotaConfigurationForAdmission()
		evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh)

		handler := &QuotaAdmission{
			Handler:   admission.NewHandler(admission.Create, admission.Update),
			evaluator: evaluator,
		}
		indexer.Add(resourceQuota)
		if testCase.anotherQuota != nil {
			indexer.Add(testCase.anotherQuota)
		}
		err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
		if testCase.expErr == "" {
			if err != nil {
				t.Fatalf("Testcase, %v, failed with unexpected error: %v. ExpErr: %v", testCase.description, err, testCase.expErr)
			}
		} else {
			if !strings.Contains(fmt.Sprintf("%v", err), testCase.expErr) {
				t.Fatalf("Testcase, %v, failed with unexpected error: %v. ExpErr: %v", testCase.description, err, testCase.expErr)
			}
		}

	}
}

@@ -15,6 +15,7 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",

@@ -16,7 +16,10 @@ limitations under the License.

package resourcequota

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/core"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

@@ -54,4 +57,16 @@ type LimitedResource struct {
	// with any storage class, the list would include
	// ".storageclass.storage.k8s.io/requests.storage"
	MatchContains []string

	// For each intercepted request, the quota system will figure out if the input object
	// satisfies a scope which is present in this listing, then
	// quota system will ensure that there is a covering quota. In the
	// absence of a covering quota, the quota system will deny the request.
	// For example, if an administrator wants to globally enforce that
	// a quota must exist to create a pod with "cluster-services" priorityclass
	// the list would include
	// "PriorityClassNameIn=cluster-services"
	// +optional
	// MatchScopes []string `json:"matchScopes,omitempty"`
	MatchScopes []core.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"`
}

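The example in the comment above ("PriorityClassNameIn=cluster-services") reflects an earlier string-based shape, also visible in the commented-out MatchScopes []string line; the field actually added here carries structured requirements. Purely as an illustration (not part of the commit), the same intent in the struct form this change introduces, using the core package imported in this file:

	// Illustrative only; mirrors the fixtures in the admission tests earlier in this diff.
	limited := LimitedResource{
		Resource: "pods",
		MatchScopes: []core.ScopedResourceSelectorRequirement{{
			ScopeName: core.ResourceQuotaScopePriorityClass,
			Operator:  core.ScopeSelectorOpIn,
			Values:    []string{"cluster-services"},
		}},
	}
	_ = limited
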
@@ -18,7 +18,9 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",

@@ -16,7 +16,10 @@ limitations under the License.

package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

@@ -54,4 +57,13 @@ type LimitedResource struct {
	// with any storage class, the list would include
	// ".storageclass.storage.k8s.io/requests.storage"
	MatchContains []string `json:"matchContains,omitempty"`
	// For each intercepted request, the quota system will figure out if the input object
	// satisfies a scope which is present in this listing, then
	// quota system will ensure that there is a covering quota. In the
	// absence of a covering quota, the quota system will deny the request.
	// For example, if an administrator wants to globally enforce that
	// a quota must exist to create a pod with "cluster-services" priorityclass
	// the list would include "scopeName=PriorityClass, Operator=In, Value=cluster-services"
	// +optional
	MatchScopes []v1.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"`
}

@@ -23,8 +23,10 @@ package v1alpha1
import (
	unsafe "unsafe"

	v1 "k8s.io/api/core/v1"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
	core "k8s.io/kubernetes/pkg/apis/core"
	resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
)

@@ -67,6 +69,7 @@ func autoConvert_v1alpha1_LimitedResource_To_resourcequota_LimitedResource(in *L
	out.APIGroup = in.APIGroup
	out.Resource = in.Resource
	out.MatchContains = *(*[]string)(unsafe.Pointer(&in.MatchContains))
	out.MatchScopes = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes))
	return nil
}

@@ -79,6 +82,7 @@ func autoConvert_resourcequota_LimitedResource_To_v1alpha1_LimitedResource(in *r
	out.APIGroup = in.APIGroup
	out.Resource = in.Resource
	out.MatchContains = *(*[]string)(unsafe.Pointer(&in.MatchContains))
	out.MatchScopes = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes))
	return nil
}

@@ -21,6 +21,7 @@ limitations under the License.
package v1alpha1

import (
	v1 "k8s.io/api/core/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -64,6 +65,13 @@ func (in *LimitedResource) DeepCopyInto(out *LimitedResource) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.MatchScopes != nil {
		in, out := &in.MatchScopes, &out.MatchScopes
		*out = make([]v1.ScopedResourceSelectorRequirement, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

@@ -22,6 +22,7 @@ package resourcequota

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
	core "k8s.io/kubernetes/pkg/apis/core"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -64,6 +65,13 @@ func (in *LimitedResource) DeepCopyInto(out *LimitedResource) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.MatchScopes != nil {
		in, out := &in.MatchScopes, &out.MatchScopes
		*out = make([]core.ScopedResourceSelectorRequirement, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

@@ -27,6 +27,7 @@ import (

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
@@ -367,6 +368,21 @@ func limitedByDefault(usage api.ResourceList, limitedResources []resourcequotaap
	return result
}

func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]api.ScopedResourceSelectorRequirement, error) {
	scopes := []api.ScopedResourceSelectorRequirement{}
	for _, limitedResource := range limitedResources {
		matched, err := evaluator.MatchingScopes(inputObject, limitedResource.MatchScopes)
		if err != nil {
			glog.Errorf("Error while matching limited Scopes: %v", err)
			return []api.ScopedResourceSelectorRequirement{}, err
		}
		for _, scope := range matched {
			scopes = append(scopes, scope)
		}
	}
	return scopes, nil
}

// checkRequest verifies that the request does not exceed any quota constraint. it returns a copy of quotas not yet persisted
// that capture what the usage would be if the request succeeded. It return an error if there is insufficient quota to satisfy the request
func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.Attributes) ([]api.ResourceQuota, error) {
@@ -383,6 +399,12 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
	// if we have limited resources enabled for this resource, always calculate usage
	inputObject := a.GetObject()

	// Check if object matches AdmissionConfiguration matchScopes
	limitedScopes, err := getMatchedLimitedScopes(evaluator, inputObject, e.config.LimitedResources)
	if err != nil {
		return quotas, nil
	}

	// determine the set of resource names that must exist in a covering quota
	limitedResourceNames := []api.ResourceName{}
	limitedResources := filterLimitedResourcesByGroupResource(e.config.LimitedResources, a.GetResource().GroupResource())
@@ -404,10 +426,21 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
	// this is needed to know if we have satisfied any constraints where consumption
	// was limited by default.
	restrictedResourcesSet := sets.String{}
	restrictedScopes := []api.ScopedResourceSelectorRequirement{}
	for i := range quotas {
		resourceQuota := quotas[i]
		scopeSelectors := getScopeSelectorsFromQuota(resourceQuota)
		localRestrictedScopes, err := evaluator.MatchingScopes(inputObject, scopeSelectors)
		if err != nil {
			return nil, fmt.Errorf("error matching scopes of quota %s, err: %v", resourceQuota.Name, err)
		}
		for _, scope := range localRestrictedScopes {
			restrictedScopes = append(restrictedScopes, scope)
		}

		match, err := evaluator.Matches(&resourceQuota, inputObject)
		if err != nil {
			glog.Errorf("Error occurred while matching resource quota, %v, against input object. Err: %v", resourceQuota, err)
			return quotas, err
		}
		if !match {
@@ -435,6 +468,17 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
		return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ",")))
	}

	// verify that for every scope that had limited access enabled
	// that there was a corresponding quota that covered it.
	// if not, we reject the request.
	scopesHasNoCoveringQuota, err := evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes)
	if err != nil {
		return quotas, err
	}
	if len(scopesHasNoCoveringQuota) > 0 {
		return quotas, fmt.Errorf("insufficient quota to match these scopes: %v", scopesHasNoCoveringQuota)
	}

	if len(interestingQuotaIndexes) == 0 {
		return quotas, nil
	}
@@ -516,6 +560,21 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
	return outQuotas, nil
}

func getScopeSelectorsFromQuota(quota api.ResourceQuota) []api.ScopedResourceSelectorRequirement {
	selectors := []api.ScopedResourceSelectorRequirement{}
	for _, scope := range quota.Spec.Scopes {
		selectors = append(selectors, api.ScopedResourceSelectorRequirement{
			ScopeName: scope,
			Operator:  api.ScopeSelectorOpExists})
	}
	if quota.Spec.ScopeSelector != nil {
		for _, scopeSelector := range quota.Spec.ScopeSelector.MatchExpressions {
			selectors = append(selectors, scopeSelector)
		}
	}
	return selectors
}

func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
	e.init.Do(func() {
		go e.run()
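
The rejection path above relies on evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes), whose implementation belongs to the quota evaluator package and is not part of this excerpt. As a rough, self-contained sketch of the set-difference semantics suggested by the error message (scopes demanded by the admission configuration minus scopes already covered by a matching quota), with simplified local types standing in for api.ScopedResourceSelectorRequirement:

package main

import "fmt"

// scopeRequirement is a simplified stand-in for api.ScopedResourceSelectorRequirement.
type scopeRequirement struct {
	ScopeName string
	Operator  string
	Values    []string
}

// key flattens a requirement so it can be compared; the real evaluator
// compares the structured fields, this is only for illustration.
func key(r scopeRequirement) string {
	return fmt.Sprintf("%s/%s/%v", r.ScopeName, r.Operator, r.Values)
}

// uncovered returns the limited scopes for which no covering scope was found.
func uncovered(limited, covered []scopeRequirement) []scopeRequirement {
	coveredSet := map[string]bool{}
	for _, c := range covered {
		coveredSet[key(c)] = true
	}
	out := []scopeRequirement{}
	for _, l := range limited {
		if !coveredSet[key(l)] {
			out = append(out, l)
		}
	}
	return out
}

func main() {
	limited := []scopeRequirement{{ScopeName: "PriorityClass", Operator: "In", Values: []string{"cluster-services"}}}
	covered := []scopeRequirement{{ScopeName: "BestEffort", Operator: "Exists"}}
	// Prints the PriorityClass requirement: no quota covered it, so in the
	// admission plugin the request would be rejected with
	// "insufficient quota to match these scopes".
	fmt.Println(uncovered(limited, covered))
}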