Resource Quota enforcement changes for Pod Level Resources
parent 28dea49c04
commit 777221421a
@@ -122,6 +122,18 @@ func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime.
 		return err
 	}
 
+	// As noted in the comment below, older versions required an explicit
+	// resource request for CPU and memory from each container if resource
+	// quotas were enabled for those resources. This was a design flaw, as it
+	// coupled resource validation with quota enforcement. With the pod-level
+	// resources feature, container-level resources are not mandatory. Hence
+	// the check for missing container requests, for CPU/memory resources that
+	// have quotas set, is skipped when the pod-level resources feature is
+	// enabled and resources are set at the pod level.
+	if feature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
+		return nil
+	}
+
 	// BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container
 	// must make an explicit request for the resource. this was a mistake. it coupled
 	// validation with resource counting, but we did this before QoS was even defined.
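A minimal standalone sketch of the decision this hunk introduces, with the feature gate and pod state reduced to plain booleans. mustEachContainerRequest and its inputs are illustrative stand-ins, not the upstream API:

package main

import "fmt"

// mustEachContainerRequest models whether Constraints still demands an
// explicit per-container request for a quota-ed CPU/memory resource.
// Hypothetical toy, assuming the gate/skip semantics shown in the hunk above.
func mustEachContainerRequest(podLevelResourcesEnabled, podLevelResourcesSet bool) bool {
	// New early return: gate on and spec.resources set => legacy check skipped.
	if podLevelResourcesEnabled && podLevelResourcesSet {
		return false
	}
	// Legacy backward-compatibility path: explicit container requests required.
	return true
}

func main() {
	fmt.Println(mustEachContainerRequest(true, true))  // false: check skipped
	fmt.Println(mustEachContainerRequest(true, false)) // true: gate on, but no pod-level resources
	fmt.Println(mustEachContainerRequest(false, true)) // true: gate off, legacy behavior
}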
@@ -367,6 +379,8 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
 
 	opts := resourcehelper.PodResourcesOptions{
 		UseStatusResources: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		// SkipPodLevelResources is set to false when the PodLevelResources feature is enabled.
+		SkipPodLevelResources: !feature.DefaultFeatureGate.Enabled(features.PodLevelResources),
	}
 	requests := resourcehelper.PodRequests(pod, opts)
 	limits := resourcehelper.PodLimits(pod, opts)
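A toy sketch of the aggregation effect of SkipPodLevelResources, assuming the semantics exercised by the tests later in this diff: pod-level declarations win for the resources they name, while anything declared only on containers (such as ephemeral storage) still comes from summing containers. effectiveRequests is a hypothetical stand-in for resourcehelper.PodRequests, not its real implementation:

package main

import "fmt"

// effectiveRequests merges container-level sums with pod-level declarations.
// Quantities are simplified to raw byte counts for illustration.
func effectiveRequests(podLevel, containerSum map[string]int64, skipPodLevel bool) map[string]int64 {
	out := map[string]int64{}
	for name, v := range containerSum {
		out[name] = v
	}
	if skipPodLevel {
		return out // feature disabled: container aggregation only
	}
	for name, v := range podLevel {
		out[name] = v // pod-level declaration overrides the container sum
	}
	return out
}

func main() {
	podLevel := map[string]int64{"memory": 1 << 20}               // 1Mi at pod level
	containers := map[string]int64{"ephemeral-storage": 32 << 20} // 32Mi across containers
	fmt.Println(effectiveRequests(podLevel, containers, false))
	// map[ephemeral-storage:33554432 memory:1048576]
}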
@@ -42,9 +42,10 @@ import (
 
 func TestPodConstraintsFunc(t *testing.T) {
 	testCases := map[string]struct {
-		pod      *api.Pod
-		required []corev1.ResourceName
-		err      string
+		pod                      *api.Pod
+		required                 []corev1.ResourceName
+		err                      string
+		podLevelResourcesEnabled bool
	}{
 		"init container resource missing": {
 			pod: &api.Pod{
@@ -133,9 +134,30 @@ func TestPodConstraintsFunc(t *testing.T) {
 			required: []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU},
 			err:      `must specify cpu for: bar,foo; memory for: bar,foo`,
 		},
+		"pod-level resource set, container-level required resources missing": {
+			pod: &api.Pod{
+				Spec: api.PodSpec{
+					Resources: &api.ResourceRequirements{
+						Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("1m")},
+					},
+					Containers: []api.Container{{
+						Name:      "foo",
+						Resources: api.ResourceRequirements{},
+					}, {
+						Name:      "bar",
+						Resources: api.ResourceRequirements{},
+					}},
+				},
+			},
+			required:                 []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU},
+			podLevelResourcesEnabled: true,
+			err:                      ``,
+		},
 	}
 	evaluator := NewPodEvaluator(nil, clock.RealClock{})
 	for testName, test := range testCases {
+		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesEnabled)
+
 		err := evaluator.Constraints(test.required, test.pod)
 		switch {
 		case err != nil && len(test.err) == 0,
@@ -158,8 +180,9 @@ func TestPodEvaluatorUsage(t *testing.T) {
 	deletionTimestampNotPastGracePeriod := metav1.NewTime(fakeClock.Now())
 
 	testCases := map[string]struct {
-		pod   *api.Pod
-		usage corev1.ResourceList
+		pod                      *api.Pod
+		usage                    corev1.ResourceList
+		podLevelResourcesEnabled bool
	}{
 		"init container CPU": {
 			pod: &api.Pod{
@@ -529,10 +552,74 @@ func TestPodEvaluatorUsage(t *testing.T) {
 				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
 			},
 		},
+		"pod-level CPU": {
+			pod: &api.Pod{
+				Spec: api.PodSpec{
+					Resources: &api.ResourceRequirements{
+						Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("1m")},
+						Limits:   api.ResourceList{api.ResourceCPU: resource.MustParse("2m")},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			usage: corev1.ResourceList{
+				corev1.ResourceRequestsCPU: resource.MustParse("1m"),
+				corev1.ResourceLimitsCPU:   resource.MustParse("2m"),
+				corev1.ResourcePods:        resource.MustParse("1"),
+				corev1.ResourceCPU:         resource.MustParse("1m"),
+				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
+			},
+		},
+		"pod-level Memory": {
+			pod: &api.Pod{
+				Spec: api.PodSpec{
+					Resources: &api.ResourceRequirements{
+						Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("1Mi")},
+						Limits:   api.ResourceList{api.ResourceMemory: resource.MustParse("2Mi")},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			usage: corev1.ResourceList{
+				corev1.ResourceRequestsMemory: resource.MustParse("1Mi"),
+				corev1.ResourceLimitsMemory:   resource.MustParse("2Mi"),
+				corev1.ResourcePods:           resource.MustParse("1"),
+				corev1.ResourceMemory:         resource.MustParse("1Mi"),
+				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
+			},
+		},
+		"pod-level memory with container-level ephemeral storage": {
+			pod: &api.Pod{
+				Spec: api.PodSpec{
+					Resources: &api.ResourceRequirements{
+						Requests: api.ResourceList{api.ResourceMemory: resource.MustParse("1Mi")},
+						Limits:   api.ResourceList{api.ResourceMemory: resource.MustParse("2Mi")},
+					},
+					Containers: []api.Container{{
+						Resources: api.ResourceRequirements{
+							Requests: api.ResourceList{api.ResourceEphemeralStorage: resource.MustParse("32Mi")},
+							Limits:   api.ResourceList{api.ResourceEphemeralStorage: resource.MustParse("64Mi")},
+						},
+					}},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			usage: corev1.ResourceList{
+				corev1.ResourceEphemeralStorage:         resource.MustParse("32Mi"),
+				corev1.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"),
+				corev1.ResourceLimitsEphemeralStorage:   resource.MustParse("64Mi"),
+				corev1.ResourcePods:                     resource.MustParse("1"),
+				corev1.ResourceRequestsMemory:           resource.MustParse("1Mi"),
+				corev1.ResourceLimitsMemory:             resource.MustParse("2Mi"),
+				corev1.ResourceMemory:                   resource.MustParse("1Mi"),
+				generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
+			},
+		},
 	}
 	t.Parallel()
 	for testName, testCase := range testCases {
 		t.Run(testName, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, testCase.podLevelResourcesEnabled)
 			actual, err := evaluator.Usage(testCase.pod)
 			if err != nil {
 				t.Error(err)
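Tying the test cases above back to the usage computation, a hypothetical toy of how one pod's declaration fans out into quota counters, as the "pod-level CPU" case shows: both the bare resource name ("cpu") and the explicit "requests.cpu"/"limits.cpu" forms are charged, plus the pod object counts. chargedKeys is invented for illustration, not the evaluator's real implementation:

package main

import "fmt"

// chargedKeys returns the quota counter names charged for a single pod that
// requests/limits one resource, mirroring the expected usage maps in the tests.
func chargedKeys(resourceName, request, limit string) map[string]string {
	return map[string]string{
		resourceName:               request, // legacy alias for requests.<name>
		"requests." + resourceName: request,
		"limits." + resourceName:   limit,
		"pods":                     "1", // pod object count
		"count/pods":               "1", // object-count quota form
	}
}

func main() {
	fmt.Println(chargedKeys("cpu", "1m", "2m"))
	// map[count/pods:1 cpu:1m limits.cpu:2m pods:1 requests.cpu:1m]
}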