diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index db02fbd7dd4..55a65b056bb 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -32,14 +32,17 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" genericadmissioninitailizer "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/utils/lru" api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/features" ) const ( @@ -528,8 +531,11 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error { // enforce pod limits on init containers if limitType == corev1.LimitTypePod { - podRequests := podRequests(pod) - podLimits := podLimits(pod) + opts := podResourcesOptions{ + PodLevelResourcesEnabled: feature.DefaultFeatureGate.Enabled(features.PodLevelResources), + } + podRequests := podRequests(pod, opts) + podLimits := podLimits(pod, opts) for k, v := range limit.Min { if err := minConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil { errs = append(errs, err) @@ -550,13 +556,22 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error { return utilerrors.NewAggregate(errs) } +type podResourcesOptions struct { + // PodLevelResourcesEnabled indicates that the PodLevelResources feature gate is + // enabled. + PodLevelResourcesEnabled bool +} + // podRequests is a simplified version of pkg/api/v1/resource/PodRequests that operates against the core version of // pod. Any changes to that calculation should be reflected here. // NOTE: We do not want to check status resources here, only the spec. 
This is equivalent to setting // UseStatusResources=false in the common helper. // TODO: Maybe we can consider doing a partial conversion of the pod to a v1 // type and then using the pkg/api/v1/resource/PodRequests. -func podRequests(pod *api.Pod) api.ResourceList { +// TODO(ndixita): PodRequests method exists in +// staging/src/k8s.io/component-helpers/resource/helpers.go. Refactor the code to +// avoid duplicating podRequests method. +func podRequests(pod *api.Pod, opts podResourcesOptions) api.ResourceList { reqs := api.ResourceList{} for _, container := range pod.Spec.Containers { @@ -589,6 +604,19 @@ func podRequests(pod *api.Pod) api.ResourceList { } maxResourceList(reqs, initContainerReqs) + + // If PodLevelResources feature is enabled and resources are set at pod-level, + // override aggregated container requests of resources supported by pod-level + // resources with quantities specified at pod-level. + if opts.PodLevelResourcesEnabled && pod.Spec.Resources != nil { + for resourceName, quantity := range pod.Spec.Resources.Requests { + if isSupportedPodLevelResource(resourceName) { + // override with pod-level resource requests + reqs[resourceName] = quantity + } + } + } + return reqs } @@ -598,7 +626,10 @@ func podRequests(pod *api.Pod) api.ResourceList { // UseStatusResources=false in the common helper. // TODO: Maybe we can consider doing a partial conversion of the pod to a v1 // type and then using the pkg/api/v1/resource/PodLimits. -func podLimits(pod *api.Pod) api.ResourceList { +// TODO(ndixita): PodLimits method exists in +// staging/src/k8s.io/component-helpers/resource/helpers.go. Refactor the code to +// avoid duplicating podLimits method. 
+func podLimits(pod *api.Pod, opts podResourcesOptions) api.ResourceList { limits := api.ResourceList{} for _, container := range pod.Spec.Containers { @@ -628,9 +659,35 @@ func podLimits(pod *api.Pod) api.ResourceList { maxResourceList(limits, initContainerLimits) + // If PodLevelResources feature is enabled and resources are set at pod-level, + // override aggregated container limits of resources supported by pod-level + // resources with quantities specified at pod-level. + if opts.PodLevelResourcesEnabled && pod.Spec.Resources != nil { + for resourceName, quantity := range pod.Spec.Resources.Limits { + if isSupportedPodLevelResource(resourceName) { + // override with pod-level resource limits + limits[resourceName] = quantity + } + } + } + return limits } +var supportedPodLevelResources = sets.New(api.ResourceCPU, api.ResourceMemory) + +// isSupportedPodLevelResource checks if a given resource is supported by pod-level +// resource management through the PodLevelResources feature. Returns true if +// the resource is supported. +// isSupportedPodLevelResource method exists in +// staging/src/k8s.io/component-helpers/resource/helpers.go. +// isSupportedPodLevelResource is added here to avoid conversion of v1. +// Pod to api.Pod. +// TODO(ndixita): Find alternatives to avoid duplicating the code. +func isSupportedPodLevelResource(name api.ResourceName) bool { + return supportedPodLevelResources.Has(name) +} + // addResourceList adds the resources in newList to list. 
func addResourceList(list, newList api.ResourceList) { for name, quantity := range newList { diff --git a/plugin/pkg/admission/limitranger/admission_test.go b/plugin/pkg/admission/limitranger/admission_test.go index d8c4a2d3d1e..17fc3211af3 100644 --- a/plugin/pkg/admission/limitranger/admission_test.go +++ b/plugin/pkg/admission/limitranger/admission_test.go @@ -158,6 +158,12 @@ func validLimitRangeNoDefaults() corev1.LimitRange { return externalLimitRange } +func validPodWithPodLevelResources(name string, numContainers int, containerResources api.ResourceRequirements, podResources api.ResourceRequirements) api.Pod { + pod := validPod(name, numContainers, containerResources) + pod.Spec.Resources = &podResources + return pod +} + func validPod(name string, numContainers int, resources api.ResourceRequirements) api.Pod { pod := api.Pod{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"}, @@ -280,8 +286,9 @@ func TestMergePodResourceRequirements(t *testing.T) { func TestPodLimitFunc(t *testing.T) { type testCase struct { - pod api.Pod - limitRange corev1.LimitRange + pod api.Pod + limitRange corev1.LimitRange + podLevelResourcesEnabled bool } successCases := []testCase{ @@ -453,17 +460,42 @@ func TestPodLimitFunc(t *testing.T) { pod: validPod("pod-max-local-ephemeral-storage-ratio", 3, getResourceRequirements(getLocalStorageResourceList("300Mi"), getLocalStorageResourceList("450Mi"))), limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getLocalStorageResourceList("2Gi"), api.ResourceList{}, api.ResourceList{}, getLocalStorageResourceList("1.5")), }, + { + pod: validPodWithPodLevelResources("pod-level-resources-with-min-max", 3, getResourceRequirements(getComputeResourceList("100m", "60Mi"), getComputeResourceList("200m", "100Mi")), + getResourceRequirements(getComputeResourceList("200m", "180Mi"), getComputeResourceList("400m", "200Mi")), + ), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getComputeResourceList("400m", 
"200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + podLevelResourcesEnabled: true, + }, + { + pod: validPodWithPodLevelResources("pod-level-requests-with-min", 3, getResourceRequirements(getComputeResourceList("50m", "60Mi"), getComputeResourceList("", "")), + getResourceRequirements(getComputeResourceList("160m", "200Mi"), getComputeResourceList("", "")), + ), + limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("160m", "200Mi"), getComputeResourceList("", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + podLevelResourcesEnabled: true, + }, + { + pod: validPodWithPodLevelResources("pod-level-limits-with-max", 3, getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("50m", "60Mi")), + getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("160m", "200Mi")), + ), + limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("", ""), getComputeResourceList("160m", "200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + podLevelResourcesEnabled: true, + }, } for i := range successCases { test := successCases[i] - err := PodMutateLimitFunc(&test.limitRange, &test.pod) - if err != nil { - t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err) - } - err = PodValidateLimitFunc(&test.limitRange, &test.pod) - if err != nil { - t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err) - } + t.Run(test.pod.Name, func(t *testing.T) { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesEnabled) + err := PodMutateLimitFunc(&test.limitRange, &test.pod) + if err != nil { + t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err) + } + + err = PodValidateLimitFunc(&test.limitRange, &test.pod) + if err != nil { + t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err) + } + }) } errorCases := []testCase{ @@ -641,18 +673,41 @@ func 
TestPodLimitFunc(t *testing.T) { pod: withRestartableInitContainer(getComputeResourceList("1500m", ""), api.ResourceList{}, validPod("ctr-max-cpu-limit-restartable-init-container", 1, getResourceRequirements(getComputeResourceList("1000m", ""), getComputeResourceList("1500m", "")))), limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getComputeResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + }, { + pod: validPodWithPodLevelResources("pod-level-resources-exceeding-max", 3, getResourceRequirements(getComputeResourceList("100m", "60Mi"), getComputeResourceList("200m", "100Mi")), + getResourceRequirements(getComputeResourceList("200m", "180Mi"), getComputeResourceList("500m", "280Mi")), + ), + limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getComputeResourceList("400m", "200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + podLevelResourcesEnabled: true, + }, + { + pod: validPodWithPodLevelResources("pod-level-requests-less-than-min", 3, getResourceRequirements(getComputeResourceList("50m", "60Mi"), getComputeResourceList("", "")), + getResourceRequirements(getComputeResourceList("100m", "200Mi"), getComputeResourceList("", "")), + ), + limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("160m", "200Mi"), getComputeResourceList("", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + podLevelResourcesEnabled: true, + }, + { + pod: validPodWithPodLevelResources("pod-level-limits-exceeding-max", 3, getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("50m", "60Mi")), + getResourceRequirements(getComputeResourceList("", ""), getComputeResourceList("160m", "300Mi")), + ), + limitRange: createLimitRange(api.LimitTypePod, getComputeResourceList("", ""), getComputeResourceList("160m", "200Mi"), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}), + podLevelResourcesEnabled: true, }, } for i := range errorCases { test := 
errorCases[i] - err := PodMutateLimitFunc(&test.limitRange, &test.pod) - if err != nil { - t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err) - } - err = PodValidateLimitFunc(&test.limitRange, &test.pod) - if err == nil { - t.Errorf("Expected error for pod: %s", test.pod.Name) - } + t.Run(test.pod.Name, func(t *testing.T) { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesEnabled) + err := PodMutateLimitFunc(&test.limitRange, &test.pod) + if err != nil { + t.Errorf("Unexpected error for pod: %s, %v", test.pod.Name, err) + } + err = PodValidateLimitFunc(&test.limitRange, &test.pod) + if err == nil { + t.Errorf("Expected error for pod: %s", test.pod.Name) + } + }) } }