From 81df1958196e04f04d038e5ec9e2025989a30b29 Mon Sep 17 00:00:00 2001 From: Tim Allclair Date: Fri, 25 Oct 2024 21:18:19 -0700 Subject: [PATCH] Stop using status.AllocatedResources to aggregate resources --- pkg/kubelet/cm/helpers_linux.go | 15 +- pkg/quota/v1/evaluator/core/pods.go | 2 +- pkg/quota/v1/evaluator/core/pods_test.go | 28 ++-- pkg/scheduler/framework/events.go | 2 +- .../framework/plugins/noderesources/fit.go | 6 +- .../noderesources/resource_allocation.go | 2 +- pkg/scheduler/framework/types.go | 4 +- pkg/scheduler/framework/types_test.go | 146 +++++++++--------- .../component-helpers/resource/helpers.go | 44 ++++-- .../resource/helpers_test.go | 123 +++++++++++++-- .../kubectl/pkg/util/resource/resource.go | 6 +- 11 files changed, 251 insertions(+), 127 deletions(-) diff --git a/pkg/kubelet/cm/helpers_linux.go b/pkg/kubelet/cm/helpers_linux.go index f50a0122823..5500621c9ab 100644 --- a/pkg/kubelet/cm/helpers_linux.go +++ b/pkg/kubelet/cm/helpers_linux.go @@ -117,18 +117,17 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 { } // ResourceConfigForPod takes the input pod and outputs the cgroup resource config. -func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig { - inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.InPlacePodVerticalScaling) - // sum requests and limits. - reqs := resource.PodRequests(pod, resource.PodResourcesOptions{ - InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled, +func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig { + reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{ + // pod is already configured to the allocated resources, and we explicitly don't want to use + // the actual resources if we're instantiating a resize. + UseStatusResources: false, }) // track if limits were applied for each resource. 
memoryLimitsDeclared := true cpuLimitsDeclared := true - limits := resource.PodLimits(pod, resource.PodResourcesOptions{ - InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled, + limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{ ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) { if res.Cpu().IsZero() { cpuLimitsDeclared = false @@ -164,7 +163,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, } // determine the qos class - qosClass := v1qos.GetPodQOS(pod) + qosClass := v1qos.GetPodQOS(allocatedPod) // build the result result := &ResourceConfig{} diff --git a/pkg/quota/v1/evaluator/core/pods.go b/pkg/quota/v1/evaluator/core/pods.go index e8a6096e395..c82fa98437c 100644 --- a/pkg/quota/v1/evaluator/core/pods.go +++ b/pkg/quota/v1/evaluator/core/pods.go @@ -365,7 +365,7 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e } opts := resourcehelper.PodResourcesOptions{ - InPlacePodVerticalScalingEnabled: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), + UseStatusResources: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), } requests := resourcehelper.PodRequests(pod, opts) limits := resourcehelper.PodLimits(pod, opts) diff --git a/pkg/quota/v1/evaluator/core/pods_test.go b/pkg/quota/v1/evaluator/core/pods_test.go index d10b95a0a29..f92e7def0f0 100644 --- a/pkg/quota/v1/evaluator/core/pods_test.go +++ b/pkg/quota/v1/evaluator/core/pods_test.go @@ -906,7 +906,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) { usageFgEnabled corev1.ResourceList usageFgDisabled corev1.ResourceList }{ - "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for memory resource": { + "verify Max(Container.Spec.Requests, ContainerStatus.Resources) for memory resource": { pod: &api.Pod{ Spec: api.PodSpec{ Containers: []api.Container{ @@ -925,8 +925,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) { Status: api.PodStatus{ ContainerStatuses: []api.ContainerStatus{ { - AllocatedResources: api.ResourceList{ - api.ResourceMemory: resource.MustParse("150Mi"), + Resources: &api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceMemory: resource.MustParse("150Mi"), + }, }, }, }, @@ -947,7 +949,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) { generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, - "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU resource": { + "verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU resource": { pod: &api.Pod{ Spec: api.PodSpec{ Containers: []api.Container{ @@ -966,8 +968,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) { Status: api.PodStatus{ ContainerStatuses: []api.ContainerStatus{ { - AllocatedResources: api.ResourceList{ - api.ResourceCPU: resource.MustParse("150m"), + Resources: &api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("150m"), + }, }, }, }, @@ -988,7 +992,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) { generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, - "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU and memory resource": { + "verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU and memory resource": { pod: &api.Pod{ Spec: api.PodSpec{ Containers: 
[]api.Container{ @@ -1009,9 +1013,11 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) { Status: api.PodStatus{ ContainerStatuses: []api.ContainerStatus{ { - AllocatedResources: api.ResourceList{ - api.ResourceCPU: resource.MustParse("150m"), - api.ResourceMemory: resource.MustParse("250Mi"), + Resources: &api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: resource.MustParse("150m"), + api.ResourceMemory: resource.MustParse("250Mi"), + }, }, }, }, @@ -1038,7 +1044,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) { generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, - "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources==nil) for CPU and memory resource": { + "verify Max(Container.Spec.Requests, ContainerStatus.Resources==nil) for CPU and memory resource": { pod: &api.Pod{ Spec: api.PodSpec{ Containers: []api.Container{ diff --git a/pkg/scheduler/framework/events.go b/pkg/scheduler/framework/events.go index f5298546fd5..e6b628028c5 100644 --- a/pkg/scheduler/framework/events.go +++ b/pkg/scheduler/framework/events.go @@ -91,7 +91,7 @@ type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) ActionType // extractPodScaleDown interprets the update of a pod and returns PodRequestScaledDown event if any pod's resource request(s) is scaled down. func extractPodScaleDown(newPod, oldPod *v1.Pod) ActionType { opt := resource.PodResourcesOptions{ - InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), + UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), } newPodRequests := resource.PodRequests(newPod, opt) oldPodRequests := resource.PodRequests(oldPod, opt) diff --git a/pkg/scheduler/framework/plugins/noderesources/fit.go b/pkg/scheduler/framework/plugins/noderesources/fit.go index 5421f5326e5..0a7f387a42d 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -330,11 +330,11 @@ func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod // the other pod was scheduled, so modification or deletion may free up some resources. originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{} - originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling})) - modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling})) + originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})) + modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})) // check whether the resource request of the modified pod is less than the original pod. - podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling}) + podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}) for rName, rValue := range podRequests { if rValue.IsZero() { // We only care about the resources requested by the pod we are trying to schedule. 
diff --git a/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go b/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go index 752fcc89f38..118fb7e07c1 100644 --- a/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go +++ b/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go @@ -118,7 +118,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger kl func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resourceName v1.ResourceName) int64 { opts := resourcehelper.PodResourcesOptions{ - InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), + UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), } if !r.useRequested { opts.NonMissingContainerRequests = v1.ResourceList{ diff --git a/pkg/scheduler/framework/types.go b/pkg/scheduler/framework/types.go index d255cfb0332..c0dedaf5340 100644 --- a/pkg/scheduler/framework/types.go +++ b/pkg/scheduler/framework/types.go @@ -1054,11 +1054,11 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) { func calculateResource(pod *v1.Pod) (Resource, int64, int64) { requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{ - InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), + UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), }) non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{ - InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), + UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{ v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI), diff --git a/pkg/scheduler/framework/types_test.go b/pkg/scheduler/framework/types_test.go index ed354387dae..8972aca1cdb 100644 --- a/pkg/scheduler/framework/types_test.go +++ b/pkg/scheduler/framework/types_test.go @@ -1533,9 +1533,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways preparePod := func(pod v1.Pod, - requests, allocatedResources, - initRequests, initAllocatedResources, - sidecarRequests, sidecarAllocatedResources *v1.ResourceList, + requests, statusResources, + initRequests, initStatusResources, + sidecarRequests, sidecarStatusResources *v1.ResourceList, resizeStatus v1.PodResizeStatus) v1.Pod { if requests != nil { @@ -1545,11 +1545,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { Resources: v1.ResourceRequirements{Requests: *requests}, }) } - if allocatedResources != nil { + if statusResources != nil { pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{ - Name: "c1", - AllocatedResources: *allocatedResources, + Name: "c1", + Resources: &v1.ResourceRequirements{ + Requests: *statusResources, + }, }) } @@ -1561,11 +1563,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { }, ) } - if initAllocatedResources != nil { + if initStatusResources != nil { pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses, v1.ContainerStatus{ - Name: "i1", - AllocatedResources: *initAllocatedResources, + Name: "i1", + 
Resources: &v1.ResourceRequirements{ + Requests: *initStatusResources, + }, }) } @@ -1578,11 +1582,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { }, ) } - if sidecarAllocatedResources != nil { + if sidecarStatusResources != nil { pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses, v1.ContainerStatus{ - Name: "s1", - AllocatedResources: *sidecarAllocatedResources, + Name: "s1", + Resources: &v1.ResourceRequirements{ + Requests: *sidecarStatusResources, + }, }) } @@ -1591,74 +1597,74 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { } tests := []struct { - name string - requests v1.ResourceList - allocatedResources v1.ResourceList - initRequests *v1.ResourceList - initAllocatedResources *v1.ResourceList - sidecarRequests *v1.ResourceList - sidecarAllocatedResources *v1.ResourceList - resizeStatus v1.PodResizeStatus - expectedResource Resource - expectedNon0CPU int64 - expectedNon0Mem int64 + name string + requests v1.ResourceList + statusResources v1.ResourceList + initRequests *v1.ResourceList + initStatusResources *v1.ResourceList + sidecarRequests *v1.ResourceList + sidecarStatusResources *v1.ResourceList + resizeStatus v1.PodResizeStatus + expectedResource Resource + expectedNon0CPU int64 + expectedNon0Mem int64 }{ { - name: "Pod with no pending resize", - requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - resizeStatus: "", - expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()}, - expectedNon0CPU: cpu500m.MilliValue(), - expectedNon0Mem: mem500M.Value(), + name: "Pod with no pending resize", + requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + resizeStatus: "", + expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()}, + expectedNon0CPU: cpu500m.MilliValue(), + expectedNon0Mem: mem500M.Value(), }, { - name: "Pod with resize in progress", - requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - resizeStatus: v1.PodResizeStatusInProgress, - expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()}, - expectedNon0CPU: cpu500m.MilliValue(), - expectedNon0Mem: mem500M.Value(), + name: "Pod with resize in progress", + requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + resizeStatus: v1.PodResizeStatusInProgress, + expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()}, + expectedNon0CPU: cpu500m.MilliValue(), + expectedNon0Mem: mem500M.Value(), }, { - name: "Pod with deferred resize", - requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, - allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - resizeStatus: v1.PodResizeStatusDeferred, - expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()}, - expectedNon0CPU: cpu700m.MilliValue(), - expectedNon0Mem: mem800M.Value(), + name: "Pod with deferred resize", + requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: 
mem500M}, + resizeStatus: v1.PodResizeStatusDeferred, + expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()}, + expectedNon0CPU: cpu700m.MilliValue(), + expectedNon0Mem: mem800M.Value(), }, { - name: "Pod with infeasible resize", - requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, - allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - resizeStatus: v1.PodResizeStatusInfeasible, - expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()}, - expectedNon0CPU: cpu500m.MilliValue(), - expectedNon0Mem: mem500M.Value(), + name: "Pod with infeasible resize", + requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + resizeStatus: v1.PodResizeStatusInfeasible, + expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()}, + expectedNon0CPU: cpu500m.MilliValue(), + expectedNon0Mem: mem500M.Value(), }, { - name: "Pod with init container and no pending resize", + name: "Pod with init container and no pending resize", + requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + resizeStatus: "", + expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()}, + expectedNon0CPU: cpu700m.MilliValue(), + expectedNon0Mem: mem800M.Value(), + }, + { + name: "Pod with sider container and no pending resize", requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, - initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + sidecarStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, resizeStatus: "", - expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()}, - expectedNon0CPU: cpu700m.MilliValue(), - expectedNon0Mem: mem800M.Value(), - }, - { - name: "Pod with sider container and no pending resize", - requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, - initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, - sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, - sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, - resizeStatus: "", expectedResource: Resource{ MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(), Memory: mem500M.Value() + mem800M.Value(), @@ -1671,9 +1677,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { for _, tt := range 
tests { t.Run(tt.name, func(t *testing.T) { pod := preparePod(*testpod.DeepCopy(), - &tt.requests, &tt.allocatedResources, - tt.initRequests, tt.initAllocatedResources, - tt.sidecarRequests, tt.sidecarAllocatedResources, + &tt.requests, &tt.statusResources, + tt.initRequests, tt.initStatusResources, + tt.sidecarRequests, tt.sidecarStatusResources, tt.resizeStatus) res, non0CPU, non0Mem := calculateResource(&pod) diff --git a/staging/src/k8s.io/component-helpers/resource/helpers.go b/staging/src/k8s.io/component-helpers/resource/helpers.go index 1d83aa8973a..a8b252cfdd4 100644 --- a/staging/src/k8s.io/component-helpers/resource/helpers.go +++ b/staging/src/k8s.io/component-helpers/resource/helpers.go @@ -35,8 +35,10 @@ type PodResourcesOptions struct { // Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits // functions. All existing values in Reuse will be lost. Reuse v1.ResourceList - // InPlacePodVerticalScalingEnabled indicates that the in-place pod vertical scaling feature gate is enabled. - InPlacePodVerticalScalingEnabled bool + // UseStatusResources indicates whether resources reported by the PodStatus should be considered + // when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling + // feature is not enabled. + UseStatusResources bool // ExcludeOverhead controls if pod overhead is excluded from the calculation. ExcludeOverhead bool // ContainerFn is called with the effective resources required for each container within the pod. @@ -54,7 +56,7 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { reqs := reuseOrClearResourceList(opts.Reuse) var containerStatuses map[string]*v1.ContainerStatus - if opts.InPlacePodVerticalScalingEnabled { + if opts.UseStatusResources { containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) for i := range pod.Status.ContainerStatuses { containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] @@ -63,13 +65,13 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { for _, container := range pod.Spec.Containers { containerReqs := container.Resources.Requests - if opts.InPlacePodVerticalScalingEnabled { + if opts.UseStatusResources { cs, found := containerStatuses[container.Name] - if found { + if found && cs.Resources != nil { if pod.Status.Resize == v1.PodResizeStatusInfeasible { - containerReqs = cs.AllocatedResources.DeepCopy() + containerReqs = cs.Resources.Requests.DeepCopy() } else { - containerReqs = max(container.Resources.Requests, cs.AllocatedResources) + containerReqs = max(container.Resources.Requests, cs.Resources.Requests) } } } @@ -155,11 +157,31 @@ func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList { // attempt to reuse the maps if passed, or allocate otherwise limits := reuseOrClearResourceList(opts.Reuse) - for _, container := range pod.Spec.Containers { - if opts.ContainerFn != nil { - opts.ContainerFn(container.Resources.Limits, Containers) + var containerStatuses map[string]*v1.ContainerStatus + if opts.UseStatusResources { + containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)) + for i := range pod.Status.ContainerStatuses { + containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i] } - addResourceList(limits, container.Resources.Limits) + } + + for _, container := range pod.Spec.Containers { + containerLimits := container.Resources.Limits + if 
opts.UseStatusResources { + cs, found := containerStatuses[container.Name] + if found && cs.Resources != nil { + if pod.Status.Resize == v1.PodResizeStatusInfeasible { + containerLimits = cs.Resources.Limits.DeepCopy() + } else { + containerLimits = max(container.Resources.Limits, cs.Resources.Limits) + } + } + } + + if opts.ContainerFn != nil { + opts.ContainerFn(containerLimits, Containers) + } + addResourceList(limits, containerLimits) } restartableInitContainerLimits := v1.ResourceList{} diff --git a/staging/src/k8s.io/component-helpers/resource/helpers_test.go b/staging/src/k8s.io/component-helpers/resource/helpers_test.go index 9d993f48eb6..f2d984cf65d 100644 --- a/staging/src/k8s.io/component-helpers/resource/helpers_test.go +++ b/staging/src/k8s.io/component-helpers/resource/helpers_test.go @@ -432,7 +432,7 @@ func TestPodResourceRequests(t *testing.T) { v1.ResourceCPU: resource.MustParse("2"), }, podResizeStatus: v1.PodResizeStatusInfeasible, - options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: true}, + options: PodResourcesOptions{UseStatusResources: true}, containers: []v1.Container{ { Name: "container-1", @@ -446,8 +446,10 @@ func TestPodResourceRequests(t *testing.T) { containerStatus: []v1.ContainerStatus{ { Name: "container-1", - AllocatedResources: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2"), + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, }, }, }, @@ -457,7 +459,7 @@ func TestPodResourceRequests(t *testing.T) { expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, - options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: true}, + options: PodResourcesOptions{UseStatusResources: true}, containers: []v1.Container{ { Name: "container-1", @@ -471,19 +473,21 @@ func TestPodResourceRequests(t *testing.T) { containerStatus: []v1.ContainerStatus{ { Name: "container-1", - AllocatedResources: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2"), + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, }, }, }, }, { - description: "resized, infeasible, feature gate disabled", + description: "resized, infeasible, but don't use status", expectedRequests: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("4"), }, podResizeStatus: v1.PodResizeStatusInfeasible, - options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: false}, + options: PodResourcesOptions{UseStatusResources: false}, containers: []v1.Container{ { Name: "container-1", @@ -497,8 +501,10 @@ func TestPodResourceRequests(t *testing.T) { containerStatus: []v1.ContainerStatus{ { Name: "container-1", - AllocatedResources: v1.ResourceList{ - v1.ResourceCPU: resource.MustParse("2"), + Resources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + }, }, }, }, @@ -742,12 +748,13 @@ func TestPodResourceRequestsReuse(t *testing.T) { func TestPodResourceLimits(t *testing.T) { restartAlways := v1.ContainerRestartPolicyAlways testCases := []struct { - description string - options PodResourcesOptions - overhead v1.ResourceList - initContainers []v1.Container - containers []v1.Container - expectedLimits v1.ResourceList + description string + options PodResourcesOptions + overhead v1.ResourceList + initContainers []v1.Container + containers []v1.Container + containerStatuses []v1.ContainerStatus + expectedLimits v1.ResourceList }{ { description: "nil options, larger init container", @@ -1119,6 
+1126,87 @@ func TestPodResourceLimits(t *testing.T) { }, }, }, + { + description: "pod scaled up", + expectedLimits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + options: PodResourcesOptions{UseStatusResources: true}, + containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + containerStatuses: []v1.ContainerStatus{ + { + Name: "container-1", + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + { + description: "pod scaled down", + expectedLimits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + options: PodResourcesOptions{UseStatusResources: true}, + containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + containerStatuses: []v1.ContainerStatus{ + { + Name: "container-1", + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, + { + description: "pod scaled down, don't use status", + expectedLimits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + options: PodResourcesOptions{UseStatusResources: false}, + containers: []v1.Container{ + { + Name: "container-1", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + containerStatuses: []v1.ContainerStatus{ + { + Name: "container-1", + Resources: &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, } for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { @@ -1128,6 +1216,9 @@ func TestPodResourceLimits(t *testing.T) { InitContainers: tc.initContainers, Overhead: tc.overhead, }, + Status: v1.PodStatus{ + ContainerStatuses: tc.containerStatuses, + }, } limits := PodLimits(p, tc.options) if diff := cmp.Diff(limits, tc.expectedLimits); diff != "" { diff --git a/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go b/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go index cc60a64b32e..80da5fdd66d 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go +++ b/staging/src/k8s.io/kubectl/pkg/util/resource/resource.go @@ -49,11 +49,11 @@ func podRequests(pod *corev1.Pod) corev1.ResourceList { for _, container := range pod.Spec.Containers { containerReqs := container.Resources.Requests cs, found := containerStatuses[container.Name] - if found { + if found && cs.Resources != nil { if pod.Status.Resize == corev1.PodResizeStatusInfeasible { - containerReqs = cs.AllocatedResources.DeepCopy() + containerReqs = cs.Resources.Requests.DeepCopy() } else { - containerReqs = max(container.Resources.Requests, cs.AllocatedResources) + containerReqs = max(container.Resources.Requests, cs.Resources.Requests) } } addResourceList(reqs, containerReqs)
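Reviewer note: the behavioral rule this patch standardizes on is the per-container resolution in PodRequests (and now PodLimits) when UseStatusResources is true — if the container status carries no Resources, the spec value is used as-is; while a resize is marked Infeasible, the value reported in the status wins; otherwise the effective value is the element-wise max of spec and status. The sketch below restates that rule outside the helpers package using the upstream k8s.io/api and k8s.io/apimachinery types; resolveContainerRequests and maxResourceList are illustrative stand-ins for the inline logic and the package's unexported max helper, not exported APIs, and the snippet assumes a module that already vendors those dependencies.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// resolveContainerRequests restates the per-container rule applied by PodRequests
// when opts.UseStatusResources is true (illustrative helper, not part of the patch):
//   - no status, or status.Resources == nil: use the spec requests as-is
//   - resize marked Infeasible: use the requests reported in the container status
//   - otherwise: element-wise max of spec and status requests
func resolveContainerRequests(pod *v1.Pod, container v1.Container, status *v1.ContainerStatus) v1.ResourceList {
	if status == nil || status.Resources == nil {
		return container.Resources.Requests
	}
	if pod.Status.Resize == v1.PodResizeStatusInfeasible {
		return status.Resources.Requests.DeepCopy()
	}
	return maxResourceList(container.Resources.Requests, status.Resources.Requests)
}

// maxResourceList takes the element-wise max over two ResourceLists, mirroring the
// behavior of the unexported max helper the patched code calls.
func maxResourceList(a, b v1.ResourceList) v1.ResourceList {
	out := v1.ResourceList{}
	for name, q := range a {
		out[name] = q.DeepCopy()
	}
	for name, q := range b {
		if cur, ok := out[name]; !ok || q.Cmp(cur) > 0 {
			out[name] = q.DeepCopy()
		}
	}
	return out
}

func main() {
	// A pod that asked for 500m CPU but whose resize was rejected as infeasible;
	// the container status still reports the previously allocated 250m.
	pod := &v1.Pod{
		Spec: v1.PodSpec{Containers: []v1.Container{{
			Name: "c1",
			Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("500m"),
			}},
		}}},
		Status: v1.PodStatus{
			Resize: v1.PodResizeStatusInfeasible,
			ContainerStatuses: []v1.ContainerStatus{{
				Name: "c1",
				Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("250m"),
				}},
			}},
		},
	}
	reqs := resolveContainerRequests(pod, pod.Spec.Containers[0], &pod.Status.ContainerStatuses[0])
	fmt.Println(reqs.Cpu().String()) // 250m: an infeasible resize falls back to the status value
}

The same max/Infeasible split is what the new PodLimits hunk applies to limits, and the added nil check on ContainerStatus.Resources is what keeps pods without the in-place-resize status fields on their plain spec values.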