Stop using status.AllocatedResources to aggregate resources

commit 81df195819 (parent 61c1beeda2)
Author: Tim Allclair
Date:   2024-10-25 21:18:19 -07:00

11 changed files with 251 additions and 127 deletions
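In short: the helpers that aggregate pod resources for the kubelet's cgroup config, resource quota, and the scheduler stop reading ContainerStatus.AllocatedResources and instead read the requests and limits reported in ContainerStatus.Resources. The option InPlacePodVerticalScalingEnabled is renamed to UseStatusResources, which describes what the option does rather than which feature gate permits it. The aggregation rule keeps its shape: while a resize is pending or in progress, a container counts as the per-resource maximum of its spec and status values; once the resize is marked infeasible, the status values are used as-is. A minimal runnable sketch of that rule, where maxResourceList is a hypothetical stand-in for the unexported max helper in the resource helpers package:

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
	)

	// maxResourceList is a hypothetical stand-in for the unexported max helper
	// used in the hunks below: the per-resource maximum of two resource lists.
	func maxResourceList(a, b v1.ResourceList) v1.ResourceList {
		out := a.DeepCopy()
		for name, q := range b {
			if cur, ok := out[name]; !ok || q.Cmp(cur) > 0 {
				out[name] = q.DeepCopy()
			}
		}
		return out
	}

	func main() {
		specReqs := v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")}   // desired (spec)
		statusReqs := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")} // actuated (status)

		// Resize pending or in progress: count the larger of spec and status.
		merged := maxResourceList(specReqs, statusReqs)
		fmt.Println(merged.Cpu()) // 4

		// Resize marked infeasible: the spec will never be actuated, so count status as-is.
		infeasible := statusReqs.DeepCopy()
		fmt.Println(infeasible.Cpu()) // 2
	}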


@@ -117,18 +117,17 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
}
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.InPlacePodVerticalScaling)
// sum requests and limits.
reqs := resource.PodRequests(pod, resource.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled,
func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
// pod is already configured to the allocated resources, and we explicitly don't want to use
// the actual resources if we're instantiating a resize.
UseStatusResources: false,
})
// track if limits were applied for each resource.
memoryLimitsDeclared := true
cpuLimitsDeclared := true
limits := resource.PodLimits(pod, resource.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled,
limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{
ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) {
if res.Cpu().IsZero() {
cpuLimitsDeclared = false
@@ -164,7 +163,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
}
// determine the qos class
qosClass := v1qos.GetPodQOS(pod)
qosClass := v1qos.GetPodQOS(allocatedPod)
// build the result
result := &ResourceConfig{}


@@ -365,7 +365,7 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
}
opts := resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
requests := resourcehelper.PodRequests(pod, opts)
limits := resourcehelper.PodLimits(pod, opts)


@@ -906,7 +906,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
usageFgEnabled corev1.ResourceList
usageFgDisabled corev1.ResourceList
}{
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for memory resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for memory resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
@@ -925,8 +925,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
AllocatedResources: api.ResourceList{
api.ResourceMemory: resource.MustParse("150Mi"),
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceMemory: resource.MustParse("150Mi"),
},
},
},
},
@@ -947,7 +949,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
@@ -966,8 +968,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
AllocatedResources: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
},
},
},
},
@@ -988,7 +992,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU and memory resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU and memory resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
@@ -1009,9 +1013,11 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
AllocatedResources: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
api.ResourceMemory: resource.MustParse("250Mi"),
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
api.ResourceMemory: resource.MustParse("250Mi"),
},
},
},
},
@@ -1038,7 +1044,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources==nil) for CPU and memory resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources==nil) for CPU and memory resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{


@@ -91,7 +91,7 @@ type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) ActionType
// extractPodScaleDown interprets the update of a pod and returns PodRequestScaledDown event if any pod's resource request(s) is scaled down.
func extractPodScaleDown(newPod, oldPod *v1.Pod) ActionType {
opt := resource.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
newPodRequests := resource.PodRequests(newPod, opt)
oldPodRequests := resource.PodRequests(oldPod, opt)


@@ -330,11 +330,11 @@ func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod
// the other pod was scheduled, so modification or deletion may free up some resources.
originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{}
originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling}))
modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling}))
originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
// check whether the resource request of the modified pod is less than the original pod.
podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling})
podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})
for rName, rValue := range podRequests {
if rValue.IsZero() {
// We only care about the resources requested by the pod we are trying to schedule.


@@ -118,7 +118,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger kl
func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resourceName v1.ResourceName) int64 {
opts := resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
if !r.useRequested {
opts.NonMissingContainerRequests = v1.ResourceList{


@@ -1054,11 +1054,11 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
})
non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),


@@ -1533,9 +1533,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
restartAlways := v1.ContainerRestartPolicyAlways
preparePod := func(pod v1.Pod,
requests, allocatedResources,
initRequests, initAllocatedResources,
sidecarRequests, sidecarAllocatedResources *v1.ResourceList,
requests, statusResources,
initRequests, initStatusResources,
sidecarRequests, sidecarStatusResources *v1.ResourceList,
resizeStatus v1.PodResizeStatus) v1.Pod {
if requests != nil {
@@ -1545,11 +1545,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
Resources: v1.ResourceRequirements{Requests: *requests},
})
}
if allocatedResources != nil {
if statusResources != nil {
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses,
v1.ContainerStatus{
Name: "c1",
AllocatedResources: *allocatedResources,
Name: "c1",
Resources: &v1.ResourceRequirements{
Requests: *statusResources,
},
})
}
@@ -1561,11 +1563,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
},
)
}
if initAllocatedResources != nil {
if initStatusResources != nil {
pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
v1.ContainerStatus{
Name: "i1",
AllocatedResources: *initAllocatedResources,
Name: "i1",
Resources: &v1.ResourceRequirements{
Requests: *initStatusResources,
},
})
}
@@ -1578,11 +1582,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
},
)
}
if sidecarAllocatedResources != nil {
if sidecarStatusResources != nil {
pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
v1.ContainerStatus{
Name: "s1",
AllocatedResources: *sidecarAllocatedResources,
Name: "s1",
Resources: &v1.ResourceRequirements{
Requests: *sidecarStatusResources,
},
})
}
@@ -1591,74 +1597,74 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
}
tests := []struct {
name string
requests v1.ResourceList
allocatedResources v1.ResourceList
initRequests *v1.ResourceList
initAllocatedResources *v1.ResourceList
sidecarRequests *v1.ResourceList
sidecarAllocatedResources *v1.ResourceList
resizeStatus v1.PodResizeStatus
expectedResource Resource
expectedNon0CPU int64
expectedNon0Mem int64
name string
requests v1.ResourceList
statusResources v1.ResourceList
initRequests *v1.ResourceList
initStatusResources *v1.ResourceList
sidecarRequests *v1.ResourceList
sidecarStatusResources *v1.ResourceList
resizeStatus v1.PodResizeStatus
expectedResource Resource
expectedNon0CPU int64
expectedNon0Mem int64
}{
{
name: "Pod with no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
name: "Pod with no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "Pod with resize in progress",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInProgress,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
name: "Pod with resize in progress",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInProgress,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "Pod with deferred resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusDeferred,
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
name: "Pod with deferred resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusDeferred,
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
},
{
name: "Pod with infeasible resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInfeasible,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
name: "Pod with infeasible resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInfeasible,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "Pod with init container and no pending resize",
name: "Pod with init container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
},
{
name: "Pod with sider container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
},
{
name: "Pod with sider container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{
MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
Memory: mem500M.Value() + mem800M.Value(),
@@ -1671,9 +1677,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pod := preparePod(*testpod.DeepCopy(),
&tt.requests, &tt.allocatedResources,
tt.initRequests, tt.initAllocatedResources,
tt.sidecarRequests, tt.sidecarAllocatedResources,
&tt.requests, &tt.statusResources,
tt.initRequests, tt.initStatusResources,
tt.sidecarRequests, tt.sidecarStatusResources,
tt.resizeStatus)
res, non0CPU, non0Mem := calculateResource(&pod)


@@ -35,8 +35,10 @@ type PodResourcesOptions struct {
// Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits
// functions. All existing values in Reuse will be lost.
Reuse v1.ResourceList
// InPlacePodVerticalScalingEnabled indicates that the in-place pod vertical scaling feature gate is enabled.
InPlacePodVerticalScalingEnabled bool
// UseStatusResources indicates whether resources reported by the PodStatus should be considered
// when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
// feature is not enabled.
UseStatusResources bool
// ExcludeOverhead controls if pod overhead is excluded from the calculation.
ExcludeOverhead bool
// ContainerFn is called with the effective resources required for each container within the pod.
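Feature-gate-aware callers satisfy that invariant by deriving the option directly from the gate; the scheduler and quota hunks in this commit all follow the same pattern:

	opts := resourcehelper.PodResourcesOptions{
		// Status resources are consulted only when InPlacePodVerticalScaling is enabled.
		UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
	}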
@@ -54,7 +56,7 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
reqs := reuseOrClearResourceList(opts.Reuse)
var containerStatuses map[string]*v1.ContainerStatus
if opts.InPlacePodVerticalScalingEnabled {
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
@@ -63,13 +65,13 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
if opts.InPlacePodVerticalScalingEnabled {
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found {
if found && cs.Resources != nil {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
containerReqs = cs.AllocatedResources.DeepCopy()
containerReqs = cs.Resources.Requests.DeepCopy()
} else {
containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
}
}
}
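Concretely, mirroring the TestPodResourceRequests cases further down: a container that requests 4 CPU in its spec while its status still reports 2 counts as max(4, 2) = 4 while a resize could still be actuated, and as 2 once the resize is marked infeasible. A runnable sketch, assuming the helpers live at k8s.io/kubernetes/pkg/api/v1/resource (the resourcehelper alias used in the scheduler hunks above):

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
		resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
	)

	func main() {
		pod := &v1.Pod{
			Spec: v1.PodSpec{Containers: []v1.Container{{
				Name: "container-1",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")}, // desired
				},
			}}},
			Status: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{{
					Name: "container-1",
					Resources: &v1.ResourceRequirements{
						Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}, // actuated
					},
				}},
			},
		}
		opts := resourcehelper.PodResourcesOptions{UseStatusResources: true}

		reqs := resourcehelper.PodRequests(pod, opts)
		fmt.Println(reqs.Cpu()) // 4: max(spec, status) while the resize could still happen

		pod.Status.Resize = v1.PodResizeStatusInfeasible
		reqs = resourcehelper.PodRequests(pod, opts)
		fmt.Println(reqs.Cpu()) // 2: status wins once the resize is infeasible
	}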
@@ -155,11 +157,31 @@ func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := reuseOrClearResourceList(opts.Reuse)
for _, container := range pod.Spec.Containers {
if opts.ContainerFn != nil {
opts.ContainerFn(container.Resources.Limits, Containers)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
addResourceList(limits, container.Resources.Limits)
}
for _, container := range pod.Spec.Containers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
containerLimits = cs.Resources.Limits.DeepCopy()
} else {
containerLimits = max(container.Resources.Limits, cs.Resources.Limits)
}
}
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, Containers)
}
addResourceList(limits, containerLimits)
}
restartableInitContainerLimits := v1.ResourceList{}
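PodLimits now mirrors the status-aware logic of PodRequests, which the new "pod scaled up" and "pod scaled down" test cases below exercise: a pod being scaled down from a 2Gi to a 1Gi memory limit still counts the old 2Gi limit until the resize is actuated. A sketch under the same import assumption as above:

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
		resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
	)

	func main() {
		// Scale-down in flight: the spec already shows 1Gi, the status still reports 2Gi.
		pod := &v1.Pod{
			Spec: v1.PodSpec{Containers: []v1.Container{{
				Name: "container-1",
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi")},
				},
			}}},
			Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{{
				Name: "container-1",
				Resources: &v1.ResourceRequirements{
					Limits: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Gi")},
				},
			}}},
		}

		limits := resourcehelper.PodLimits(pod, resourcehelper.PodResourcesOptions{UseStatusResources: true})
		fmt.Println(limits.Memory()) // 2Gi: the larger actuated limit counts until the resize completes
	}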


@@ -432,7 +432,7 @@ func TestPodResourceRequests(t *testing.T) {
v1.ResourceCPU: resource.MustParse("2"),
},
podResizeStatus: v1.PodResizeStatusInfeasible,
options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: true},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
@@ -446,8 +446,10 @@ func TestPodResourceRequests(t *testing.T) {
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
AllocatedResources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
@@ -457,7 +459,7 @@ func TestPodResourceRequests(t *testing.T) {
expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: true},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
@@ -471,19 +473,21 @@ func TestPodResourceRequests(t *testing.T) {
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
AllocatedResources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
},
{
description: "resized, infeasible, feature gate disabled",
description: "resized, infeasible, but don't use status",
expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
podResizeStatus: v1.PodResizeStatusInfeasible,
options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: false},
options: PodResourcesOptions{UseStatusResources: false},
containers: []v1.Container{
{
Name: "container-1",
@@ -497,8 +501,10 @@ func TestPodResourceRequests(t *testing.T) {
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
AllocatedResources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
@@ -742,12 +748,13 @@ func TestPodResourceRequestsReuse(t *testing.T) {
func TestPodResourceLimits(t *testing.T) {
restartAlways := v1.ContainerRestartPolicyAlways
testCases := []struct {
description string
options PodResourcesOptions
overhead v1.ResourceList
initContainers []v1.Container
containers []v1.Container
expectedLimits v1.ResourceList
description string
options PodResourcesOptions
overhead v1.ResourceList
initContainers []v1.Container
containers []v1.Container
containerStatuses []v1.ContainerStatus
expectedLimits v1.ResourceList
}{
{
description: "nil options, larger init container",
@@ -1119,6 +1126,87 @@ func TestPodResourceLimits(t *testing.T) {
},
},
},
{
description: "pod scaled up",
expectedLimits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
},
containerStatuses: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
},
},
{
description: "pod scaled down",
expectedLimits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
},
containerStatuses: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
},
},
{
description: "pod scaled down, don't use status",
expectedLimits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
options: PodResourcesOptions{UseStatusResources: false},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
},
containerStatuses: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
@@ -1128,6 +1216,9 @@ func TestPodResourceLimits(t *testing.T) {
InitContainers: tc.initContainers,
Overhead: tc.overhead,
},
Status: v1.PodStatus{
ContainerStatuses: tc.containerStatuses,
},
}
limits := PodLimits(p, tc.options)
if diff := cmp.Diff(limits, tc.expectedLimits); diff != "" {


@@ -49,11 +49,11 @@ func podRequests(pod *corev1.Pod) corev1.ResourceList {
for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
cs, found := containerStatuses[container.Name]
if found {
if found && cs.Resources != nil {
if pod.Status.Resize == corev1.PodResizeStatusInfeasible {
containerReqs = cs.AllocatedResources.DeepCopy()
containerReqs = cs.Resources.Requests.DeepCopy()
} else {
containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
}
}
addResourceList(reqs, containerReqs)