Rename ContainerStatus.ResourcesAllocated to ContainerStatus.AllocatedResources

vinay kulkarni 2023-03-10 03:35:35 +00:00
parent 90c3232de7
commit 01b96e7704
19 changed files with 81 additions and 81 deletions

View File

@@ -552,10 +552,10 @@ func dropDisabledPodStatusFields(podStatus, oldPodStatus *api.PodStatus, podSpec
     }
     if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
-        // Drop Resize, ResourcesAllocated, and Resources fields
+        // Drop Resize, AllocatedResources, and Resources fields
         dropResourcesFields := func(csl []api.ContainerStatus) {
             for i := range csl {
-                csl[i].ResourcesAllocated = nil
+                csl[i].AllocatedResources = nil
                 csl[i].Resources = nil
             }
         }
@@ -861,7 +861,7 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) {
             return api.ContainerStatus{}, false
         }
         if cs, ok := findContainerStatus(newPod.Status.ContainerStatuses, c.Name); ok {
-            if diff.ObjectDiff(c.Resources.Requests, cs.ResourcesAllocated) != "" {
+            if diff.ObjectDiff(c.Resources.Requests, cs.AllocatedResources) != "" {
                 newPod.Status.Resize = api.PodResizeStatusProposed
                 break
             }
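When the feature gate is off and no existing pod already uses the feature, the first hunk clears the renamed status fields so a disabled feature cannot be persisted through API writes. A standalone sketch of that drop pattern (a minimal illustration, not the literal kubernetes code):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
        api "k8s.io/kubernetes/pkg/apis/core"
    )

    // dropResourcesFields mirrors the gate-off path above: nil out the
    // feature's status fields on every container status.
    func dropResourcesFields(csl []api.ContainerStatus) {
        for i := range csl {
            csl[i].AllocatedResources = nil
            csl[i].Resources = nil
        }
    }

    func main() {
        csl := []api.ContainerStatus{{
            Name:               "c1",
            AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
        }}
        dropResourcesFields(csl)
        fmt.Println(csl[0].AllocatedResources == nil) // true: field dropped
    }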

View File

@@ -2302,7 +2302,7 @@ func TestDropInPlacePodVerticalScaling(t *testing.T) {
             {
                 Name: "c1",
                 Image: "image",
-                ResourcesAllocated: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+                AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
                 Resources: &api.ResourceRequirements{
                     Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
                     Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
@@ -2573,12 +2573,12 @@ func TestMarkPodProposedForResize(t *testing.T) {
             {
                 Name: "c1",
                 Image: "image",
-                ResourcesAllocated: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+                AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
             },
             {
                 Name: "c2",
                 Image: "image",
-                ResourcesAllocated: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+                AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
             },
         },
     },
@@ -2609,12 +2609,12 @@ func TestMarkPodProposedForResize(t *testing.T) {
             {
                 Name: "c1",
                 Image: "image",
-                ResourcesAllocated: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+                AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
             },
             {
                 Name: "c2",
                 Image: "image",
-                ResourcesAllocated: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+                AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
             },
         },
     },
@@ -2646,12 +2646,12 @@ func TestMarkPodProposedForResize(t *testing.T) {
             {
                 Name: "c1",
                 Image: "image",
-                ResourcesAllocated: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
+                AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
             },
             {
                 Name: "c2",
                 Image: "image",
-                ResourcesAllocated: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
+                AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
             },
         },
     },

View File

@@ -2492,12 +2492,12 @@ type ContainerStatus struct {
     // same as false.
     // +optional
     Started *bool
-    // ResourcesAllocated represents the compute resources allocated for this container by the
+    // AllocatedResources represents the compute resources allocated for this container by the
     // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
     // and after successfully admitting desired pod resize.
     // +featureGate=InPlacePodVerticalScaling
     // +optional
-    ResourcesAllocated ResourceList
+    AllocatedResources ResourceList
     // Resources represents the compute resource requests and limits that have been successfully
     // enacted on the running container after it has been started or has been successfully resized.
     // +featureGate=InPlacePodVerticalScaling
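The doc comment pins the semantics that survive the rename: AllocatedResources is what the node admitted, while the neighboring Resources field reports what the runtime has actually applied. A consumer can tell a pending resize apart from an applied one by comparing the spec against this field; a minimal sketch (hypothetical helper, assuming the internal API package):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
        api "k8s.io/kubernetes/pkg/apis/core"
    )

    // pendingResize reports whether desired requests differ from what the
    // kubelet has admitted for the container.
    func pendingResize(c api.Container, cs api.ContainerStatus) bool {
        for name, want := range c.Resources.Requests {
            got, ok := cs.AllocatedResources[name]
            if !ok || got.Cmp(want) != 0 {
                return true
            }
        }
        return false
    }

    func main() {
        c := api.Container{Resources: api.ResourceRequirements{
            Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
        }}
        cs := api.ContainerStatus{
            AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
        }
        fmt.Println(pendingResize(c, cs)) // true: 200m desired vs 100m admitted
    }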

View File

@@ -424,12 +424,12 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
     }
     cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
     // In-place pod resize feature makes Container.Resources field mutable for CPU & memory.
-    // ResourcesAllocated holds the value of Container.Resources.Requests when the pod was admitted.
+    // AllocatedResources holds the value of Container.Resources.Requests when the pod was admitted.
     // We should return this value because this is what kubelet agreed to allocate for the container
     // and the value configured with runtime.
     if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
         if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok {
-            cpuQuantity = cs.ResourcesAllocated[v1.ResourceCPU]
+            cpuQuantity = cs.AllocatedResources[v1.ResourceCPU]
         }
     }
     if cpuQuantity.Value()*1000 != cpuQuantity.MilliValue() {
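The closing context line is the static policy's whole-CPU guard: Quantity.Value() rounds up to integer units, so the equality holds only for integral CPU quantities, and fractional requests get no exclusive CPUs. A quick standalone check of that arithmetic:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        for _, s := range []string{"1500m", "2"} {
            q := resource.MustParse(s)
            // Value() rounds 1500m up to 2, so 2*1000 != 1500 and the
            // quantity is rejected as non-integral.
            fmt.Printf("%s integral: %v\n", s, q.Value()*1000 == q.MilliValue())
        }
        // Output:
        // 1500m integral: false
        // 2 integral: true
    }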

View File

@@ -415,12 +415,12 @@ func getRequestedResources(pod *v1.Pod, container *v1.Container) (map[v1.Resourc
     requestedResources := map[v1.ResourceName]uint64{}
     resources := container.Resources.Requests
     // In-place pod resize feature makes Container.Resources field mutable for CPU & memory.
-    // ResourcesAllocated holds the value of Container.Resources.Requests when the pod was admitted.
+    // AllocatedResources holds the value of Container.Resources.Requests when the pod was admitted.
     // We should return this value because this is what kubelet agreed to allocate for the container
     // and the value configured with runtime.
     if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
         if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok {
-            resources = cs.ResourcesAllocated
+            resources = cs.AllocatedResources
         }
     }
     for resourceName, quantity := range resources {
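The loop this hunk ends on converts the chosen ResourceList into raw integer values; a standalone sketch of that conversion (illustrative helper, with memory-like resources measured in bytes):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func toRequestedMap(rl v1.ResourceList) map[v1.ResourceName]uint64 {
        out := map[v1.ResourceName]uint64{}
        for name, q := range rl {
            // Quantity.Value() yields the rounded-up integer value; for
            // memory that is the request in bytes.
            out[name] = uint64(q.Value())
        }
        return out
    }

    func main() {
        rl := v1.ResourceList{v1.ResourceMemory: resource.MustParse("512Mi")}
        m := toRequestedMap(rl)
        fmt.Println(m[v1.ResourceMemory]) // 536870912
    }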

View File

@@ -1024,7 +1024,7 @@ func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats stats
     if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
         (resourceToReclaim == v1.ResourceMemory || resourceToReclaim == v1.ResourceCPU) {
         if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok {
-            requests = cs.ResourcesAllocated[resourceToReclaim]
+            requests = cs.AllocatedResources[resourceToReclaim]
         }
     }
     var usage *resource.Quantity
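Using the allocated value keeps eviction messages honest while a resize is pending: the spec request may already show a value the kubelet never granted. With hypothetical numbers:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // Hypothetical: a pending resize raised the spec request to 2Gi, but
        // the kubelet only ever allocated 1Gi; usage is judged against 1Gi.
        allocated := resource.MustParse("1Gi")
        usage := resource.MustParse("1536Mi")
        fmt.Println(usage.Cmp(allocated) > 0) // true: usage exceeds the allocation
    }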

View File

@@ -2134,7 +2134,7 @@ func TestEvictonMessageWithResourceResize(t *testing.T) {
     ContainerStatuses: []v1.ContainerStatus{
         {
             Name: "testcontainer",
-            ResourcesAllocated: newResourceList("", "100Mi", ""),
+            AllocatedResources: newResourceList("", "100Mi", ""),
         },
     },
 }

View File

@@ -2129,10 +2129,10 @@ func (kl *Kubelet) canAdmitPod(pods []*v1.Pod, pod *v1.Pod) (bool, string, strin
     for _, p := range pods {
         op := p.DeepCopy()
         for _, c := range op.Spec.Containers {
-            resourcesAllocated, found := kl.statusManager.GetContainerResourceAllocation(string(p.UID), c.Name)
+            allocatedResources, found := kl.statusManager.GetContainerResourceAllocation(string(p.UID), c.Name)
             if c.Resources.Requests != nil && found {
-                c.Resources.Requests[v1.ResourceCPU] = resourcesAllocated[v1.ResourceCPU]
-                c.Resources.Requests[v1.ResourceMemory] = resourcesAllocated[v1.ResourceMemory]
+                c.Resources.Requests[v1.ResourceCPU] = allocatedResources[v1.ResourceCPU]
+                c.Resources.Requests[v1.ResourceMemory] = allocatedResources[v1.ResourceMemory]
             }
         }
         otherPods = append(otherPods, op)
@@ -2422,14 +2422,14 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
     activePods := kl.filterOutInactivePods(existingPods)
     if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-        // To handle kubelet restarts, test pod admissibility using ResourcesAllocated values
+        // To handle kubelet restarts, test pod admissibility using AllocatedResources values
         // (for cpu & memory) from checkpoint store. If found, that is the source of truth.
         podCopy := pod.DeepCopy()
         for _, c := range podCopy.Spec.Containers {
-            resourcesAllocated, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), c.Name)
+            allocatedResources, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), c.Name)
             if c.Resources.Requests != nil && found {
-                c.Resources.Requests[v1.ResourceCPU] = resourcesAllocated[v1.ResourceCPU]
-                c.Resources.Requests[v1.ResourceMemory] = resourcesAllocated[v1.ResourceMemory]
+                c.Resources.Requests[v1.ResourceCPU] = allocatedResources[v1.ResourceCPU]
+                c.Resources.Requests[v1.ResourceMemory] = allocatedResources[v1.ResourceMemory]
             }
         }
         // Check if we can admit the pod; if not, reject it.
@@ -2529,7 +2529,7 @@ func isPodResizeInProgress(pod *v1.Pod, podStatus *v1.PodStatus) bool {
             continue
         }
         if diff.ObjectDiff(c.Resources.Limits, cs.Resources.Limits) != "" ||
-            diff.ObjectDiff(cs.ResourcesAllocated, cs.Resources.Requests) != "" {
+            diff.ObjectDiff(cs.AllocatedResources, cs.Resources.Requests) != "" {
             return true
         }
     }
@@ -2574,7 +2574,7 @@ func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, *v1.Pod, v1.PodResizeStatus)
     idx, found := podutil.GetIndexOfContainerStatus(podCopy.Status.ContainerStatuses, container.Name)
     if found {
         for rName, rQuantity := range container.Resources.Requests {
-            podCopy.Status.ContainerStatuses[idx].ResourcesAllocated[rName] = rQuantity
+            podCopy.Status.ContainerStatuses[idx].AllocatedResources[rName] = rQuantity
         }
     }
 }
@@ -2595,11 +2595,11 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod) {
         klog.V(5).InfoS("ContainerStatus not found", "pod", pod.Name, "container", container.Name)
         break
     }
-    if len(containerStatus.ResourcesAllocated) != len(container.Resources.Requests) {
-        klog.V(5).InfoS("ContainerStatus.ResourcesAllocated length mismatch", "pod", pod.Name, "container", container.Name)
+    if len(containerStatus.AllocatedResources) != len(container.Resources.Requests) {
+        klog.V(5).InfoS("ContainerStatus.AllocatedResources length mismatch", "pod", pod.Name, "container", container.Name)
         break
     }
-    if len(diff.ObjectDiff(container.Resources.Requests, containerStatus.ResourcesAllocated)) > 0 {
+    if len(diff.ObjectDiff(container.Resources.Requests, containerStatus.AllocatedResources)) > 0 {
         podResized = true
         break
     }
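Each of these call sites keys off the same invariant: the checkpointed allocation, not the mutable spec, decides admission and resize handling. The detection step reduces to an equality check over two ResourceLists; a standalone sketch (using apimachinery's semantic equality instead of the diff helper the kubelet uses):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        apiequality "k8s.io/apimachinery/pkg/api/equality"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        requests := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}
        allocated := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}
        // Any difference between desired requests and the checkpointed
        // allocation means a resize still has to be handled.
        podResized := !apiequality.Semantic.DeepEqual(requests, allocated)
        fmt.Println(podResized) // true
    }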

View File

@@ -1856,14 +1856,14 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
         }
     }
     container := kubecontainer.GetContainerSpec(pod, cName)
-    // ResourcesAllocated values come from checkpoint. It is the source-of-truth.
+    // AllocatedResources values come from checkpoint. It is the source-of-truth.
     found := false
-    status.ResourcesAllocated, found = kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName)
+    status.AllocatedResources, found = kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName)
     if !(container.Resources.Requests == nil && container.Resources.Limits == nil) && !found {
-        // Log error and fallback to ResourcesAllocated in oldStatus if it exists
+        // Log error and fallback to AllocatedResources in oldStatus if it exists
         klog.ErrorS(nil, "resource allocation not found in checkpoint store", "pod", pod.Name, "container", cName)
         if oldStatusFound {
-            status.ResourcesAllocated = oldStatus.ResourcesAllocated
+            status.AllocatedResources = oldStatus.AllocatedResources
         }
     }
     if oldStatus.Resources == nil {
@@ -1887,17 +1887,17 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
         }
     }
     // Convert Requests
-    if status.ResourcesAllocated != nil {
+    if status.AllocatedResources != nil {
         requests = make(v1.ResourceList)
         if cStatus.Resources != nil && cStatus.Resources.CPURequest != nil {
             requests[v1.ResourceCPU] = cStatus.Resources.CPURequest.DeepCopy()
         } else {
-            determineResource(v1.ResourceCPU, status.ResourcesAllocated, oldStatus.Resources.Requests, requests)
+            determineResource(v1.ResourceCPU, status.AllocatedResources, oldStatus.Resources.Requests, requests)
         }
-        if memory, found := status.ResourcesAllocated[v1.ResourceMemory]; found {
+        if memory, found := status.AllocatedResources[v1.ResourceMemory]; found {
             requests[v1.ResourceMemory] = memory.DeepCopy()
         }
-        if ephemeralStorage, found := status.ResourcesAllocated[v1.ResourceEphemeralStorage]; found {
+        if ephemeralStorage, found := status.AllocatedResources[v1.ResourceEphemeralStorage]; found {
            requests[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
         }
     }
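The visible precedence when rebuilding the v1 requests: a CPU request reported by the runtime wins, otherwise the helper falls back through the allocated and old values, while memory and ephemeral-storage are copied straight from the allocation. A condensed standalone illustration (hypothetical simplification that elides the determineResource fallback chain):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func convertRequests(runtimeCPU *resource.Quantity, allocated v1.ResourceList) v1.ResourceList {
        requests := make(v1.ResourceList)
        if runtimeCPU != nil {
            // Prefer the value the runtime reports as actually configured.
            requests[v1.ResourceCPU] = runtimeCPU.DeepCopy()
        } else if cpu, ok := allocated[v1.ResourceCPU]; ok {
            requests[v1.ResourceCPU] = cpu.DeepCopy()
        }
        if mem, ok := allocated[v1.ResourceMemory]; ok {
            requests[v1.ResourceMemory] = mem.DeepCopy()
        }
        return requests
    }

    func main() {
        allocated := v1.ResourceList{
            v1.ResourceCPU:    resource.MustParse("500m"),
            v1.ResourceMemory: resource.MustParse("1Gi"),
        }
        reqs := convertRequests(nil, allocated)
        cpu := reqs[v1.ResourceCPU]
        fmt.Println(cpu.String()) // 500m: fell back to the allocation
    }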

View File

@@ -4220,7 +4220,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
         Image: "img",
         ImageID: "img1234",
         State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
-        ResourcesAllocated: CPU1AndMem1G,
+        AllocatedResources: CPU1AndMem1G,
         Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
     },
 },
@@ -4243,7 +4243,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
         Image: "img",
         ImageID: "img1234",
         State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
-        ResourcesAllocated: CPU1AndMem1G,
+        AllocatedResources: CPU1AndMem1G,
         Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
     },
 },
@@ -4266,7 +4266,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
         Image: "img",
         ImageID: "img1234",
         State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
-        ResourcesAllocated: CPU1AndMem1GAndStorage2G,
+        AllocatedResources: CPU1AndMem1GAndStorage2G,
         Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
     },
 },
@@ -4289,7 +4289,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
         Image: "img",
         ImageID: "img1234",
         State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
-        ResourcesAllocated: CPU1AndMem1GAndStorage2G,
+        AllocatedResources: CPU1AndMem1GAndStorage2G,
         Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
     },
 },
@@ -4311,7 +4311,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
         Image: "img",
         ImageID: "img1234",
         State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
-        ResourcesAllocated: CPU1AndMem1GAndStorage2G,
+        AllocatedResources: CPU1AndMem1GAndStorage2G,
         Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
     },
 },
@@ -4346,7 +4346,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
     }
     kubelet.statusManager.SetPodAllocation(tPod)
     if tc.Resources != nil {
-        tPod.Status.ContainerStatuses[i].ResourcesAllocated = tc.Resources[i].Requests
+        tPod.Status.ContainerStatuses[i].AllocatedResources = tc.Resources[i].Requests
         testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
             MemoryLimit: tc.Resources[i].Limits.Memory(),
             CPULimit: tc.Resources[i].Limits.Cpu(),

View File

@@ -2494,7 +2494,7 @@ func TestHandlePodResourcesResize(t *testing.T) {
     ContainerStatuses: []v1.ContainerStatus{
         {
             Name: "c1",
-            ResourcesAllocated: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
+            AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
             Resources: &v1.ResourceRequirements{},
         },
     },
@@ -2584,9 +2584,9 @@ func TestHandlePodResourcesResize(t *testing.T) {
 for _, tt := range tests {
     tt.pod.Spec.Containers[0].Resources.Requests = tt.newRequests
-    tt.pod.Status.ContainerStatuses[0].ResourcesAllocated = v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}
+    tt.pod.Status.ContainerStatuses[0].AllocatedResources = v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}
     kubelet.handlePodResourcesResize(tt.pod)
-    assert.Equal(t, tt.expectedAllocations, tt.pod.Status.ContainerStatuses[0].ResourcesAllocated, tt.name)
+    assert.Equal(t, tt.expectedAllocations, tt.pod.Status.ContainerStatuses[0].AllocatedResources, tt.name)
     assert.Equal(t, tt.expectedResize, tt.pod.Status.Resize, tt.name)
     testKubelet.fakeKubeClient.ClearActions()
 }

View File

@@ -804,27 +804,27 @@ func TestGenerateLinuxContainerResources(t *testing.T) {
         &runtimeapi.LinuxContainerResources{CpuShares: 2, OomScoreAdj: 1000},
     },
     {
-        "requests & limits, cpu & memory, guaranteed qos - container status with resourcesAllocated",
+        "requests & limits, cpu & memory, guaranteed qos - container status with allocatedResources",
         true,
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
         []v1.ContainerStatus{
             {
                 Name: "c1",
-                ResourcesAllocated: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
+                AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
             },
         },
         &runtimeapi.LinuxContainerResources{CpuShares: 204, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
     },
     {
-        "requests & limits, cpu & memory, burstable qos - container status with resourcesAllocated",
+        "requests & limits, cpu & memory, burstable qos - container status with allocatedResources",
         true,
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("750Mi")},
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
         []v1.ContainerStatus{
             {
                 Name: "c1",
-                ResourcesAllocated: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
+                AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
             },
         },
         &runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 786432000, OomScoreAdj: 970},
@@ -838,27 +838,27 @@ func TestGenerateLinuxContainerResources(t *testing.T) {
         &runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
     },
     {
-        "requests & limits, cpu & memory, burstable qos - container status with resourcesAllocated",
+        "requests & limits, cpu & memory, burstable qos - container status with allocatedResources",
         false,
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("750Mi")},
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
         []v1.ContainerStatus{
             {
                 Name: "c1",
-                ResourcesAllocated: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
+                AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
             },
         },
         &runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 786432000, OomScoreAdj: 970},
     },
     {
-        "requests & limits, cpu & memory, guaranteed qos - container status with resourcesAllocated",
+        "requests & limits, cpu & memory, guaranteed qos - container status with allocatedResources",
         false,
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
         v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
         []v1.ContainerStatus{
             {
                 Name: "c1",
-                ResourcesAllocated: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
+                AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
             },
         },
         &runtimeapi.LinuxContainerResources{CpuShares: 204, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
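The expected numbers in these cases are pure arithmetic: CPU shares are milliCPU * 1024 / 1000 with integer truncation (200m → 204, 250m → 256, and a floor of 2 shares, which is what the best-effort case at the top of the hunk shows), and memory limits are raw bytes (500Mi → 524288000, 750Mi → 786432000); guaranteed-QoS containers get the fixed OOM score adjust of -997. A quick verification, mirroring the kubelet's MilliCPUToShares helper:

    package main

    import "fmt"

    func milliCPUToShares(milliCPU int64) int64 {
        const sharesPerCPU, milliCPUToCPU, minShares = 1024, 1000, 2
        shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
        if shares < minShares {
            return minShares // best-effort containers land here
        }
        return shares
    }

    func main() {
        fmt.Println(milliCPUToShares(200)) // 204
        fmt.Println(milliCPUToShares(250)) // 256
        fmt.Println(milliCPUToShares(0))   // 2
        fmt.Println(500 * 1024 * 1024)     // 524288000 (500Mi)
        fmt.Println(750 * 1024 * 1024)     // 786432000 (750Mi)
    }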

View File

@@ -532,13 +532,13 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
     // Determine if the *running* container needs resource update by comparing v1.Spec.Resources (desired)
     // with v1.Status.Resources / runtime.Status.Resources (last known actual).
-    // Proceed only when kubelet has accepted the resize a.k.a v1.Spec.Resources.Requests == v1.Status.ResourcesAllocated.
+    // Proceed only when kubelet has accepted the resize a.k.a v1.Spec.Resources.Requests == v1.Status.AllocatedResources.
     // Skip if runtime containerID doesn't match pod.Status containerID (container is restarting)
     apiContainerStatus, exists := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name)
     if !exists || apiContainerStatus.State.Running == nil || apiContainerStatus.Resources == nil ||
         kubeContainerStatus.State != kubecontainer.ContainerStateRunning ||
         kubeContainerStatus.ID.String() != apiContainerStatus.ContainerID ||
-        len(diff.ObjectDiff(container.Resources.Requests, apiContainerStatus.ResourcesAllocated)) != 0 {
+        len(diff.ObjectDiff(container.Resources.Requests, apiContainerStatus.AllocatedResources)) != 0 {
         return true
     }
@@ -569,7 +569,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
     desiredResources := containerResources{
         memoryLimit: desiredMemoryLimit,
-        memoryRequest: apiContainerStatus.ResourcesAllocated.Memory().Value(),
+        memoryRequest: apiContainerStatus.AllocatedResources.Memory().Value(),
         cpuLimit: desiredCPULimit,
         cpuRequest: desiredCPURequest,
     }
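The memoryRequest line works because v1.ResourceList carries accessor helpers: Memory() returns the stored quantity, or a zero Quantity when the key is absent, so chaining .Value() never panics. For example:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        rl := v1.ResourceList{v1.ResourceMemory: resource.MustParse("500Mi")}
        fmt.Println(rl.Memory().Value()) // 524288000

        var empty v1.ResourceList
        fmt.Println(empty.Memory().Value()) // 0: a missing key yields a zero Quantity
    }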

View File

@@ -65,7 +65,7 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
     memoryRequest := container.Resources.Requests.Memory().Value()
     if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
         if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok {
-            memoryRequest = cs.ResourcesAllocated.Memory().Value()
+            memoryRequest = cs.AllocatedResources.Memory().Value()
         }
     }
     oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
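The closing formula scales the OOM score adjust inversely with the share of node memory the container was allocated: the larger the allocation, the lower (safer) the score. Worked through with hypothetical numbers:

    package main

    import "fmt"

    func main() {
        // Hypothetical: 500Mi allocated on a node with 4Gi capacity.
        memoryRequest := int64(500 * 1024 * 1024)
        memoryCapacity := int64(4 * 1024 * 1024 * 1024)
        oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity
        fmt.Println(oomScoreAdjust) // 878 = 1000 - 122
    }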

View File

@@ -137,7 +137,7 @@ type Manager interface {
     // the provided podUIDs.
     RemoveOrphanedStatuses(podUIDs map[types.UID]bool)
-    // GetContainerResourceAllocation returns checkpointed ResourcesAllocated value for the container
+    // GetContainerResourceAllocation returns checkpointed AllocatedResources value for the container
     GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceList, bool)
     // GetPodResizeStatus returns checkpointed PodStatus.Resize value
@@ -186,7 +186,7 @@ func isPodStatusByKubeletEqual(oldStatus, status *v1.PodStatus) bool {
 }
 func (m *manager) Start() {
-    // Create pod allocation checkpoint manager even if client is nil so as to allow local get/set of ResourcesAllocated & Resize
+    // Create pod allocation checkpoint manager even if client is nil so as to allow local get/set of AllocatedResources & Resize
     if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
         stateImpl, err := state.NewStateCheckpoint(m.stateFileDirectory, podStatusManagerStateFile)
         if err != nil {
@@ -232,7 +232,7 @@ func (m *manager) Start() {
     }, 0)
 }
-// GetContainerResourceAllocation returns the last checkpointed ResourcesAllocated values
+// GetContainerResourceAllocation returns the last checkpointed AllocatedResources values
 // If checkpoint manager has not been initialized, it returns nil, false
 func (m *manager) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceList, bool) {
     m.podStatusesLock.RLock()
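Callers treat the getter as a read-through into the checkpointed allocation map, guarded by the status manager's read lock. A toy stand-in for that shape (hypothetical simplification, not the kubelet's state implementation):

    package main

    import (
        "fmt"
        "sync"

        v1 "k8s.io/api/core/v1"
    )

    // allocationStore is a toy stand-in for the checkpointed state:
    // pod UID -> container name -> allocated resources.
    type allocationStore struct {
        mu    sync.RWMutex
        state map[string]map[string]v1.ResourceList
    }

    func (s *allocationStore) GetContainerResourceAllocation(podUID, containerName string) (v1.ResourceList, bool) {
        s.mu.RLock()
        defer s.mu.RUnlock()
        if s.state == nil { // never initialized, mirroring the nil-checkpoint case
            return nil, false
        }
        rl, ok := s.state[podUID][containerName]
        return rl, ok
    }

    func main() {
        var s allocationStore
        _, ok := s.GetContainerResourceAllocation("uid-1", "c1")
        fmt.Println(ok) // false: nothing checkpointed yet
    }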

View File

@@ -763,7 +763,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
         usageFgEnabled corev1.ResourceList
         usageFgDisabled corev1.ResourceList
     }{
-        "verify Max(Container.Spec.Requests, ContainerStatus.ResourcesAllocated) for memory resource": {
+        "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for memory resource": {
             pod: &api.Pod{
                 Spec: api.PodSpec{
                     Containers: []api.Container{
@@ -782,7 +782,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
             Status: api.PodStatus{
                 ContainerStatuses: []api.ContainerStatus{
                     {
-                        ResourcesAllocated: api.ResourceList{
+                        AllocatedResources: api.ResourceList{
                             api.ResourceMemory: resource.MustParse("150Mi"),
                         },
                     },
@@ -804,7 +804,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
             generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
         },
     },
-    "verify Max(Container.Spec.Requests, ContainerStatus.ResourcesAllocated) for CPU resource": {
+    "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU resource": {
         pod: &api.Pod{
             Spec: api.PodSpec{
                 Containers: []api.Container{
@@ -823,7 +823,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
             Status: api.PodStatus{
                 ContainerStatuses: []api.ContainerStatus{
                     {
-                        ResourcesAllocated: api.ResourceList{
+                        AllocatedResources: api.ResourceList{
                             api.ResourceCPU: resource.MustParse("150m"),
                         },
                     },
@@ -845,7 +845,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
             generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
         },
     },
-    "verify Max(Container.Spec.Requests, ContainerStatus.ResourcesAllocated) for CPU and memory resource": {
+    "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU and memory resource": {
         pod: &api.Pod{
             Spec: api.PodSpec{
                 Containers: []api.Container{
@@ -866,7 +866,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
             Status: api.PodStatus{
                 ContainerStatuses: []api.ContainerStatus{
                     {
-                        ResourcesAllocated: api.ResourceList{
+                        AllocatedResources: api.ResourceList{
                             api.ResourceCPU: resource.MustParse("150m"),
                             api.ResourceMemory: resource.MustParse("250Mi"),
                         },
@@ -895,7 +895,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
            generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
         },
     },
-    "verify Max(Container.Spec.Requests, ContainerStatus.ResourcesAllocated==nil) for CPU and memory resource": {
+    "verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources==nil) for CPU and memory resource": {
         pod: &api.Pod{
             Spec: api.PodSpec{
                 Containers: []api.Container{
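These cases pin down the quota rule under in-place resize: with the feature gate enabled, each resource is charged at max(spec request, allocated), so a pod being sized down keeps paying for the larger amount until the kubelet actually applies the change; with the gate disabled, only spec requests count. A small sketch of that rule with made-up numbers:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // maxRequests charges each resource at max(spec request, allocated),
    // mirroring the behavior the quota tests above verify.
    func maxRequests(spec, allocated v1.ResourceList) v1.ResourceList {
        out := v1.ResourceList{}
        for name, q := range spec {
            out[name] = q.DeepCopy()
        }
        for name, q := range allocated {
            if cur, ok := out[name]; !ok || q.Cmp(cur) > 0 {
                out[name] = q.DeepCopy()
            }
        }
        return out
    }

    func main() {
        spec := v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")}
        allocated := v1.ResourceList{v1.ResourceMemory: resource.MustParse("150Mi")}
        got := maxRequests(spec, allocated)
        fmt.Println(got.Memory().String()) // 150Mi
    }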

View File

@@ -1488,7 +1488,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
     ContainerStatuses: []v1.ContainerStatus{
         {
             Name: "c1",
-            ResourcesAllocated: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+            AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
         },
     },
 },
@@ -1497,7 +1497,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 tests := []struct {
     name string
     requests v1.ResourceList
-    resourcesAllocated v1.ResourceList
+    allocatedResources v1.ResourceList
     resizeStatus v1.PodResizeStatus
     expectedResource Resource
     expectedNon0CPU int64
@@ -1506,7 +1506,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
     {
         name: "Pod with no pending resize",
         requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-        resourcesAllocated: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+        allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
         resizeStatus: "",
         expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
         expectedNon0CPU: cpu500m.MilliValue(),
@@ -1515,7 +1515,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
     {
         name: "Pod with resize in progress",
         requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-        resourcesAllocated: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+        allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
         resizeStatus: v1.PodResizeStatusInProgress,
         expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
         expectedNon0CPU: cpu500m.MilliValue(),
@@ -1524,7 +1524,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
     {
         name: "Pod with deferred resize",
         requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-        resourcesAllocated: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+        allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
         resizeStatus: v1.PodResizeStatusDeferred,
         expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
         expectedNon0CPU: cpu700m.MilliValue(),
@@ -1533,7 +1533,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
     {
         name: "Pod with infeasible resize",
         requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
-        resourcesAllocated: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+        allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
         resizeStatus: v1.PodResizeStatusInfeasible,
         expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
         expectedNon0CPU: cpu500m.MilliValue(),
@@ -1545,7 +1545,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
     t.Run(tt.name, func(t *testing.T) {
         pod := testpod.DeepCopy()
         pod.Spec.Containers[0].Resources.Requests = tt.requests
-        pod.Status.ContainerStatuses[0].ResourcesAllocated = tt.resourcesAllocated
+        pod.Status.ContainerStatuses[0].AllocatedResources = tt.allocatedResources
         pod.Status.Resize = tt.resizeStatus
         res, non0CPU, non0Mem := calculateResource(pod)
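Taken together, the expectations encode the scheduler's accounting rule for a pod mid-resize: if the resize was marked infeasible, count the allocated values; otherwise count the element-wise max of spec requests and allocation, so a deferred grow-request still reserves the larger amount. A condensed sketch of that choice (hypothetical helper, not the literal calculateResource code):

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // requestToAccount picks the quantity the scheduler counts for one resource.
    func requestToAccount(resize v1.PodResizeStatus, request, allocated resource.Quantity) resource.Quantity {
        if resize == v1.PodResizeStatusInfeasible {
            return allocated // the grow request will never fit; charge what is held
        }
        if request.Cmp(allocated) > 0 { // max(request, allocated)
            return request
        }
        return allocated
    }

    func main() {
        req, alloc := resource.MustParse("700m"), resource.MustParse("500m")
        deferred := requestToAccount(v1.PodResizeStatusDeferred, req, alloc)
        infeasible := requestToAccount(v1.PodResizeStatusInfeasible, req, alloc)
        fmt.Println(deferred.String(), infeasible.String()) // 700m 500m
    }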

View File

@@ -2710,12 +2710,12 @@ type ContainerStatus struct {
     // same as false.
     // +optional
     Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"`
-    // ResourcesAllocated represents the compute resources allocated for this container by the
+    // AllocatedResources represents the compute resources allocated for this container by the
     // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
     // and after successfully admitting desired pod resize.
     // +featureGate=InPlacePodVerticalScaling
     // +optional
-    ResourcesAllocated ResourceList `json:"resourcesAllocated,omitempty" protobuf:"bytes,10,rep,name=resourcesAllocated,casttype=ResourceList,castkey=ResourceName"`
+    AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
     // Resources represents the compute resource requests and limits that have been successfully
     // enacted on the running container after it has been started or has been successfully resized.
     // +featureGate=InPlacePodVerticalScaling
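Because the json tag changes with the field, the rename is visible on the wire: the status key becomes allocatedResources, while the protobuf field number stays 10, so the binary encoding remains compatible. An illustrative serialized container status (values made up):

    "containerStatuses": [
      {
        "name": "c1",
        "allocatedResources": { "cpu": "200m", "memory": "500Mi" },
        "resources": { "requests": { "cpu": "200m", "memory": "500Mi" } }
      }
    ]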

View File

@@ -200,7 +200,7 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat
     tcStatus := v1.ContainerStatus{
         Name: tcInfo.Name,
-        ResourcesAllocated: alloc,
+        AllocatedResources: alloc,
     }
     return tc, tcStatus
 }
@@ -279,9 +279,9 @@ func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo, flagError boo
     _, tcStatus := makeTestContainer(ci)
     if flagError {
-        framework.ExpectEqual(tcStatus.ResourcesAllocated, cStatus.ResourcesAllocated)
+        framework.ExpectEqual(tcStatus.AllocatedResources, cStatus.AllocatedResources)
     }
-    if diff.ObjectDiff(cStatus.ResourcesAllocated, tcStatus.ResourcesAllocated) != "" {
+    if diff.ObjectDiff(cStatus.AllocatedResources, tcStatus.AllocatedResources) != "" {
         return false
     }
 }