Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)

Merge pull request #130917 from vinaykul/ippr-mem-req-ucr
Invoke UpdateContainerResources or trigger container restarts when memory requests are resized

Commit 52ab771774
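In short, once memory requests are actuated, a request-only change is handled like any other in-place resize: the kubelet either calls the runtime's UpdateContainerResources or, when the container's memory ResizePolicy is RestartContainer, kills and restarts the container. A minimal sketch of that decision, assuming the InPlacePodVerticalScaling feature is enabled; the helper and type names below are illustrative, not the kubelet's real identifiers (the real logic is in computePodResizeAction further down):

package main

import "fmt"

type resizeAction string

const (
	actionNone    resizeAction = "no-op"
	actionUpdate  resizeAction = "UpdateContainerResources"
	actionRestart resizeAction = "restart container"
)

// decideMemoryRequestResize: equal values are a no-op; otherwise the container's
// memory ResizePolicy chooses between an in-place update and a restart.
func decideMemoryRequestResize(desiredBytes, currentBytes int64, restartPolicy bool) resizeAction {
	if desiredBytes == currentBytes {
		return actionNone
	}
	if restartPolicy { // ResizePolicy: RestartContainer
		return actionRestart
	}
	return actionUpdate // ResizePolicy: NotRequired (the default)
}

func main() {
	fmt.Println(decideMemoryRequestResize(200<<20, 100<<20, false)) // UpdateContainerResources
	fmt.Println(decideMemoryRequestResize(200<<20, 100<<20, true))  // restart container
}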
@@ -2994,7 +2994,6 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
// for any running containers. Specifically, the following differences are ignored:
// - Non-resizable containers: non-restartable init containers, ephemeral containers
// - Non-resizable resources: only CPU & memory are resizable
// - Non-actuated resources: memory requests are not actuated
// - Non-running containers: they will be sized correctly when (re)started
func (kl *Kubelet) isPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
return !podutil.VisitContainers(&allocatedPod.Spec, podutil.InitContainers|podutil.Containers,

@@ -3012,9 +3011,9 @@ func (kl *Kubelet) isPodResizeInProgress(allocatedPod *v1.Pod, podStatus *kubeco
actuatedResources, _ := kl.allocationManager.GetActuatedResources(allocatedPod.UID, allocatedContainer.Name)
allocatedResources := allocatedContainer.Resources

// Memory requests are excluded since they don't need to be actuated.
return allocatedResources.Requests[v1.ResourceCPU].Equal(actuatedResources.Requests[v1.ResourceCPU]) &&
allocatedResources.Limits[v1.ResourceCPU].Equal(actuatedResources.Limits[v1.ResourceCPU]) &&
allocatedResources.Requests[v1.ResourceMemory].Equal(actuatedResources.Requests[v1.ResourceMemory]) &&
allocatedResources.Limits[v1.ResourceMemory].Equal(actuatedResources.Limits[v1.ResourceMemory])
})
}
@@ -2111,6 +2111,13 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
} else {
preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Requests, resources.Requests)
}
// TODO(tallclair,vinaykul,InPlacePodVerticalScaling): Investigate defaulting to actuated resources instead of allocated resources above
if _, exists := resources.Requests[v1.ResourceMemory]; exists {
// Get memory requests from actuated resources
if actuatedResources, found := kl.allocationManager.GetActuatedResources(pod.UID, allocatedContainer.Name); found {
resources.Requests[v1.ResourceMemory] = *actuatedResources.Requests.Memory()
}
}
}

return resources
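The added block above only changes what the kubelet reports in the container status: if the allocation manager knows the actuated memory request, that value wins; otherwise the allocated (or preserved old) value already in resources is kept. A minimal sketch of that precedence, using plain int64 byte counts instead of resource.Quantity (names are illustrative):

// Sketch: reported request = actuated value when known, else the allocated value.
// "found" plays the role of the boolean returned by GetActuatedResources.
func reportedMemoryRequest(allocatedBytes, actuatedBytes int64, found bool) int64 {
	if found {
		return actuatedBytes
	}
	return allocatedBytes
}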
@@ -3865,8 +3865,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
actuated: &testResources{100, 200, 150, 200},
isRunning: true,
}},
// Memory requests aren't actuated and should be ignored.
expectHasResize: false,
expectHasResize: true,
}, {
name: "simple resized container/cpu+mem req",
containers: []testContainer{{

@@ -634,8 +634,8 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
return true
}

determineContainerResize := func(rName v1.ResourceName, specValue, statusValue int64) (resize, restart bool) {
if specValue == statusValue {
determineContainerResize := func(rName v1.ResourceName, desiredValue, currentValue int64) (resize, restart bool) {
if desiredValue == currentValue {
return false, false
}
for _, policy := range container.ResizePolicy {

@@ -646,7 +646,7 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
// If a resource policy isn't set, the implicit default is NotRequired.
return true, false
}
markContainerForUpdate := func(rName v1.ResourceName, specValue, statusValue int64) {
markContainerForUpdate := func(rName v1.ResourceName, desiredValue, currentValue int64) {
cUpdateInfo := containerToUpdateInfo{
container: &container,
kubeContainerID: kubeContainerStatus.ID,

@@ -655,18 +655,19 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
}
// Order the container updates such that resource decreases are applied before increases
switch {
case specValue > statusValue: // append
case desiredValue > currentValue: // append
changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)
case specValue < statusValue: // prepend
case desiredValue < currentValue: // prepend
changes.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})
copy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])
changes.ContainersToUpdate[rName][0] = cUpdateInfo
}
}
resizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
resizeMemReq, restartMemReq := determineContainerResize(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
resizeCPULim, restartCPULim := determineContainerResize(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
resizeCPUReq, restartCPUReq := determineContainerResize(v1.ResourceCPU, desiredResources.cpuRequest, currentResources.cpuRequest)
if restartCPULim || restartCPUReq || restartMemLim {
if restartCPULim || restartCPUReq || restartMemLim || restartMemReq {
// resize policy requires this container to restart
changes.ContainersToKill[kubeContainerStatus.ID] = containerToKillInfo{
name: kubeContainerStatus.Name,

@@ -683,6 +684,8 @@ func (m *kubeGenericRuntimeManager) computePodResizeAction(pod *v1.Pod, containe
} else {
if resizeMemLim {
markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryLimit, currentResources.memoryLimit)
} else if resizeMemReq {
markContainerForUpdate(v1.ResourceMemory, desiredResources.memoryRequest, currentResources.memoryRequest)
}
if resizeCPULim {
markContainerForUpdate(v1.ResourceCPU, desiredResources.cpuLimit, currentResources.cpuLimit)
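The "Order the container updates" comment above is the key invariant: applying decreases before increases keeps the pod's aggregate usage within its limits during the transition. A small self-contained sketch of that prepend/append bookkeeping, with illustrative names (the real code prepends by shifting the slice in place rather than reallocating):

package main

import "fmt"

type update struct {
	name             string
	desired, current int64
}

// orderUpdates appends increases and prepends decreases, so decreases run first.
func orderUpdates(updates []update) []update {
	var ordered []update
	for _, u := range updates {
		switch {
		case u.desired > u.current: // increase: append
			ordered = append(ordered, u)
		case u.desired < u.current: // decrease: prepend
			ordered = append([]update{u}, ordered...)
		}
	}
	return ordered
}

func main() {
	got := orderUpdates([]update{
		{"c1", 200, 100}, // increase
		{"c2", 50, 150},  // decrease
	})
	fmt.Println(got[0].name, got[1].name) // prints: c2 c1 (decrease applied first)
}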
@@ -2897,6 +2897,96 @@ func TestComputePodActionsForPodResize(t *testing.T) {
return &pa
},
},
"Update container memory (requests only) with RestartContainer policy for memory": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[2]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
Requests: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
killMap[kcs.ID] = containerToKillInfo{
container: &pod.Spec.Containers[2],
name: pod.Spec.Containers[2].Name,
}
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{2},
ContainersToKill: killMap,
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
UpdatePodResources: true,
}
return &pa
},
},
"Update container memory (requests only) with RestartNotRequired policy for memory": {
setupFn: func(pod *v1.Pod) {
c := &pod.Spec.Containers[2]
c.ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartNotRequired}
c.Resources = v1.ResourceRequirements{
Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
Requests: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
}
setupActuatedResources(pod, c, v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: cpu200m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
Requests: v1.ResourceList{
v1.ResourceCPU: cpu100m.DeepCopy(),
v1.ResourceMemory: mem200M.DeepCopy(),
},
})
},
getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
killMap[kcs.ID] = containerToKillInfo{
container: &pod.Spec.Containers[2],
name: pod.Spec.Containers[2].Name,
}
pa := podActions{
SandboxID: podStatus.SandboxStatuses[0].Id,
ContainersToStart: []int{},
ContainersToKill: getKillMap(pod, podStatus, []int{}),
ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
v1.ResourceMemory: {
{
container: &pod.Spec.Containers[2],
kubeContainerID: kcs.ID,
desiredContainerResources: containerResources{
memoryLimit: mem200M.Value(),
memoryRequest: mem100M.Value(),
cpuLimit: cpu200m.MilliValue(),
cpuRequest: cpu100m.MilliValue(),
},
currentContainerResources: &containerResources{
memoryLimit: mem200M.Value(),
memoryRequest: mem200M.Value(),
cpuLimit: cpu200m.MilliValue(),
cpuRequest: cpu100m.MilliValue(),
},
},
},
},
}
return &pa
},
},
} {
t.Run(desc, func(t *testing.T) {
pod, status := makeBasePodAndStatus()
@@ -93,8 +93,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, increasedCPU, increasedMem, increasedCPU, increasedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
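The patchString literals in these test cases are the resize payloads the e2e suite sends to the API server. For reference, a sketch of what the first one renders to once the %s verbs are filled in; the concrete quantities are made up for the example, the real values come from increasedCPU and increasedMem:

package main

import "fmt"

const examplePatch = `{"spec":{"containers":[
	{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"256Mi"},"limits":{"cpu":"200m","memory":"256Mi"}}}
]}}`

func main() { fmt.Println(examplePatch) }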
@@ -105,8 +105,7 @@ func doPodResizeTests() {
},
},
{
name: "Guaranteed QoS pod, one container - decrease CPU only",
testRollback: true,
name: "Guaranteed QoS pod, one container - decrease CPU only",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -116,8 +115,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, reducedCPU, reducedCPU),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -126,6 +125,7 @@ func doPodResizeTests() {
MemPolicy: &noRestart,
},
},
testRollback: true,
},
{
name: "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2, c3) ; decrease: CPU (c2)",

@@ -150,10 +150,10 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}},
{"name":"c2", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}},
{"name":"c3", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`,
increasedCPU, increasedCPU,
offsetCPU(1, reducedCPU), offsetMemory(1, increasedMem), offsetCPU(1, reducedCPU), offsetMemory(1, increasedMem),
offsetCPU(2, increasedCPU), offsetMemory(2, increasedMem), offsetCPU(2, increasedCPU), offsetMemory(2, increasedMem)),

@@ -179,8 +179,7 @@ func doPodResizeTests() {
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
testRollback: true,
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -188,14 +187,15 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"%s"}}}
]}}`, reducedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPULimit, MemReq: reducedMem, MemLim: originalMemLimit},
},
},
testRollback: true,
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests only",

@@ -206,8 +206,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"%s"}}}
]}}`, increasedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -224,8 +224,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"limits":{"memory":"%s"}}}
]}}`, increasedMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -234,8 +234,7 @@ func doPodResizeTests() {
},
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
testRollback: true,
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -243,18 +242,18 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"}}}
]}}`, reducedCPU),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: reducedCPU, CPULim: originalCPULimit, MemReq: originalMem, MemLim: originalMemLimit},
},
},
testRollback: true,
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
testRollback: true,
name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -262,14 +261,15 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"limits":{"cpu":"%s"}}}
]}}`, reducedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: reducedCPULimit, MemReq: originalMem, MemLim: originalMemLimit},
},
},
testRollback: true,
},
{
name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests only",

@@ -280,8 +280,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"}}}
]}}`, increasedCPU),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -298,8 +298,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"limits":{"cpu":"%s"}}}
]}}`, increasedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -316,8 +316,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, reducedCPU, reducedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -334,8 +334,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, increasedCPU, increasedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -352,8 +352,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, reducedCPU, increasedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -370,8 +370,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, increasedCPU, reducedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -388,8 +388,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"%s"},"limits":{"memory":"%s"}}}
]}}`, increasedMem, increasedMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -406,8 +406,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"%s"},"limits":{"memory":"%s"}}}
]}}`, reducedMem, increasedMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -424,8 +424,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"},"limits":{"memory":"%s"}}}
]}}`, reducedCPU, increasedMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -442,8 +442,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"%s"},"limits":{"cpu":"%s"}}}
]}}`, reducedMem, increasedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -460,8 +460,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"%s"},"limits":{"cpu":"%s"}}}
]}}`, increasedMem, reducedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -478,8 +478,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"memory":"%s"}}}
]}}`, reducedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -496,8 +496,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s"}}}
]}}`, increasedCPU),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -514,8 +514,8 @@ func doPodResizeTests() {
},
},
patchString: `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"1m"},"limits":{"cpu":"5m"}}}
]}}`,
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -524,8 +524,7 @@ func doPodResizeTests() {
},
},
{
name: "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
testRollback: true,
name: "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -535,8 +534,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, increasedCPU, increasedMem, increasedCPU, increasedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -546,10 +545,34 @@ func doPodResizeTests() {
RestartCount: 1,
},
},
testRollback: true,
},
{
name: "Burstable QoS pod, one container - decrease CPU (NotRequired) & memory (RestartContainer)",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPULimit, MemReq: originalMem, MemLim: originalMemLimit},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, reducedCPU, reducedMem, reducedCPULimit, reducedMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: reducedCPU, CPULim: reducedCPULimit, MemReq: reducedMem, MemLim: reducedMemLimit},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
RestartCount: 1,
},
},
testRollback: true,
},
{
name: "Burstable QoS pod, one container - decrease memory request (RestartContainer memory resize policy)",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -560,17 +583,40 @@ func doPodResizeTests() {
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, reducedCPU, reducedMem, reducedCPULimit, reducedMemLimit),
]}}`, originalCPU, reducedMem, originalCPULimit, originalMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: reducedCPU, CPULim: reducedCPULimit, MemReq: reducedMem, MemLim: reducedMemLimit},
Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPULimit, MemReq: reducedMem, MemLim: originalMemLimit},
CPUPolicy: &noRestart,
MemPolicy: &doRestart,
RestartCount: 1,
},
},
},
{
name: "Burstable QoS pod, one container - increase memory request (NoRestart memory resize policy)",
containers: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPULimit, MemReq: originalMem, MemLim: originalMemLimit},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, originalCPU, increasedMem, originalCPULimit, originalMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: originalCPU, CPULim: originalCPULimit, MemReq: increasedMem, MemLim: originalMemLimit},
CPUPolicy: &noRestart,
MemPolicy: &noRestart,
RestartCount: 0,
},
},
},
{
name: "Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod)",
containers: []e2epod.ResizableContainerInfo{

@@ -594,9 +640,9 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}},
{"name":"c3", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s"}}}
]}}`,
increasedCPU, increasedMem, increasedCPULimit, increasedMemLimit,
offsetCPU(2, reducedCPU), offsetMemory(2, reducedMem), offsetCPU(2, reducedCPULimit)),
expected: []e2epod.ResizableContainerInfo{

@@ -643,9 +689,9 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s"}}},
{"name":"c2", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`,
reducedCPU, reducedMem, reducedCPULimit,
offsetCPU(2, increasedCPU), offsetMemory(2, increasedMem), offsetCPU(2, increasedCPULimit), offsetMemory(2, increasedMemLimit)),
expected: []e2epod.ResizableContainerInfo{

@@ -693,9 +739,9 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c2", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}},
{"name":"c3", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`,
offsetCPU(1, increasedCPU), offsetMemory(1, increasedMem), offsetCPU(1, increasedCPULimit), offsetMemory(1, increasedMemLimit),
reducedCPU, reducedMem, reducedCPULimit, reducedMemLimit),
expected: []e2epod.ResizableContainerInfo{

@@ -736,8 +782,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, increasedCPU, increasedMem, increasedCPU, increasedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -766,8 +812,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c2", "resources":{"requests":{"cpu":"%s","memory":"%s"}}}
]}}`, originalCPU, originalMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -800,8 +846,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c2", "resources":{"limits":{"cpu":"%s"}}}
]}}`, originalCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -829,8 +875,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, increasedCPU, increasedMem, increasedCPU, increasedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -877,8 +923,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, increasedCPU, increasedMem, increasedCPU, increasedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -911,8 +957,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, reducedCPU, increasedMem, reducedCPU, increasedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -941,8 +987,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, reducedCPU, reducedCPU),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -971,8 +1017,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"%s","memory":"%s"},"limits":{"cpu":"%s","memory":"%s"}}}
]}}`, increasedCPU, increasedMem, increasedCPULimit, increasedMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -1001,8 +1047,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, reducedCPU, reducedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -1031,8 +1077,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"cpu":"%s"},"limits":{"cpu":"%s"}}}
]}}`, increasedCPU, increasedCPULimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -1061,8 +1107,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"memory":"%s"}}}
]}}`, reducedMem),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -1091,8 +1137,8 @@ func doPodResizeTests() {
},
},
patchString: fmt.Sprintf(`{"spec":{"initContainers":[
{"name":"c1-init", "resources":{"requests":{"memory":"%s"},"limits":{"memory":"%s"}}}
]}}`, increasedMem, increasedMemLimit),
expected: []e2epod.ResizableContainerInfo{
{
Name: "c1",

@@ -1398,9 +1444,9 @@ var _ = SIGDescribe("Pod InPlace Resize Container", feature.InPlacePodVerticalSc
f := framework.NewDefaultFramework("pod-resize-tests")

ginkgo.BeforeEach(func(ctx context.Context) {
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
_, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)
if framework.NodeOSDistroIs("windows") || e2enode.IsARM64(node) {
if framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping")
}
})
@@ -30,6 +30,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
helpers "k8s.io/component-helpers/resource"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
kubeqos "k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

@@ -167,6 +168,7 @@ func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
testInitContainers, testContainers := separateContainers(tcInfo)

minGracePeriodSeconds := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,

@@ -176,10 +178,11 @@ func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []Resizab
},
},
Spec: v1.PodSpec{
OS: &v1.PodOS{Name: v1.Linux},
InitContainers: testInitContainers,
Containers: testContainers,
RestartPolicy: v1.RestartPolicyOnFailure,
TerminationGracePeriodSeconds: &minGracePeriodSeconds,
},
}
return pod

@@ -350,27 +353,29 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
}
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimitString))
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
// TODO(vinaykul,InPlacePodVerticalScaling): Verify oom_score_adj when runc adds support for updating it
// See https://github.com/opencontainers/runc/pull/4669
}
}
return utilerrors.NewAggregate(errs)
}

func verifyPodRestarts(pod *v1.Pod, wantInfo []ResizableContainerInfo) error {
func verifyPodRestarts(f *framework.Framework, pod *v1.Pod, wantInfo []ResizableContainerInfo) error {
ginkgo.GinkgoHelper()

initCtrStatuses, ctrStatuses := separateContainerStatuses(wantInfo)
errs := []error{}
if err := verifyContainerRestarts(pod.Status.InitContainerStatuses, initCtrStatuses); err != nil {
if err := verifyContainerRestarts(f, pod, pod.Status.InitContainerStatuses, initCtrStatuses); err != nil {
errs = append(errs, err)
}
if err := verifyContainerRestarts(pod.Status.ContainerStatuses, ctrStatuses); err != nil {
if err := verifyContainerRestarts(f, pod, pod.Status.ContainerStatuses, ctrStatuses); err != nil {
errs = append(errs, err)
}

return utilerrors.NewAggregate(errs)
}

func verifyContainerRestarts(gotStatuses []v1.ContainerStatus, wantStatuses []v1.ContainerStatus) error {
func verifyContainerRestarts(f *framework.Framework, pod *v1.Pod, gotStatuses []v1.ContainerStatus, wantStatuses []v1.ContainerStatus) error {
ginkgo.GinkgoHelper()

if len(gotStatuses) != len(wantStatuses) {

@@ -382,11 +387,34 @@ func verifyContainerRestarts(gotStatuses []v1.ContainerStatus, wantStatuses []v1
for i, gotStatus := range gotStatuses {
if gotStatus.RestartCount != wantStatuses[i].RestartCount {
errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", gotStatus.Name, gotStatus.RestartCount, wantStatuses[i].RestartCount))
} else if gotStatus.RestartCount > 0 {
err := verifyOomScoreAdj(f, pod, gotStatus.Name)
if err != nil {
errs = append(errs, err)
}
}
}
return utilerrors.NewAggregate(errs)
}

func verifyOomScoreAdj(f *framework.Framework, pod *v1.Pod, containerName string) error {
container := FindContainerInPod(pod, containerName)
if container == nil {
return fmt.Errorf("failed to find container %s in pod %s", containerName, pod.Name)
}

node, err := f.ClientSet.CoreV1().Nodes().Get(context.Background(), pod.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return err
}

nodeMemoryCapacity := node.Status.Capacity[v1.ResourceMemory]
oomScoreAdj := kubeqos.GetContainerOOMScoreAdjust(pod, container, int64(nodeMemoryCapacity.Value()))
expectedOomScoreAdj := strconv.FormatInt(int64(oomScoreAdj), 10)

return VerifyOomScoreAdjValue(f, pod, container.Name, expectedOomScoreAdj)
}

func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo) *v1.Pod {
ginkgo.GinkgoHelper()
// Wait for resize to complete.
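The verifyOomScoreAdj helper above recomputes the expected value with kubeqos.GetContainerOOMScoreAdjust, which needs the node's memory capacity because, for Burstable pods, the score scales with the container's memory request; that is why the check only runs for containers that actually restarted (RestartCount > 0), since a restart is what re-applies the score. A rough sketch of the relationship, omitting the clamping and critical-pod special cases the real function applies, so treat it as an approximation rather than the kubelet's exact code:

// Approximate oom_score_adj by QoS class; higher memory request => lower score
// => the container is OOM-killed later.
func approxOOMScoreAdj(qosClass string, memoryRequestBytes, nodeCapacityBytes int64) int64 {
	switch qosClass {
	case "Guaranteed":
		return -997
	case "BestEffort":
		return 1000
	default: // Burstable
		return 1000 - (1000*memoryRequestBytes)/nodeCapacityBytes
	}
}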
@@ -436,7 +464,7 @@ func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v
if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil {
errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs)))
}
if restartErrs := verifyPodRestarts(resizedPod, expectedContainers); restartErrs != nil {
if restartErrs := verifyPodRestarts(f, resizedPod, expectedContainers); restartErrs != nil {
errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs)))
}

@@ -256,6 +256,21 @@ func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditi
return nil
}

// FindContainerInPod finds the container in a pod by its name
func FindContainerInPod(pod *v1.Pod, containerName string) *v1.Container {
for _, container := range pod.Spec.InitContainers {
if container.Name == containerName {
return &container
}
}
for _, container := range pod.Spec.Containers {
if container.Name == containerName {
return &container
}
}
return nil
}

// FindContainerStatusInPod finds a container status by its name in the provided pod
func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus {
for _, containerStatus := range pod.Status.InitContainerStatuses {

@@ -294,6 +309,24 @@ func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath, expec
return nil
}

// VerifyOomScoreAdjValue verifies that oom_score_adj for pid 1 (pidof init/systemd -> app)
// has the expected value in specified container of the pod. It execs into the container,
// reads the oom_score_adj value from procfs, and compares it against the expected value.
func VerifyOomScoreAdjValue(f *framework.Framework, pod *v1.Pod, cName, expectedOomScoreAdj string) error {
cmd := "cat /proc/1/oom_score_adj"
framework.Logf("Namespace %s Pod %s Container %s - looking for oom_score_adj value %s",
pod.Namespace, pod.Name, cName, expectedOomScoreAdj)
oomScoreAdj, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
if err != nil {
return fmt.Errorf("failed to find expected value %s for container app process", expectedOomScoreAdj)
}
oomScoreAdj = strings.Trim(oomScoreAdj, "\n")
if oomScoreAdj != expectedOomScoreAdj {
return fmt.Errorf("oom_score_adj value %s not equal to expected %s", oomScoreAdj, expectedOomScoreAdj)
}
return nil
}

// IsPodOnCgroupv2Node checks whether the pod is running on cgroupv2 node.
// TODO: Deduplicate this function with NPD cluster e2e test:
// https://github.com/kubernetes/kubernetes/blob/2049360379bcc5d6467769cef112e6e492d3d2f0/test/e2e/node/node_problem_detector.go#L369