Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 01:06:27 +00:00)
Roll back resize patches in doPodResizeTests
This commit is contained in:
parent 5837de2796, commit 30e395a7fa
@@ -18,6 +18,7 @@ package node
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"regexp"
 	"runtime"
@@ -81,6 +82,28 @@ type TestContainerInfo struct {
 	RestartCount int32
 }
 
+type containerPatch struct {
+	Name      string `json:"name"`
+	Resources struct {
+		Requests struct {
+			CPU     string `json:"cpu,omitempty"`
+			Memory  string `json:"memory,omitempty"`
+			EphStor string `json:"ephemeral-storage,omitempty"`
+		} `json:"requests"`
+		Limits struct {
+			CPU     string `json:"cpu,omitempty"`
+			Memory  string `json:"memory,omitempty"`
+			EphStor string `json:"ephemeral-storage,omitempty"`
+		} `json:"limits"`
+	} `json:"resources"`
+}
+
+type patchSpec struct {
+	Spec struct {
+		Containers []containerPatch `json:"containers"`
+	} `json:"spec"`
+}
+
 func isInPlaceResizeSupportedByRuntime(c clientset.Interface, nodeName string) bool {
 	//TODO(vinaykul,InPlacePodVerticalScaling): Can we optimize this?
 	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
@@ -418,11 +441,18 @@ func verifyPodContainersCgroupValues(pod *v1.Pod, tcInfo []TestContainerInfo, fl
 	return true
 }
 
-func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo) *v1.Pod {
+func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) *v1.Pod {
 
 	waitForContainerRestart := func() error {
 		var restartContainersExpected []string
-		for _, ci := range expectedContainers {
+
+		restartContainers := expectedContainers
+		// if we're rolling back, extract restart counts from test case "expected" containers
+		if isRollback {
+			restartContainers = initialContainers
+		}
+
+		for _, ci := range restartContainers {
 			if ci.RestartCount > 0 {
 				restartContainersExpected = append(restartContainersExpected, ci.Name)
 			}
@@ -438,7 +468,12 @@ func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClien
 		restartedContainersCount := 0
 		for _, cName := range restartContainersExpected {
 			cs, _ := podutil.GetContainerStatus(pod.Status.ContainerStatuses, cName)
-			if cs.RestartCount < 1 {
+			expectedRestarts := int32(1)
+			// if we're rolling back, we should have 2 container restarts
+			if isRollback {
+				expectedRestarts = int32(2)
+			}
+			if cs.RestartCount < expectedRestarts {
 				break
 			}
 			restartedContainersCount++
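For context on the numbers in this hunk: a test container that declares RestartCount > 0 is expected to restart exactly once when the forward resize patch is actuated, and a second time when the rollback patch reverts it, which is why the threshold becomes 2 on the rollback pass. A minimal sketch of that rule; the helper name is hypothetical and only for illustration:

package main

import "fmt"

// expectedRestarts returns how many restarts a resize-restarted container
// should have accumulated: 1 after the initial resize patch, 2 once the
// rollback patch has also been actuated. Hypothetical helper, not part of
// the diff.
func expectedRestarts(isRollback bool) int32 {
	if isRollback {
		return 2
	}
	return 1
}

func main() {
	fmt.Println(expectedRestarts(false)) // 1: forward resize only
	fmt.Println(expectedRestarts(true))  // 2: resize followed by rollback
}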
@@ -514,6 +549,28 @@ func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClien
 	return resizedPod
 }
 
+func genPatchString(containers []TestContainerInfo) (string, error) {
+	var patch patchSpec
+
+	for _, container := range containers {
+		var cPatch containerPatch
+		cPatch.Name = container.Name
+		cPatch.Resources.Requests.CPU = container.Resources.CPUReq
+		cPatch.Resources.Requests.Memory = container.Resources.MemReq
+		cPatch.Resources.Limits.CPU = container.Resources.CPULim
+		cPatch.Resources.Limits.Memory = container.Resources.MemLim
+
+		patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
+	}
+
+	patchBytes, err := json.Marshal(patch)
+	if err != nil {
+		return "", err
+	}
+
+	return string(patchBytes), nil
+}
+
 func doPodResizeTests() {
 	f := framework.NewDefaultFramework("pod-resize")
 	var podClient *e2epod.PodClient
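The new genPatchString builds a strategic-merge-patch body from a test case's original container resources, so the rollback can reuse the same patch-and-verify path as the forward resize. Below is a minimal, self-contained sketch of the JSON it produces; the patch structs are copied from this diff, while the container name and resource values are purely illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

// containerPatch and patchSpec mirror the types added in this commit.
type containerPatch struct {
	Name      string `json:"name"`
	Resources struct {
		Requests struct {
			CPU     string `json:"cpu,omitempty"`
			Memory  string `json:"memory,omitempty"`
			EphStor string `json:"ephemeral-storage,omitempty"`
		} `json:"requests"`
		Limits struct {
			CPU     string `json:"cpu,omitempty"`
			Memory  string `json:"memory,omitempty"`
			EphStor string `json:"ephemeral-storage,omitempty"`
		} `json:"limits"`
	} `json:"resources"`
}

type patchSpec struct {
	Spec struct {
		Containers []containerPatch `json:"containers"`
	} `json:"spec"`
}

func main() {
	// Illustrative values; the real tests take these from TestContainerInfo.
	var c containerPatch
	c.Name = "c1"
	c.Resources.Requests.CPU = "100m"
	c.Resources.Requests.Memory = "128Mi"
	c.Resources.Limits.CPU = "200m"
	c.Resources.Limits.Memory = "256Mi"

	var p patchSpec
	p.Spec.Containers = append(p.Spec.Containers, c)

	b, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Fields left empty (e.g. ephemeral-storage) are dropped by omitempty:
	// {"spec":{"containers":[{"name":"c1","resources":{"requests":{"cpu":"100m","memory":"128Mi"},"limits":{"cpu":"200m","memory":"256Mi"}}}]}}
}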
@@ -1263,31 +1320,40 @@ func doPodResizeTests() {
 				}
 			}
 
-			ginkgo.By("patching pod for resize")
-			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
-				types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{})
-			framework.ExpectNoError(pErr, "failed to patch pod for resize")
-
-			ginkgo.By("verifying pod patched for resize")
-			verifyPodResources(patchedPod, tc.expected)
-			verifyPodAllocations(patchedPod, tc.containers, true)
-
-			ginkgo.By("waiting for resize to be actuated")
-			resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, tc.expected)
-
-			// Check cgroup values only for containerd versions before 1.6.9
-			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
-				ginkgo.By("verifying pod container's cgroup values after resize")
-				if !framework.NodeOSDistroIs("windows") {
-					verifyPodContainersCgroupValues(resizedPod, tc.expected, true)
-				}
-			}
-
-			ginkgo.By("verifying pod resources after resize")
-			verifyPodResources(resizedPod, tc.expected)
-
-			ginkgo.By("verifying pod allocations after resize")
-			verifyPodAllocations(resizedPod, tc.expected, true)
+			patchAndVerify := func(patchString string, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, opStr string, isRollback bool) {
+				ginkgo.By(fmt.Sprintf("patching pod for %s", opStr))
+				patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
+					types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
+				framework.ExpectNoError(pErr, fmt.Sprintf("failed to patch pod for %s", opStr))
+
+				ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr))
+				verifyPodResources(patchedPod, expectedContainers)
+				verifyPodAllocations(patchedPod, initialContainers, true)
+
+				ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr))
+				resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback)
+
+				// Check cgroup values only for containerd versions before 1.6.9
+				if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
+					ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr))
+					if !framework.NodeOSDistroIs("windows") {
+						verifyPodContainersCgroupValues(resizedPod, expectedContainers, true)
+					}
+				}
+
+				ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr))
+				verifyPodResources(resizedPod, expectedContainers)
+
+				ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr))
+				verifyPodAllocations(resizedPod, expectedContainers, true)
+			}
+
+			patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false)
+
+			rbPatchStr, err := genPatchString(tc.containers)
+			framework.ExpectNoError(err)
+			// Resize has been actuated, test rollback
+			patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)
 
 			ginkgo.By("deleting pod")
 			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod)
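For readers outside the e2e framework: the patchAndVerify closure above applies both the forward and rollback patches with the standard client-go strategic-merge-patch call. A minimal sketch of that call, assuming an already configured clientset; the wrapper function and its names are illustrative, not part of the diff:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// applyResizePatch sends a strategic merge patch (such as the string
// returned by genPatchString) to a pod's spec, mirroring the call made by
// patchAndVerify. Illustrative wrapper, not part of the diff.
func applyResizePatch(ctx context.Context, cs kubernetes.Interface, namespace, name, patch string) error {
	_, err := cs.CoreV1().Pods(namespace).Patch(ctx, name,
		types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{})
	if err != nil {
		return fmt.Errorf("failed to patch pod %s/%s: %w", namespace, name, err)
	}
	return nil
}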
@@ -1372,7 +1438,7 @@ func doPodResizeResourceQuotaTests() {
 		verifyPodAllocations(patchedPod, containers, true)
 
 		ginkgo.By("waiting for resize to be actuated")
-		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected)
+		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected, containers, false)
 		if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod1.Spec.NodeName) {
 			ginkgo.By("verifying pod container's cgroup values after resize")
 			if !framework.NodeOSDistroIs("windows") {