diff --git a/test/e2e_node/pod_resize_test.go b/test/e2e/common/node/pod_resize.go similarity index 91% rename from test/e2e_node/pod_resize_test.go rename to test/e2e/common/node/pod_resize.go index 56f7f35315a..f5852e43889 100644 --- a/test/e2e_node/pod_resize_test.go +++ b/test/e2e/common/node/pod_resize.go @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package e2enode +package node import ( "context" + "encoding/json" "fmt" "regexp" "strconv" @@ -32,14 +33,16 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" - testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -59,10 +62,7 @@ const ( ) var ( - podOnCgroupv2Node bool = IsCgroup2UnifiedMode() - cgroupMemLimit string = Cgroupv2MemLimit - cgroupCPULimit string = Cgroupv2CPULimit - cgroupCPURequest string = Cgroupv2CPURequest + podOnCgroupv2Node *bool ) type ContainerResources struct { @@ -114,16 +114,19 @@ type patchSpec struct { } `json:"spec"` } -func supportsInPlacePodVerticalScaling(ctx context.Context, f *framework.Framework) bool { - node := getLocalNode(ctx, f) +func isInPlacePodVerticalScalingSupportedByRuntime(ctx context.Context, c clientset.Interface) bool { + node, err := e2enode.GetRandomReadySchedulableNode(ctx, c) + framework.ExpectNoError(err) re := regexp.MustCompile("containerd://(.*)") match := re.FindStringSubmatch(node.Status.NodeInfo.ContainerRuntimeVersion) if len(match) != 2 { return false } - // TODO(InPlacePodVerticalScaling): Update when RuntimeHandlerFeature for pod resize have been implemented if ver, verr := semver.ParseTolerant(match[1]); verr == nil { - return ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) >= 0 + if ver.Compare(semver.MustParse(MinContainerRuntimeVersion)) < 0 { + return false + } + return true } return false } @@ -222,15 +225,11 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod { var testContainers []v1.Container - var podOS *v1.PodOS for _, ci := range tcInfo { tc, _ := makeTestContainer(ci) testContainers = append(testContainers, tc) } - - podOS = &v1.PodOS{Name: v1.Linux} - pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -240,7 +239,7 @@ func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod }, }, Spec: v1.PodSpec{ - OS: podOS, + OS: &v1.PodOS{Name: v1.Linux}, Containers: testContainers, RestartPolicy: v1.RestartPolicyOnFailure, }, @@ -248,89 +247,95 @@ func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod return pod } -func verifyPodResizePolicy(pod *v1.Pod, tcInfo []TestContainerInfo) { +func verifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []TestContainerInfo) { ginkgo.GinkgoHelper() - cMap := make(map[string]*v1.Container) - for i, c := range pod.Spec.Containers { - cMap[c.Name] = &pod.Spec.Containers[i] - } - for _, ci := range tcInfo { - gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name)) - c := 
cMap[ci.Name] - tc, _ := makeTestContainer(ci) - gomega.Expect(tc.ResizePolicy).To(gomega.Equal(c.ResizePolicy)) + for i, wantCtr := range wantCtrs { + gotCtr := &gotPod.Spec.Containers[i] + ctr, _ := makeTestContainer(wantCtr) + gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name)) + gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy)) } } -func verifyPodResources(pod *v1.Pod, tcInfo []TestContainerInfo) { +func verifyPodResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) { ginkgo.GinkgoHelper() - cMap := make(map[string]*v1.Container) - for i, c := range pod.Spec.Containers { - cMap[c.Name] = &pod.Spec.Containers[i] - } - for _, ci := range tcInfo { - gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name)) - c := cMap[ci.Name] - tc, _ := makeTestContainer(ci) - gomega.Expect(tc.Resources).To(gomega.Equal(c.Resources)) + for i, wantCtr := range wantCtrs { + gotCtr := &gotPod.Spec.Containers[i] + ctr, _ := makeTestContainer(wantCtr) + gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name)) + gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources)) } } -func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo) error { +func verifyPodAllocations(gotPod *v1.Pod, wantCtrs []TestContainerInfo) error { ginkgo.GinkgoHelper() - cStatusMap := make(map[string]*v1.ContainerStatus) - for i, c := range pod.Status.ContainerStatuses { - cStatusMap[c.Name] = &pod.Status.ContainerStatuses[i] - } - - for _, ci := range tcInfo { - gomega.Expect(cStatusMap).Should(gomega.HaveKey(ci.Name)) - cStatus := cStatusMap[ci.Name] - if ci.Allocations == nil { - if ci.Resources != nil { - alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq} - ci.Allocations = alloc + for i, wantCtr := range wantCtrs { + gotCtrStatus := &gotPod.Status.ContainerStatuses[i] + if wantCtr.Allocations == nil { + if wantCtr.Resources != nil { + alloc := &ContainerAllocations{CPUAlloc: wantCtr.Resources.CPUReq, MemAlloc: wantCtr.Resources.MemReq} + wantCtr.Allocations = alloc defer func() { - ci.Allocations = nil + wantCtr.Allocations = nil }() } } - _, tcStatus := makeTestContainer(ci) - if !cmp.Equal(cStatus.AllocatedResources, tcStatus.AllocatedResources) { + _, ctrStatus := makeTestContainer(wantCtr) + gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctrStatus.Name)) + if !cmp.Equal(gotCtrStatus.AllocatedResources, ctrStatus.AllocatedResources) { return fmt.Errorf("failed to verify Pod allocations, allocated resources not equal to expected") } } return nil } -func verifyPodStatusResources(pod *v1.Pod, tcInfo []TestContainerInfo) { +func verifyPodStatusResources(gotPod *v1.Pod, wantCtrs []TestContainerInfo) { ginkgo.GinkgoHelper() - csMap := make(map[string]*v1.ContainerStatus) - for i, c := range pod.Status.ContainerStatuses { - csMap[c.Name] = &pod.Status.ContainerStatuses[i] + for i, wantCtr := range wantCtrs { + gotCtrStatus := &gotPod.Status.ContainerStatuses[i] + ctr, _ := makeTestContainer(wantCtr) + gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctr.Name)) + gomega.Expect(ctr.Resources).To(gomega.Equal(*gotCtrStatus.Resources)) } - for _, ci := range tcInfo { - gomega.Expect(csMap).Should(gomega.HaveKey(ci.Name)) - cs := csMap[ci.Name] - tc, _ := makeTestContainer(ci) - gomega.Expect(tc.Resources).To(gomega.Equal(*cs.Resources)) +} + +func isPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool { + // Determine if pod is running on cgroupv2 or cgroupv1 node + //TODO(vinaykul,InPlacePodVerticalScaling): Is there a better way to determine this? 
+ cmd := "mount -t cgroup2" + out, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd) + if err != nil { + return false } + return len(out) != 0 } func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []TestContainerInfo) error { ginkgo.GinkgoHelper() + if podOnCgroupv2Node == nil { + value := isPodOnCgroupv2Node(f, pod) + podOnCgroupv2Node = &value + } + cgroupMemLimit := Cgroupv2MemLimit + cgroupCPULimit := Cgroupv2CPULimit + cgroupCPURequest := Cgroupv2CPURequest + if !*podOnCgroupv2Node { + cgroupMemLimit = CgroupMemLimit + cgroupCPULimit = CgroupCPUQuota + cgroupCPURequest = CgroupCPUShares + } verifyCgroupValue := func(cName, cgPath, expectedCgValue string) error { - mycmd := fmt.Sprintf("head -n 1 %s", cgPath) - cgValue, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", mycmd) + cmd := fmt.Sprintf("head -n 1 %s", cgPath) framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s", pod.Namespace, pod.Name, cName, expectedCgValue, cgPath) + cgValue, _, err := e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd) if err != nil { - return fmt.Errorf("failed to find expected value '%s' in container cgroup '%s'", expectedCgValue, cgPath) + return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath) } cgValue = strings.Trim(cgValue, "\n") if cgValue != expectedCgValue { - return fmt.Errorf("cgroup value '%s' not equal to expected '%s'", cgValue, expectedCgValue) + return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue) } return nil } @@ -356,7 +361,7 @@ func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework } expectedCPULimitString = strconv.FormatInt(cpuQuota, 10) expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10) - if podOnCgroupv2Node { + if *podOnCgroupv2Node { if expectedCPULimitString == "-1" { expectedCPULimitString = "max" } @@ -387,10 +392,17 @@ func verifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework return nil } -func waitForContainerRestart(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []TestContainerInfo) error { +func waitForContainerRestart(ctx context.Context, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) error { ginkgo.GinkgoHelper() var restartContainersExpected []string - for _, ci := range expectedContainers { + + restartContainers := expectedContainers + // if we're rolling back, extract restart counts from test case "expected" containers + if isRollback { + restartContainers = initialContainers + } + + for _, ci := range restartContainers { if ci.RestartCount > 0 { restartContainersExpected = append(restartContainersExpected, ci.Name) } @@ -398,6 +410,7 @@ func waitForContainerRestart(ctx context.Context, f *framework.Framework, podCli if len(restartContainersExpected) == 0 { return nil } + pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{}) if err != nil { return err @@ -420,14 +433,14 @@ func waitForContainerRestart(ctx context.Context, f *framework.Framework, podCli } } -func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers 
[]TestContainerInfo) *v1.Pod { +func waitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, isRollback bool) *v1.Pod { ginkgo.GinkgoHelper() var resizedPod *v1.Pod var pErr error timeouts := framework.NewTimeoutContext() // Wait for container restart gomega.Eventually(ctx, waitForContainerRestart, timeouts.PodStartShort, timeouts.Poll). - WithArguments(f, podClient, pod, expectedContainers). + WithArguments(podClient, pod, expectedContainers, initialContainers, isRollback). ShouldNot(gomega.HaveOccurred(), "failed waiting for expected container restart") // Verify Pod Containers Cgroup Values gomega.Eventually(ctx, verifyPodContainersCgroupValues, timeouts.PodStartShort, timeouts.Poll). @@ -1285,13 +1298,12 @@ func doPodResizeTests() { for idx := range tests { tc := tests[idx] ginkgo.It(tc.name, func(ctx context.Context) { - ginkgo.By("waiting for the node to be ready", func() { - if !supportsInPlacePodVerticalScaling(ctx, f) || framework.NodeOSDistroIs("windows") || isRunningOnArm64() { + ginkgo.By("check if in place pod vertical scaling is supported", func() { + if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping") } }) - var testPod *v1.Pod - var patchedPod *v1.Pod + var testPod, patchedPod *v1.Pod var pErr error tStamp := strconv.Itoa(time.Now().Nanosecond()) @@ -1322,9 +1334,8 @@ func doPodResizeTests() { ginkgo.By("verifying initial pod resize policy is as expected") verifyPodResizePolicy(newPod, tc.containers) - ginkgo.By("verifying initial pod status resources") + ginkgo.By("verifying initial pod status resources are as expected") verifyPodStatusResources(newPod, tc.containers) - ginkgo.By("verifying initial cgroup config are as expected") framework.ExpectNoError(verifyPodContainersCgroupValues(ctx, f, newPod, tc.containers)) @@ -1409,8 +1420,8 @@ func doPodResizeErrorTests() { for idx := range tests { tc := tests[idx] ginkgo.It(tc.name, func(ctx context.Context) { - ginkgo.By("waiting for the node to be ready", func() { - if !supportsInPlacePodVerticalScaling(ctx, f) || framework.NodeOSDistroIs("windows") || isRunningOnArm64() { + ginkgo.By("check if in place pod vertical scaling is supported", func() { + if !isInPlacePodVerticalScalingSupportedByRuntime(ctx, f.ClientSet) || framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("runtime does not support InPlacePodVerticalScaling -- skipping") } }) @@ -1426,10 +1437,6 @@ func doPodResizeErrorTests() { ginkgo.By("creating pod") newPod := podClient.CreateSync(ctx, testPod) - perr := e2epod.WaitForPodCondition(ctx, f.ClientSet, newPod.Namespace, newPod.Name, "Ready", timeouts.PodStartSlow, testutils.PodRunningReady) - framework.ExpectNoError(perr, "pod %s/%s did not go running", newPod.Namespace, newPod.Name) - framework.Logf("pod %s/%s running", newPod.Namespace, newPod.Name) - ginkgo.By("verifying initial pod resources, allocations, and policy are as expected") verifyPodResources(newPod, tc.containers) verifyPodResizePolicy(newPod, tc.containers) @@ -1469,12 +1476,7 @@ func doPodResizeErrorTests() { // Above tests are performed by doSheduletTests() and doPodResizeResourceQuotaTests() // in test/e2e/node/pod_resize.go -var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, 
"[NodeAlphaFeature:InPlacePodVerticalScaling]", func() { - if !podOnCgroupv2Node { - cgroupMemLimit = CgroupMemLimit - cgroupCPULimit = CgroupCPUQuota - cgroupCPURequest = CgroupCPUShares - } +var _ = SIGDescribe("Pod InPlace Resize Container", framework.WithSerial(), feature.InPlacePodVerticalScaling, func() { doPodResizeTests() doPodResizeErrorTests() }) diff --git a/test/e2e/node/pod_resize.go b/test/e2e/node/pod_resize.go index 464981e3019..9a087b5760b 100644 --- a/test/e2e/node/pod_resize.go +++ b/test/e2e/node/pod_resize.go @@ -26,17 +26,19 @@ import ( "strings" "time" + semver "github.com/blang/semver/v4" + "github.com/google/go-cmp/cmp" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" resourceapi "k8s.io/kubernetes/pkg/api/v1/resource" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" - "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" @@ -44,11 +46,6 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" imageutils "k8s.io/kubernetes/test/utils/image" - - semver "github.com/blang/semver/v4" - "github.com/google/go-cmp/cmp" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" ) const ( @@ -583,910 +580,6 @@ func genPatchString(containers []TestContainerInfo) (string, error) { return string(patchBytes), nil } -func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, new *v1.Node) error { - oldData, err := json.Marshal(old) - if err != nil { - return err - } - - newData, err := json.Marshal(new) - if err != nil { - return err - } - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{}) - if err != nil { - return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err) - } - _, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") - return err -} - -func addExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string, extendedResourceQuantity resource.Quantity) { - extendedResource := v1.ResourceName(extendedResourceName) - - ginkgo.By("Adding a custom resource") - OriginalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) - framework.ExpectNoError(err) - - node := OriginalNode.DeepCopy() - node.Status.Capacity[extendedResource] = extendedResourceQuantity - node.Status.Allocatable[extendedResource] = extendedResourceQuantity - err = patchNode(context.Background(), clientSet, OriginalNode.DeepCopy(), node) - framework.ExpectNoError(err) - - gomega.Eventually(func() error { - node, err = clientSet.CoreV1().Nodes().Get(context.Background(), node.Name, metav1.GetOptions{}) - framework.ExpectNoError(err) - - fakeResourceCapacity, exists := node.Status.Capacity[extendedResource] - if !exists { - return fmt.Errorf("node %s has no %s resource capacity", node.Name, extendedResourceName) - } - if expectedResource := resource.MustParse("123"); fakeResourceCapacity.Cmp(expectedResource) != 0 { - return fmt.Errorf("node %s has resource capacity %s, expected: %s", node.Name, fakeResourceCapacity.String(), 
expectedResource.String()) - } - - return nil - }).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred()) -} - -func removeExtendedResource(clientSet clientset.Interface, nodeName, extendedResourceName string) { - extendedResource := v1.ResourceName(extendedResourceName) - - ginkgo.By("Removing a custom resource") - originalNode, err := clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) - framework.ExpectNoError(err) - - node := originalNode.DeepCopy() - delete(node.Status.Capacity, extendedResource) - delete(node.Status.Allocatable, extendedResource) - err = patchNode(context.Background(), clientSet, originalNode.DeepCopy(), node) - framework.ExpectNoError(err) - - gomega.Eventually(func() error { - node, err = clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) - framework.ExpectNoError(err) - - if _, exists := node.Status.Capacity[extendedResource]; exists { - return fmt.Errorf("node %s has resource capacity %s which is expected to be removed", node.Name, extendedResourceName) - } - - return nil - }).WithTimeout(30 * time.Second).WithPolling(time.Second).ShouldNot(gomega.HaveOccurred()) -} - -func doPodResizeTests() { - f := framework.NewDefaultFramework("pod-resize") - var podClient *e2epod.PodClient - - ginkgo.BeforeEach(func() { - podClient = e2epod.NewPodClient(f) - }) - - type testCase struct { - name string - containers []TestContainerInfo - patchString string - expected []TestContainerInfo - addExtendedResource bool - } - - noRestart := v1.NotRequired - doRestart := v1.RestartContainer - tests := []testCase{ - { - name: "Guaranteed QoS pod, one container - increase CPU & memory", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - }, - { - name: "Guaranteed QoS pod, one container - decrease CPU & memory", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"100m","memory":"250Mi"},"limits":{"cpu":"100m","memory":"250Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "250Mi", MemLim: "250Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - }, - { - name: "Guaranteed QoS pod, one container - increase CPU & decrease memory", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"100Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "100Mi", MemLim: "100Mi"}, - }, - }, - }, - { - 
name: "Guaranteed QoS pod, one container - decrease CPU & increase memory", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"300Mi"},"limits":{"cpu":"50m","memory":"300Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "50m", CPULim: "50m", MemReq: "300Mi", MemLim: "300Mi"}, - }, - }, - }, - { - name: "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "200Mi", MemLim: "200Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "300Mi", MemLim: "300Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"140m","memory":"50Mi"},"limits":{"cpu":"140m","memory":"50Mi"}}}, - {"name":"c2", "resources":{"requests":{"cpu":"150m","memory":"240Mi"},"limits":{"cpu":"150m","memory":"240Mi"}}}, - {"name":"c3", "resources":{"requests":{"cpu":"340m","memory":"250Mi"},"limits":{"cpu":"340m","memory":"250Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "140m", CPULim: "140m", MemReq: "50Mi", MemLim: "50Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "150m", CPULim: "150m", MemReq: "240Mi", MemLim: "240Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "340m", CPULim: "340m", MemReq: "250Mi", MemLim: "250Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"200Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"limits":{"memory":"400Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "400Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", 
MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"300Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "300Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory limits only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"limits":{"memory":"600Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "600Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"100m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"limits":{"cpu":"300m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"150m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "150m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU limits only", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"limits":{"cpu":"500m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "500m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"cpu":"200m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: 
"100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"cpu":"400m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase CPU limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"cpu":"500m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "500m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease CPU limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"cpu":"300m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "250Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"memory":"300Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "300Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"memory":"500Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "300Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase memory limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"memory":"500Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: 
"200m", MemReq: "100Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease memory limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"memory":"300Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "300Mi", MemLim: "300Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase memory limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"memory":"500Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease memory limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"memory":"400Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase CPU limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"cpu":"300m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "300m", MemReq: "100Mi", MemLim: "400Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease CPU limits", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"cpu":"300m"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "300Mi", MemLim: "400Mi"}, - }, - }, - }, - { - name: "Burstable QoS pod, one container with cpu & memory requests - decrease memory request", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", MemReq: "500Mi"}, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"400Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", MemReq: "400Mi"}, - }, - }, - }, - { - name: "Guaranteed QoS 
pod, one container - increase CPU (NotRequired) & memory (RestartContainer)", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &doRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &doRestart, - RestartCount: 1, - }, - }, - }, - { - name: "Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"}, - CPUPolicy: &doRestart, - MemPolicy: &noRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"100Mi"},"limits":{"cpu":"100m","memory":"200Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "50m", CPULim: "100m", MemReq: "100Mi", MemLim: "200Mi"}, - CPUPolicy: &doRestart, - MemPolicy: &noRestart, - RestartCount: 1, - }, - }, - }, - { - name: "Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod)", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &doRestart, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"150m","memory":"150Mi"},"limits":{"cpu":"250m","memory":"250Mi"}}}, - {"name":"c3", "resources":{"requests":{"cpu":"250m","memory":"250Mi"},"limits":{"cpu":"350m","memory":"350Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "150m", CPULim: "250m", MemReq: "150Mi", MemLim: "250Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &doRestart, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "250m", CPULim: "350m", MemReq: "250Mi", MemLim: "350Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - }, - { - name: "Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod)", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &doRestart, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: 
"300Mi", MemLim: "400Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"50Mi"},"limits":{"cpu":"150m","memory":"150Mi"}}}, - {"name":"c2", "resources":{"requests":{"cpu":"350m","memory":"350Mi"},"limits":{"cpu":"450m","memory":"450Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "50m", CPULim: "150m", MemReq: "50Mi", MemLim: "150Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "350m", CPULim: "450m", MemReq: "350Mi", MemLim: "450Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &doRestart, - RestartCount: 1, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - }, - { - name: "Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod)", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"}, - CPUPolicy: &doRestart, - MemPolicy: &doRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"}, - CPUPolicy: &doRestart, - MemPolicy: &noRestart, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &doRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c2", "resources":{"requests":{"cpu":"250m","memory":"250Mi"},"limits":{"cpu":"350m","memory":"350Mi"}}}, - {"name":"c3", "resources":{"requests":{"cpu":"100m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"200Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"}, - CPUPolicy: &doRestart, - MemPolicy: &doRestart, - }, - { - Name: "c2", - Resources: &ContainerResources{CPUReq: "250m", CPULim: "350m", MemReq: "250Mi", MemLim: "350Mi"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - RestartCount: 1, - }, - { - Name: "c3", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"}, - CPUPolicy: &doRestart, - MemPolicy: &doRestart, - RestartCount: 1, - }, - }, - }, - { - name: "Guaranteed QoS pod, one container - increase CPU & memory with an extended resource", - containers: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi", - ExtendedResourceReq: "1", ExtendedResourceLim: "1"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}} - ]}}`, - expected: []TestContainerInfo{ - { - Name: "c1", - Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi", - ExtendedResourceReq: "1", ExtendedResourceLim: "1"}, - CPUPolicy: &noRestart, - MemPolicy: &noRestart, - }, - }, - addExtendedResource: true, - }, - } - - for idx := range tests { - tc := tests[idx] - ginkgo.It(tc.name, func(ctx context.Context) { - var testPod, patchedPod *v1.Pod - var pErr error - - tStamp := strconv.Itoa(time.Now().Nanosecond()) - 
initDefaultResizePolicy(tc.containers) - initDefaultResizePolicy(tc.expected) - testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers) - - if tc.addExtendedResource { - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet) - framework.ExpectNoError(err) - - for _, node := range nodes.Items { - addExtendedResource(f.ClientSet, node.Name, fakeExtendedResource, resource.MustParse("123")) - } - defer func() { - for _, node := range nodes.Items { - removeExtendedResource(f.ClientSet, node.Name, fakeExtendedResource) - } - }() - } - - ginkgo.By("creating pod") - newPod := podClient.CreateSync(ctx, testPod) - - ginkgo.By("verifying the pod is in kubernetes") - selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": tStamp})) - options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := podClient.List(context.TODO(), options) - framework.ExpectNoError(err, "failed to query for pods") - gomega.Expect(podList.Items).Should(gomega.HaveLen(1)) - - ginkgo.By("verifying initial pod resources, allocations, and policy are as expected") - verifyPodResources(newPod, tc.containers) - verifyPodResizePolicy(newPod, tc.containers) - - ginkgo.By("verifying initial pod status resources and cgroup config are as expected") - verifyPodStatusResources(newPod, tc.containers) - // Check cgroup values only for containerd versions before 1.6.9 - if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) { - if !framework.NodeOSDistroIs("windows") { - verifyPodContainersCgroupValues(newPod, tc.containers, true) - } - } - - patchAndVerify := func(patchString string, expectedContainers []TestContainerInfo, initialContainers []TestContainerInfo, opStr string, isRollback bool) { - ginkgo.By(fmt.Sprintf("patching pod for %s", opStr)) - patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name, - types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{}) - framework.ExpectNoError(pErr, fmt.Sprintf("failed to patch pod for %s", opStr)) - - ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr)) - verifyPodResources(patchedPod, expectedContainers) - verifyPodAllocations(patchedPod, initialContainers, true) - - ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr)) - resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback) - - // Check cgroup values only for containerd versions before 1.6.9 - if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) { - ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr)) - if !framework.NodeOSDistroIs("windows") { - verifyPodContainersCgroupValues(resizedPod, expectedContainers, true) - } - } - - ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr)) - verifyPodResources(resizedPod, expectedContainers) - - ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr)) - verifyPodAllocations(resizedPod, expectedContainers, true) - } - - patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false) - - rbPatchStr, err := genPatchString(tc.containers) - framework.ExpectNoError(err) - // Resize has been actuated, test rollback - patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true) - - ginkgo.By("deleting pod") - err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod) - framework.ExpectNoError(err, "failed to delete pod") - }) - } -} - func 
doPodResizeResourceQuotaTests() { f := framework.NewDefaultFramework("pod-resize-resource-quota") var podClient *e2epod.PodClient @@ -1543,13 +636,6 @@ func doPodResizeResourceQuotaTests() { newPod1 := podClient.CreateSync(ctx, testPod1) newPod2 := podClient.CreateSync(ctx, testPod2) - ginkgo.By("verifying the pod is in kubernetes") - selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": tStamp})) - options := metav1.ListOptions{LabelSelector: selector.String()} - podList, listErr := podClient.List(context.TODO(), options) - framework.ExpectNoError(listErr, "failed to query for pods") - gomega.Expect(podList.Items).Should(gomega.HaveLen(2)) - ginkgo.By("verifying initial pod resources, allocations, and policy are as expected") verifyPodResources(newPod1, containers) @@ -1609,104 +695,6 @@ func doPodResizeResourceQuotaTests() { }) } -func doPodResizeErrorTests() { - f := framework.NewDefaultFramework("pod-resize-errors") - var podClient *e2epod.PodClient - ginkgo.BeforeEach(func() { - podClient = e2epod.NewPodClient(f) - }) - - type testCase struct { - name string - containers []TestContainerInfo - patchString string - patchError string - expected []TestContainerInfo - } - - tests := []testCase{ - { - name: "BestEffort pod - try requesting memory, expect error", - containers: []TestContainerInfo{ - { - Name: "c1", - }, - }, - patchString: `{"spec":{"containers":[ - {"name":"c1", "resources":{"requests":{"memory":"400Mi"}}} - ]}}`, - patchError: "Pod QoS is immutable", - expected: []TestContainerInfo{ - { - Name: "c1", - }, - }, - }, - } - - for idx := range tests { - tc := tests[idx] - ginkgo.It(tc.name, func(ctx context.Context) { - var testPod, patchedPod *v1.Pod - var pErr error - - tStamp := strconv.Itoa(time.Now().Nanosecond()) - initDefaultResizePolicy(tc.containers) - initDefaultResizePolicy(tc.expected) - testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers) - - ginkgo.By("creating pod") - newPod := podClient.CreateSync(ctx, testPod) - - ginkgo.By("verifying the pod is in kubernetes") - selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": tStamp})) - options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := podClient.List(context.TODO(), options) - framework.ExpectNoError(err, "failed to query for pods") - gomega.Expect(podList.Items).Should(gomega.HaveLen(1)) - - ginkgo.By("verifying initial pod resources, allocations, and policy are as expected") - verifyPodResources(newPod, tc.containers) - verifyPodResizePolicy(newPod, tc.containers) - - ginkgo.By("verifying initial pod status resources and cgroup config are as expected") - verifyPodStatusResources(newPod, tc.containers) - if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) { - if !framework.NodeOSDistroIs("windows") { - verifyPodContainersCgroupValues(newPod, tc.containers, true) - } - } - - ginkgo.By("patching pod for resize") - patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name, - types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{}) - if tc.patchError == "" { - framework.ExpectNoError(pErr, "failed to patch pod for resize") - } else { - gomega.Expect(pErr).To(gomega.HaveOccurred(), tc.patchError) - patchedPod = newPod - } - - if !isInPlaceResizeSupportedByRuntime(f.ClientSet, patchedPod.Spec.NodeName) { - ginkgo.By("verifying pod container's cgroup values after patch") - if !framework.NodeOSDistroIs("windows") { - 
verifyPodContainersCgroupValues(patchedPod, tc.expected, true) - } - } - - ginkgo.By("verifying pod resources after patch") - verifyPodResources(patchedPod, tc.expected) - - ginkgo.By("verifying pod allocations after patch") - verifyPodAllocations(patchedPod, tc.expected, true) - - ginkgo.By("deleting pod") - err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod) - framework.ExpectNoError(err, "failed to delete pod") - }) - } -} - func doPodResizeSchedulerTests() { f := framework.NewDefaultFramework("pod-resize-scheduler") var podClient *e2epod.PodClient @@ -1811,7 +799,6 @@ func doPodResizeSchedulerTests() { ginkgo.By(fmt.Sprintf("TEST1: Verify that pod '%s' is running after resize", testPod2.Name)) framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, testPod2)) - // // Scheduler focussed pod resize E2E test case #2 // 1. With pod1 + pod2 running on node above, create pod3 that requests more CPU than available, verify pending. // 2. Resize pod1 down so that pod3 gets room to be scheduled. @@ -1878,7 +865,5 @@ var _ = SIGDescribe(framework.WithSerial(), "Pod InPlace Resize Container (sched }) var _ = SIGDescribe("Pod InPlace Resize Container", feature.InPlacePodVerticalScaling, func() { - doPodResizeTests() doPodResizeResourceQuotaTests() - doPodResizeErrorTests() })
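
The hunks above all drive in-place resizes through the same mechanism: a strategic-merge patch of the pod's container resources, followed by polling container statuses and cgroup files until the kubelet actuates the change. As an illustrative aside (not part of the patch), here is a minimal standalone sketch of that call path in Go using a plain clientset rather than the e2e framework's podClient. The namespace "default", pod name "testpod", container name "c1", and the kubeconfig location are assumptions chosen for the example; the patch payload mirrors the shape of the tests' patchString values.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at the default location ($HOME/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Strategic-merge patch in the same shape as the tests' patchString payloads:
	// resize container "c1" of pod "testpod" in place.
	patch := []byte(`{"spec":{"containers":[
		{"name":"c1","resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
	]}}`)

	pod, err := client.CoreV1().Pods("default").Patch(context.TODO(), "testpod",
		types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}

	// The returned spec reflects the new desired resources immediately; the e2e tests
	// then wait for allocated resources, restarts, and cgroup limits to catch up.
	fmt.Printf("patched %s/%s: %+v\n", pod.Namespace, pod.Name, pod.Spec.Containers[0].Resources)
}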