Merge pull request #131267 from tallclair/resize-helpers

Move pod resize e2e utilities out of e2e/framework
Kubernetes Prow Robot authored 2025-04-24 01:28:44 -07:00 · committed by GitHub
6 changed files with 332 additions and 332 deletions


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package pod
+package podresize
import (
"context"
@@ -33,6 +33,7 @@ import (
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
kubeqos "k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo/v2"
@@ -302,7 +303,7 @@ func verifyPodContainersStatusResources(gotCtrStatuses []v1.ContainerStatus, wan
func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []ResizableContainerInfo) error {
ginkgo.GinkgoHelper()
if podOnCgroupv2Node == nil {
-value := IsPodOnCgroupv2Node(f, pod)
+value := e2epod.IsPodOnCgroupv2Node(f, pod)
podOnCgroupv2Node = &value
}
cgroupMemLimit := Cgroupv2MemLimit
@@ -344,10 +345,10 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
}
if expectedMemLimitString != "0" {
-errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupMemLimit, expectedMemLimitString))
+errs = append(errs, e2epod.VerifyCgroupValue(f, pod, ci.Name, cgroupMemLimit, expectedMemLimitString))
}
-errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimits...))
-errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
+errs = append(errs, e2epod.VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimits...))
+errs = append(errs, e2epod.VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
// TODO(vinaykul,InPlacePodVerticalScaling): Verify oom_score_adj when runc adds support for updating it
// See https://github.com/opencontainers/runc/pull/4669
}
@@ -393,7 +394,7 @@ func verifyContainerRestarts(f *framework.Framework, pod *v1.Pod, gotStatuses []
}
func verifyOomScoreAdj(f *framework.Framework, pod *v1.Pod, containerName string) error {
-container := FindContainerInPod(pod, containerName)
+container := e2epod.FindContainerInPod(pod, containerName)
if container == nil {
return fmt.Errorf("failed to find container %s in pod %s", containerName, pod.Name)
}
@@ -407,10 +408,10 @@ func verifyOomScoreAdj(f *framework.Framework, pod *v1.Pod, containerName string
oomScoreAdj := kubeqos.GetContainerOOMScoreAdjust(pod, container, int64(nodeMemoryCapacity.Value()))
expectedOomScoreAdj := strconv.FormatInt(int64(oomScoreAdj), 10)
-return VerifyOomScoreAdjValue(f, pod, container.Name, expectedOomScoreAdj)
+return e2epod.VerifyOomScoreAdjValue(f, pod, container.Name, expectedOomScoreAdj)
}
-func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo) *v1.Pod {
+func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo) *v1.Pod {
ginkgo.GinkgoHelper()
// Wait for resize to complete.
@@ -535,3 +536,35 @@ func formatErrors(err error) error {
}
return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
}
+// TODO: Remove the rounded cpu limit values when https://github.com/opencontainers/runc/issues/4622
+// is fixed.
+func GetCPULimitCgroupExpectations(cpuLimit *resource.Quantity) []string {
+var expectedCPULimits []string
+milliCPULimit := cpuLimit.MilliValue()
+cpuQuota := kubecm.MilliCPUToQuota(milliCPULimit, kubecm.QuotaPeriod)
+if cpuLimit.IsZero() {
+cpuQuota = -1
+}
+expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuota))
+if milliCPULimit%10 != 0 && cpuQuota != -1 {
+roundedCPULimit := (milliCPULimit/10 + 1) * 10
+cpuQuotaRounded := kubecm.MilliCPUToQuota(roundedCPULimit, kubecm.QuotaPeriod)
+expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuotaRounded))
+}
+return expectedCPULimits
+}
+func getExpectedCPULimitFromCPUQuota(cpuQuota int64) string {
+expectedCPULimitString := strconv.FormatInt(cpuQuota, 10)
+if *podOnCgroupv2Node {
+if expectedCPULimitString == "-1" {
+expectedCPULimitString = "max"
+}
+expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
+}
+return expectedCPULimitString
+}
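For orientation, here is a minimal standalone sketch of what these expectations evaluate to on a cgroup v2 node. It mirrors the basic kubelet quota formula (quota = milliCPU × period / 1000, ignoring the minimum-quota clamp); the helper names and the 125m example value are illustrative only, not part of this change:

```go
package main

import "fmt"

const cpuPeriod = 100000 // same period the helpers format into "cpu.max"

// milliCPUToQuota mirrors the basic kubelet quota computation, for illustration.
func milliCPUToQuota(milliCPU int64) int64 {
	if milliCPU == 0 {
		return -1 // no limit -> rendered as "max" on cgroup v2
	}
	return milliCPU * cpuPeriod / 1000
}

// expectedCPUMax renders a cgroup v2 cpu.max value for a given quota.
func expectedCPUMax(quota int64) string {
	if quota == -1 {
		return fmt.Sprintf("max %d", cpuPeriod)
	}
	return fmt.Sprintf("%d %d", quota, cpuPeriod)
}

func main() {
	// A 125m CPU limit is not a multiple of 10m, so two values are accepted:
	// the exact quota, and the quota for the limit rounded up to 130m, which
	// tolerates https://github.com/opencontainers/runc/issues/4622.
	milli := int64(125)
	expected := []string{expectedCPUMax(milliCPUToQuota(milli))}
	if milli%10 != 0 {
		rounded := (milli/10 + 1) * 10
		expected = append(expected, expectedCPUMax(milliCPUToQuota(rounded)))
	}
	fmt.Println(expected) // [12500 100000 13000 100000]
}
```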


@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package pod
+package podresize
import (
"testing"


@@ -30,6 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/common/node/framework/podresize"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -44,7 +45,6 @@ const (
cgroupv2CPULimit string = "cpu.max"
cgroupv2MemLimit string = "memory.max"
cgroupFsPath string = "/sys/fs/cgroup"
-CPUPeriod string = "100000"
mountPath string = "/sysfscgroup"
)
@@ -233,7 +233,7 @@ func verifyPodCgroups(ctx context.Context, f *framework.Framework, pod *v1.Pod,
}
cpuLimCgPath := fmt.Sprintf("%s/%s", podCgPath, cgroupv2CPULimit)
-expectedCPULimits := e2epod.GetCPULimitCgroupExpectations(expectedResources.Limits.Cpu())
+expectedCPULimits := podresize.GetCPULimitCgroupExpectations(expectedResources.Limits.Cpu())
err = e2epod.VerifyCgroupValue(f, pod, pod.Spec.Containers[0].Name, cpuLimCgPath, expectedCPULimits...)
if err != nil {
@@ -394,7 +394,7 @@ func verifyContainersCgroupLimits(f *framework.Framework, pod *v1.Pod) error {
if pod.Spec.Resources != nil && pod.Spec.Resources.Limits.Cpu() != nil &&
container.Resources.Limits.Cpu() == nil {
-expectedCPULimits := e2epod.GetCPULimitCgroupExpectations(pod.Spec.Resources.Limits.Cpu())
+expectedCPULimits := podresize.GetCPULimitCgroupExpectations(pod.Spec.Resources.Limits.Cpu())
err := e2epod.VerifyCgroupValue(f, pod, container.Name, fmt.Sprintf("%s/%s", cgroupFsPath, cgroupv2CPULimit), expectedCPULimits...)
if err != nil {
errs = append(errs, fmt.Errorf("failed to verify cpu limit cgroup value: %w", err))

File diff suppressed because it is too large.


@@ -18,15 +18,12 @@ package pod
import (
"fmt"
"strconv"
"strings"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
psaapi "k8s.io/pod-security-admission/api"
@@ -344,35 +341,3 @@ func IsPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
}
return len(out) != 0
}
-// TODO: Remove the rounded cpu limit values when https://github.com/opencontainers/runc/issues/4622
-// is fixed.
-func GetCPULimitCgroupExpectations(cpuLimit *resource.Quantity) []string {
-var expectedCPULimits []string
-milliCPULimit := cpuLimit.MilliValue()
-cpuQuota := kubecm.MilliCPUToQuota(milliCPULimit, kubecm.QuotaPeriod)
-if cpuLimit.IsZero() {
-cpuQuota = -1
-}
-expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuota))
-if milliCPULimit%10 != 0 && cpuQuota != -1 {
-roundedCPULimit := (milliCPULimit/10 + 1) * 10
-cpuQuotaRounded := kubecm.MilliCPUToQuota(roundedCPULimit, kubecm.QuotaPeriod)
-expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuotaRounded))
-}
-return expectedCPULimits
-}
-func getExpectedCPULimitFromCPUQuota(cpuQuota int64) string {
-expectedCPULimitString := strconv.FormatInt(cpuQuota, 10)
-if *podOnCgroupv2Node {
-if expectedCPULimitString == "-1" {
-expectedCPULimitString = "max"
-}
-expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
-}
-return expectedCPULimitString
-}


@@ -29,6 +29,7 @@ import (
helpers "k8s.io/component-helpers/resource"
resourceapi "k8s.io/kubernetes/pkg/api/v1/resource"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/common/node/framework/podresize"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -114,19 +115,19 @@ func doPodResizeAdmissionPluginsTests() {
f := framework.NewDefaultFramework(tc.name)
ginkgo.It(tc.name, func(ctx context.Context) {
-containers := []e2epod.ResizableContainerInfo{
+containers := []podresize.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "300Mi", MemLim: "300Mi"},
Resources: &podresize.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "300Mi", MemLim: "300Mi"},
},
}
patchString := `{"spec":{"containers":[
{"name":"c1", "resources":{"requests":{"cpu":"400m","memory":"400Mi"},"limits":{"cpu":"400m","memory":"400Mi"}}}
]}}`
-expected := []e2epod.ResizableContainerInfo{
+expected := []podresize.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: "400m", CPULim: "400m", MemReq: "400Mi", MemLim: "400Mi"},
Resources: &podresize.ContainerResources{CPUReq: "400m", CPULim: "400m", MemReq: "400Mi", MemLim: "400Mi"},
},
}
patchStringExceedCPU := `{"spec":{"containers":[
@@ -139,9 +140,9 @@ func doPodResizeAdmissionPluginsTests() {
tc.enableAdmissionPlugin(ctx, f)
tStamp := strconv.Itoa(time.Now().Nanosecond())
-testPod1 := e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, containers)
+testPod1 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, containers)
testPod1 = e2epod.MustMixinRestrictedPodSecurity(testPod1)
-testPod2 := e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, containers)
+testPod2 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, containers)
testPod2 = e2epod.MustMixinRestrictedPodSecurity(testPod2)
ginkgo.By("creating pods")
@@ -149,23 +150,23 @@ func doPodResizeAdmissionPluginsTests() {
newPods := podClient.CreateBatch(ctx, []*v1.Pod{testPod1, testPod2})
ginkgo.By("verifying initial pod resources, and policy are as expected")
-e2epod.VerifyPodResources(newPods[0], containers)
+podresize.VerifyPodResources(newPods[0], containers)
ginkgo.By("patching pod for resize within resource quota")
patchedPod, pErr := f.ClientSet.CoreV1().Pods(newPods[0].Namespace).Patch(ctx, newPods[0].Name,
types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{}, "resize")
framework.ExpectNoError(pErr, "failed to patch pod for resize")
-expected = e2epod.UpdateExpectedContainerRestarts(ctx, patchedPod, expected)
+expected = podresize.UpdateExpectedContainerRestarts(ctx, patchedPod, expected)
ginkgo.By("verifying pod patched for resize within resource quota")
-e2epod.VerifyPodResources(patchedPod, expected)
+podresize.VerifyPodResources(patchedPod, expected)
ginkgo.By("waiting for resize to be actuated")
-resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPods[0], expected)
-e2epod.ExpectPodResized(ctx, f, resizedPod, expected)
+resizedPod := podresize.WaitForPodResizeActuation(ctx, f, podClient, newPods[0], expected)
+podresize.ExpectPodResized(ctx, f, resizedPod, expected)
ginkgo.By("verifying pod resources after resize")
-e2epod.VerifyPodResources(resizedPod, expected)
+podresize.VerifyPodResources(resizedPod, expected)
ginkgo.By("patching pod for resize with memory exceeding resource quota")
framework.ExpectNoError(framework.Gomega().
@@ -181,8 +182,8 @@ func doPodResizeAdmissionPluginsTests() {
ginkgo.By("verifying pod patched for resize exceeding memory resource quota remains unchanged")
patchedPodExceedMemory, pErrEx2 := podClient.Get(ctx, resizedPod.Name, metav1.GetOptions{})
framework.ExpectNoError(pErrEx2, "failed to get pod post exceed memory resize")
-e2epod.VerifyPodResources(patchedPodExceedMemory, expected)
-framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPodExceedMemory, expected))
+podresize.VerifyPodResources(patchedPodExceedMemory, expected)
+framework.ExpectNoError(podresize.VerifyPodStatusResources(patchedPodExceedMemory, expected))
ginkgo.By(fmt.Sprintf("patching pod %s for resize with CPU exceeding resource quota", resizedPod.Name))
framework.ExpectNoError(framework.Gomega().
@@ -198,8 +199,8 @@ func doPodResizeAdmissionPluginsTests() {
ginkgo.By("verifying pod patched for resize exceeding CPU resource quota remains unchanged")
patchedPodExceedCPU, pErrEx1 := podClient.Get(ctx, resizedPod.Name, metav1.GetOptions{})
framework.ExpectNoError(pErrEx1, "failed to get pod post exceed CPU resize")
-e2epod.VerifyPodResources(patchedPodExceedCPU, expected)
-framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPodExceedMemory, expected))
+podresize.VerifyPodResources(patchedPodExceedCPU, expected)
+framework.ExpectNoError(podresize.VerifyPodStatusResources(patchedPodExceedMemory, expected))
})
}
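Taken together, the call sites above follow one pattern: resize-specific helpers are now imported from the podresize package, while generic pod helpers stay in e2epod. A rough caller-side sketch under those assumptions (the function name, the literal resource values, and the patch payload are made up for illustration and are not part of the commit):

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"k8s.io/kubernetes/test/e2e/common/node/framework/podresize"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func resizeAndVerify(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient) {
	containers := []podresize.ResizableContainerInfo{{
		Name:      "c1",
		Resources: &podresize.ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "300Mi", MemLim: "300Mi"},
	}}
	expected := []podresize.ResizableContainerInfo{{
		Name:      "c1",
		Resources: &podresize.ContainerResources{CPUReq: "400m", CPULim: "400m", MemReq: "400Mi", MemLim: "400Mi"},
	}}

	// Pod construction comes from podresize; the restricted-PodSecurity mix-in
	// and the pod client remain in the generic e2epod framework package.
	pod := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod", "0", containers)
	pod = e2epod.MustMixinRestrictedPodSecurity(pod)
	pod = podClient.CreateSync(ctx, pod)
	podresize.VerifyPodResources(pod, containers)

	// Resize through the "resize" subresource, then wait for actuation.
	patch := `{"spec":{"containers":[{"name":"c1","resources":{"requests":{"cpu":"400m","memory":"400Mi"},"limits":{"cpu":"400m","memory":"400Mi"}}}]}}`
	patched, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name,
		types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "resize")
	framework.ExpectNoError(err, "failed to patch pod for resize")

	expected = podresize.UpdateExpectedContainerRestarts(ctx, patched, expected)
	resized := podresize.WaitForPodResizeActuation(ctx, f, podClient, patched, expected)
	podresize.ExpectPodResized(ctx, f, resized, expected)
	podresize.VerifyPodResources(resized, expected)
}
```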
@@ -255,16 +256,16 @@ func doPodResizeSchedulerTests(f *framework.Framework) {
framework.Logf("TEST1: testPod2 initial CPU request is '%dm'", testPod2CPUQuantity.MilliValue())
framework.Logf("TEST1: testPod2 resized CPU request is '%dm'", testPod2CPUQuantityResized.MilliValue())
-c1 := []e2epod.ResizableContainerInfo{
+c1 := []podresize.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: testPod1CPUQuantity.String(), CPULim: testPod1CPUQuantity.String()},
Resources: &podresize.ContainerResources{CPUReq: testPod1CPUQuantity.String(), CPULim: testPod1CPUQuantity.String()},
},
}
-c2 := []e2epod.ResizableContainerInfo{
+c2 := []podresize.ResizableContainerInfo{
{
Name: "c2",
Resources: &e2epod.ContainerResources{CPUReq: testPod2CPUQuantity.String(), CPULim: testPod2CPUQuantity.String()},
Resources: &podresize.ContainerResources{CPUReq: testPod2CPUQuantity.String(), CPULim: testPod2CPUQuantity.String()},
},
}
patchTestpod2ToFitNode := fmt.Sprintf(`{
@@ -279,9 +280,9 @@ func doPodResizeSchedulerTests(f *framework.Framework) {
}`, testPod2CPUQuantityResized.MilliValue(), testPod2CPUQuantityResized.MilliValue())
tStamp := strconv.Itoa(time.Now().Nanosecond())
-testPod1 := e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, c1)
+testPod1 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod1", tStamp, c1)
testPod1 = e2epod.MustMixinRestrictedPodSecurity(testPod1)
-testPod2 := e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, c2)
+testPod2 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod2", tStamp, c2)
testPod2 = e2epod.MustMixinRestrictedPodSecurity(testPod2)
e2epod.SetNodeAffinity(&testPod1.Spec, node.Name)
e2epod.SetNodeAffinity(&testPod2.Spec, node.Name)
@@ -319,10 +320,10 @@ func doPodResizeSchedulerTests(f *framework.Framework) {
testPod1CPUQuantityResized := resource.NewMilliQuantity(testPod1CPUQuantity.MilliValue()/3, resource.DecimalSI)
framework.Logf("TEST2: testPod1 MilliCPUs after resize '%dm'", testPod1CPUQuantityResized.MilliValue())
-c3 := []e2epod.ResizableContainerInfo{
+c3 := []podresize.ResizableContainerInfo{
{
Name: "c3",
Resources: &e2epod.ContainerResources{CPUReq: testPod3CPUQuantity.String(), CPULim: testPod3CPUQuantity.String()},
Resources: &podresize.ContainerResources{CPUReq: testPod3CPUQuantity.String(), CPULim: testPod3CPUQuantity.String()},
},
}
patchTestpod1ToMakeSpaceForPod3 := fmt.Sprintf(`{
@@ -337,7 +338,7 @@ func doPodResizeSchedulerTests(f *framework.Framework) {
}`, testPod1CPUQuantityResized.MilliValue(), testPod1CPUQuantityResized.MilliValue())
tStamp = strconv.Itoa(time.Now().Nanosecond())
-testPod3 := e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod3", tStamp, c3)
+testPod3 := podresize.MakePodWithResizableContainers(f.Namespace.Name, "testpod3", tStamp, c3)
testPod3 = e2epod.MustMixinRestrictedPodSecurity(testPod3)
e2epod.SetNodeAffinity(&testPod3.Spec, node.Name)
@@ -404,15 +405,15 @@ func doPodResizeSchedulerTests(f *framework.Framework) {
framework.ExpectNoError(delErr3, "failed to delete pod %s", testPod3.Name)
ginkgo.By(fmt.Sprintf("TEST3: Verify pod '%s' is resized successfully after pod deletion '%s' and '%s", testPod1.Name, testPod2.Name, testPod3.Name))
-expected := []e2epod.ResizableContainerInfo{
+expected := []podresize.ResizableContainerInfo{
{
Name: "c1",
Resources: &e2epod.ContainerResources{CPUReq: testPod1CPUQuantity.String(), CPULim: testPod1CPUQuantity.String()},
Resources: &podresize.ContainerResources{CPUReq: testPod1CPUQuantity.String(), CPULim: testPod1CPUQuantity.String()},
RestartCount: testPod1.Status.ContainerStatuses[0].RestartCount,
},
}
-resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, testPod1, expected)
-e2epod.ExpectPodResized(ctx, f, resizedPod, expected)
+resizedPod := podresize.WaitForPodResizeActuation(ctx, f, podClient, testPod1, expected)
+podresize.ExpectPodResized(ctx, f, resizedPod, expected)
ginkgo.By(fmt.Sprintf("TEST3: Resize pod '%s' to exceed the node capacity", testPod1.Name))
testPod1, p1Err = f.ClientSet.CoreV1().Pods(testPod1.Namespace).Patch(ctx,