accept rounded container cpu limits in container cgroup tests (#131059)

Natasha Sarkar 2025-03-28 01:40:34 -05:00 committed by GitHub
parent 83bb5d5705
commit 5c7491bf08
4 changed files with 140 additions and 27 deletions
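Note on the change (context reconstructed from the diff and the runc issue it links): the container runtime may write a CPU quota rounded up from what the kubelet computed (https://github.com/opencontainers/runc/issues/4622), so these e2e tests now accept either the exact quota or the quota for the CPU limit rounded up to the next multiple of 10m. With the default 100000µs period, quota = milliCPU * 100: a 15m limit yields 1500, the rounded 20m limit yields 2000, so on a cgroup v2 node both "1500 100000" and "2000 100000" pass.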

View File

@@ -233,10 +233,9 @@ func verifyPodCgroups(ctx context.Context, f *framework.Framework, pod *v1.Pod,
 	}
 	cpuLimCgPath := fmt.Sprintf("%s/%s", podCgPath, cgroupv2CPULimit)
-	cpuQuota := kubecm.MilliCPUToQuota(expectedResources.Limits.Cpu().MilliValue(), kubecm.QuotaPeriod)
-	expectedCPULimit := strconv.FormatInt(cpuQuota, 10)
-	expectedCPULimit = fmt.Sprintf("%s %s", expectedCPULimit, CPUPeriod)
-	err = e2epod.VerifyCgroupValue(f, pod, pod.Spec.Containers[0].Name, cpuLimCgPath, expectedCPULimit)
+	expectedCPULimits := e2epod.GetCPULimitCgroupExpectations(expectedResources.Limits.Cpu())
+	err = e2epod.VerifyCgroupValue(f, pod, pod.Spec.Containers[0].Name, cpuLimCgPath, expectedCPULimits...)
 	if err != nil {
 		errs = append(errs, fmt.Errorf("failed to verify cpu limit cgroup value: %w", err))
 	}
@@ -395,10 +394,8 @@ func verifyContainersCgroupLimits(f *framework.Framework, pod *v1.Pod) error {
 		if pod.Spec.Resources != nil && pod.Spec.Resources.Limits.Cpu() != nil &&
 			container.Resources.Limits.Cpu() == nil {
-			cpuQuota := kubecm.MilliCPUToQuota(pod.Spec.Resources.Limits.Cpu().MilliValue(), kubecm.QuotaPeriod)
-			expectedCPULimit := strconv.FormatInt(cpuQuota, 10)
-			expectedCPULimit = fmt.Sprintf("%s %s", expectedCPULimit, CPUPeriod)
-			err := e2epod.VerifyCgroupValue(f, pod, container.Name, fmt.Sprintf("%s/%s", cgroupFsPath, cgroupv2CPULimit), expectedCPULimit)
+			expectedCPULimits := e2epod.GetCPULimitCgroupExpectations(pod.Spec.Resources.Limits.Cpu())
+			err := e2epod.VerifyCgroupValue(f, pod, container.Name, fmt.Sprintf("%s/%s", cgroupFsPath, cgroupv2CPULimit), expectedCPULimits...)
 			if err != nil {
 				errs = append(errs, fmt.Errorf("failed to verify cpu limit cgroup value: %w", err))
 			}
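Both call sites above now delegate the quota arithmetic to the new helper. For orientation, the conversion performed by kubecm.MilliCPUToQuota is quota = milliCPU * period / 1000; a minimal standalone sketch, assuming the default 100ms period (package name hypothetical):

	package quotasketch

	// Sketch of the CFS quota math the tests rely on. An illustration, not
	// the kubelet implementation: kubecm.MilliCPUToQuota additionally clamps
	// very small results to a 1ms minimum quota, which these tests never hit.
	const quotaPeriod = 100000 // period in microseconds, matching kubecm.QuotaPeriod

	func milliCPUToQuota(milliCPU int64) int64 {
		return milliCPU * quotaPeriod / 1000 // e.g. 15m -> 1500, 20m -> 2000
	}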

View File

@@ -321,7 +321,7 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 		tc := makeResizableContainer(ci)
 		if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
 			var expectedCPUShares int64
-			var expectedCPULimitString, expectedMemLimitString string
+			var expectedMemLimitString string
 			expectedMemLimitInBytes := tc.Resources.Limits.Memory().Value()
 			cpuRequest := tc.Resources.Requests.Cpu()
 			cpuLimit := tc.Resources.Limits.Cpu()
@@ -330,17 +330,10 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 			} else {
 				expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuRequest.MilliValue()))
 			}
-			cpuQuota := kubecm.MilliCPUToQuota(cpuLimit.MilliValue(), kubecm.QuotaPeriod)
-			if cpuLimit.IsZero() {
-				cpuQuota = -1
-			}
-			expectedCPULimitString = strconv.FormatInt(cpuQuota, 10)
+			expectedCPULimits := GetCPULimitCgroupExpectations(cpuLimit)
 			expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10)
 			if *podOnCgroupv2Node {
-				if expectedCPULimitString == "-1" {
-					expectedCPULimitString = "max"
-				}
-				expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
 				if expectedMemLimitString == "0" {
 					expectedMemLimitString = "max"
 				}
@@ -348,10 +341,11 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
 				// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
 				expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
 			}
 			if expectedMemLimitString != "0" {
 				errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupMemLimit, expectedMemLimitString))
 			}
-			errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimitString))
+			errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimits...))
 			errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
 			// TODO(vinaykul,InPlacePodVerticalScaling): Verify oom_score_adj when runc adds support for updating it
 			// See https://github.com/opencontainers/runc/pull/4669
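The shares-to-weight conversion retained in this hunk maps cgroup v1 CPU shares in [2, 262144] onto cgroup v2 weights in [1, 10000], per the linked KEP. As a worked example, the default 1024 shares (a 1-CPU request) give 1 + (1024 - 2) * 9999 / 262142 = 1 + 38 = 39 under integer division, so such a container is expected to show cpu.weight = 39.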

View File

@@ -18,12 +18,15 @@ package pod
 import (
 	"fmt"
 	"strconv"
 	"strings"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	psaapi "k8s.io/pod-security-admission/api"
@@ -292,20 +295,23 @@ func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerSt
 }
 
 // VerifyCgroupValue verifies that the given cgroup path has the expected value in
-// the specified container of the pod. It execs into the container to retrive the
-// cgroup value and compares it against the expected value.
-func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath, expectedCgValue string) error {
+// the specified container of the pod. It execs into the container to retrieve the
+// cgroup value, and ensures that the retrieved cgroup value is equivalent to at
+// least one of the values in expectedCgValues.
+func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath string, expectedCgValues ...string) error {
 	cmd := fmt.Sprintf("head -n 1 %s", cgPath)
-	framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
-		pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
+	framework.Logf("Namespace %s Pod %s Container %s - looking for one of the expected cgroup values %s in path %s",
+		pod.Namespace, pod.Name, cName, expectedCgValues, cgPath)
 	cgValue, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
 	if err != nil {
-		return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
+		return fmt.Errorf("failed to find one of the expected cgroup values %q in container cgroup %q", expectedCgValues, cgPath)
 	}
 	cgValue = strings.Trim(cgValue, "\n")
-	if cgValue != expectedCgValue {
-		return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
+	if err := framework.Gomega().Expect(cgValue).To(gomega.BeElementOf(expectedCgValues)); err != nil {
+		return fmt.Errorf("value of cgroup %q for container %q should match one of the expectations: %w", cgPath, cName, err)
 	}
 	return nil
 }
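Because expectedCgValues is variadic, existing single-value call sites compile unchanged, and rounding-tolerant call sites simply splat the helper's slice. A hypothetical call site (f, pod, and cpuLimit are assumed to be in scope, and "app" stands in for a real container name):

	// Accept either the exact quota or the runtime-rounded one for cpu.max,
	// the cgroup v2 CPU limit interface file.
	expected := GetCPULimitCgroupExpectations(cpuLimit) // e.g. ["1500 100000", "2000 100000"]
	if err := VerifyCgroupValue(f, pod, "app", "cpu.max", expected...); err != nil {
		framework.Failf("cgroup verification failed: %v", err)
	}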
@@ -338,3 +344,35 @@ func IsPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
 	}
 	return len(out) != 0
 }
+
+// TODO: Remove the rounded cpu limit values when https://github.com/opencontainers/runc/issues/4622
+// is fixed.
+func GetCPULimitCgroupExpectations(cpuLimit *resource.Quantity) []string {
+	var expectedCPULimits []string
+	milliCPULimit := cpuLimit.MilliValue()
+	cpuQuota := kubecm.MilliCPUToQuota(milliCPULimit, kubecm.QuotaPeriod)
+	if cpuLimit.IsZero() {
+		cpuQuota = -1
+	}
+	expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuota))
+	if milliCPULimit%10 != 0 && cpuQuota != -1 {
+		roundedCPULimit := (milliCPULimit/10 + 1) * 10
+		cpuQuotaRounded := kubecm.MilliCPUToQuota(roundedCPULimit, kubecm.QuotaPeriod)
+		expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuotaRounded))
+	}
+	return expectedCPULimits
+}
+
+func getExpectedCPULimitFromCPUQuota(cpuQuota int64) string {
+	expectedCPULimitString := strconv.FormatInt(cpuQuota, 10)
+	if *podOnCgroupv2Node {
+		if expectedCPULimitString == "-1" {
+			expectedCPULimitString = "max"
+		}
+		expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
+	}
+	return expectedCPULimitString
+}
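Tracing the helper for a 15m limit: 15 % 10 = 5, so both quotas are produced, 1500 from the exact limit and 2000 from the limit rounded up to (15/10 + 1) * 10 = 20m. For a 20m limit the modulo is zero and only "2000" (or "2000 100000" on cgroup v2) is returned. For a zero limit the quota is forced to -1, which getExpectedCPULimitFromCPUQuota renders as "max 100000" on cgroup v2 and "-1" otherwise. These are exactly the cases covered by the new unit test below.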

View File

@@ -0,0 +1,84 @@
+//go:build linux
+// +build linux
+
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+func TestGetCPULimitCgroupExpectations(t *testing.T) {
+	testCases := []struct {
+		name              string
+		cpuLimit          *resource.Quantity
+		podOnCgroupv2Node bool
+		expected          []string
+	}{
+		{
+			name:              "rounding required, podOnCGroupv2Node=true",
+			cpuLimit:          resource.NewMilliQuantity(15, resource.DecimalSI),
+			podOnCgroupv2Node: true,
+			expected:          []string{"1500 100000", "2000 100000"},
+		},
+		{
+			name:              "rounding not required, podOnCGroupv2Node=true",
+			cpuLimit:          resource.NewMilliQuantity(20, resource.DecimalSI),
+			podOnCgroupv2Node: true,
+			expected:          []string{"2000 100000"},
+		},
+		{
+			name:              "rounding required, podOnCGroupv2Node=false",
+			cpuLimit:          resource.NewMilliQuantity(15, resource.DecimalSI),
+			podOnCgroupv2Node: false,
+			expected:          []string{"1500", "2000"},
+		},
+		{
+			name:              "rounding not required, podOnCGroupv2Node=false",
+			cpuLimit:          resource.NewMilliQuantity(20, resource.DecimalSI),
+			podOnCgroupv2Node: false,
+			expected:          []string{"2000"},
+		},
+		{
+			name:              "cpuQuota=0, podOnCGroupv2Node=true",
+			cpuLimit:          resource.NewMilliQuantity(0, resource.DecimalSI),
+			podOnCgroupv2Node: true,
+			expected:          []string{"max 100000"},
+		},
+		{
+			name:              "cpuQuota=0, podOnCGroupv2Node=false",
+			cpuLimit:          resource.NewMilliQuantity(0, resource.DecimalSI),
+			podOnCgroupv2Node: false,
+			expected:          []string{"-1"},
+		},
+	}
+
+	originalPodOnCgroupv2Node := podOnCgroupv2Node
+	t.Cleanup(func() { podOnCgroupv2Node = originalPodOnCgroupv2Node })
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			podOnCgroupv2Node = &tc.podOnCgroupv2Node
+			actual := GetCPULimitCgroupExpectations(tc.cpuLimit)
+			assert.Equal(t, tc.expected, actual)
+		})
+	}
+}
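A design note on the test scaffolding: GetCPULimitCgroupExpectations reads the package-level podOnCgroupv2Node pointer, so the table test overrides it per case and restores the original via t.Cleanup; the subtests must therefore not run in parallel. Assuming the standard repository layout, the table can be run on its own (on Linux, given the build tag) with: go test k8s.io/kubernetes/test/e2e/framework/pod -run TestGetCPULimitCgroupExpectations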