Make kuberuntime unit tests environment independent + support cgroup v2

Before this commit, a libcontainer function was
used directly to find out the current node's
cgroup version. This made the cgroup version hard
to mock and dependent on the environment in which
the unit tests run.

After this commit, libcontainer's function is
wrapped in a function variable that the tests can
reassign. This way the tests can easily mock the
cgroup version and become environment independent.

After this commit, both cgroup v1 and v2 are
tested, regardless of the environment in which
the tests run.

Signed-off-by: Itamar Holder <iholder@redhat.com>
Itamar Holder 2023-06-20 11:28:21 +02:00
parent f37aec6c57
commit e4da568f33
3 changed files with 178 additions and 56 deletions
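
The heart of the change is a small indirection seam. The sketch below condenses it from the hunks that follow into one self-contained snippet (the package name and the upstream runc import path are assumptions here; Kubernetes vendors the library):

package kuberuntime

import (
	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
)

// CgroupVersion identifies which cgroup hierarchy a test simulates.
type CgroupVersion string

const (
	cgroupV1 CgroupVersion = "v1"
	cgroupV2 CgroupVersion = "v2"
)

// Production code calls through this package-level function variable
// instead of calling libcontainer directly, which gives tests a seam
// to inject either cgroup version.
var isCgroup2UnifiedMode = func() bool {
	return libcontainercgroups.IsCgroup2UnifiedMode()
}

// Tests reassign the variable to force a version; note that the
// override stays in effect until another test reassigns it.
func setCgroupVersionDuringTest(version CgroupVersion) {
	isCgroup2UnifiedMode = func() bool {
		return version == cgroupV2
	}
}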

@@ -46,7 +46,7 @@ func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config
 	enforceMemoryQoS := false
 	// Set memory.min and memory.high if MemoryQoS enabled with cgroups v2
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
-		libcontainercgroups.IsCgroup2UnifiedMode() {
+		isCgroup2UnifiedMode() {
 		enforceMemoryQoS = true
 	}
 	cl, err := m.generateLinuxContainerConfig(container, pod, uid, username, nsTarget, enforceMemoryQoS)
@@ -171,7 +171,7 @@ func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, cont
 	enforceMemoryQoS := false
 	// Set memory.min and memory.high if MemoryQoS enabled with cgroups v2
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
-		libcontainercgroups.IsCgroup2UnifiedMode() {
+		isCgroup2UnifiedMode() {
 		enforceMemoryQoS = true
 	}
 	return &runtimeapi.ContainerResources{
@@ -216,7 +216,7 @@ func (m *kubeGenericRuntimeManager) calculateLinuxResources(cpuRequest, cpuLimit
 	}
 	// runc requires cgroupv2 for unified mode
-	if libcontainercgroups.IsCgroup2UnifiedMode() {
+	if isCgroup2UnifiedMode() {
 		resources.Unified = map[string]string{
 			// Ask the kernel to kill all processes in the container cgroup in case of OOM.
 			// See memory.oom.group in https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html for
@@ -298,3 +298,7 @@ func toKubeContainerResources(statusResources *runtimeapi.ContainerResources) *k
 	}
 	return cStatusResources
 }
+
+var isCgroup2UnifiedMode = func() bool {
+	return libcontainercgroups.IsCgroup2UnifiedMode()
+}

@@ -244,11 +244,12 @@ func TestCalculateLinuxResources(t *testing.T) {
 	}
 	tests := []struct {
-		name     string
-		cpuReq   *resource.Quantity
-		cpuLim   *resource.Quantity
-		memLim   *resource.Quantity
-		expected *runtimeapi.LinuxContainerResources
+		name          string
+		cpuReq        *resource.Quantity
+		cpuLim        *resource.Quantity
+		memLim        *resource.Quantity
+		expected      *runtimeapi.LinuxContainerResources
+		cgroupVersion CgroupVersion
 	}{
 		{
 			name: "Request128MBLimit256MB",
@@ -261,6 +262,7 @@ func TestCalculateLinuxResources(t *testing.T) {
 				CpuShares: 1024,
 				MemoryLimitInBytes: 134217728,
 			},
+			cgroupVersion: cgroupV1,
 		},
 		{
 			name: "RequestNoMemory",
@@ -273,6 +275,7 @@ func TestCalculateLinuxResources(t *testing.T) {
 				CpuShares: 2048,
 				MemoryLimitInBytes: 0,
 			},
+			cgroupVersion: cgroupV1,
 		},
 		{
 			name: "RequestNilCPU",
@@ -284,6 +287,7 @@ func TestCalculateLinuxResources(t *testing.T) {
 				CpuShares: 2048,
 				MemoryLimitInBytes: 0,
 			},
+			cgroupVersion: cgroupV1,
 		},
 		{
 			name: "RequestZeroCPU",
@@ -296,9 +300,66 @@ func TestCalculateLinuxResources(t *testing.T) {
 				CpuShares: 2,
 				MemoryLimitInBytes: 0,
 			},
+			cgroupVersion: cgroupV1,
 		},
+		{
+			name: "Request128MBLimit256MB",
+			cpuReq: generateResourceQuantity("1"),
+			cpuLim: generateResourceQuantity("2"),
+			memLim: generateResourceQuantity("128Mi"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod: 100000,
+				CpuQuota: 200000,
+				CpuShares: 1024,
+				MemoryLimitInBytes: 134217728,
+				Unified: map[string]string{"memory.oom.group": "1"},
+			},
+			cgroupVersion: cgroupV2,
+		},
+		{
+			name: "RequestNoMemory",
+			cpuReq: generateResourceQuantity("2"),
+			cpuLim: generateResourceQuantity("8"),
+			memLim: generateResourceQuantity("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod: 100000,
+				CpuQuota: 800000,
+				CpuShares: 2048,
+				MemoryLimitInBytes: 0,
+				Unified: map[string]string{"memory.oom.group": "1"},
+			},
+			cgroupVersion: cgroupV2,
+		},
+		{
+			name: "RequestNilCPU",
+			cpuLim: generateResourceQuantity("2"),
+			memLim: generateResourceQuantity("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod: 100000,
+				CpuQuota: 200000,
+				CpuShares: 2048,
+				MemoryLimitInBytes: 0,
+				Unified: map[string]string{"memory.oom.group": "1"},
+			},
+			cgroupVersion: cgroupV2,
+		},
+		{
+			name: "RequestZeroCPU",
+			cpuReq: generateResourceQuantity("0"),
+			cpuLim: generateResourceQuantity("2"),
+			memLim: generateResourceQuantity("0"),
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod: 100000,
+				CpuQuota: 200000,
+				CpuShares: 2,
+				MemoryLimitInBytes: 0,
+				Unified: map[string]string{"memory.oom.group": "1"},
+			},
+			cgroupVersion: cgroupV2,
+		},
 	}
 	for _, test := range tests {
+		setCgroupVersionDuringTest(test.cgroupVersion)
 		linuxContainerResources := m.calculateLinuxResources(test.cpuReq, test.cpuLim, test.memLim)
 		assert.Equal(t, test.expected, linuxContainerResources)
 	}
@@ -888,3 +949,16 @@ func TestGenerateLinuxContainerResources(t *testing.T) {
 	}
 	//TODO(vinaykul,InPlacePodVerticalScaling): Add unit tests for cgroup v1 & v2
 }
+
+type CgroupVersion string
+
+const (
+	cgroupV1 CgroupVersion = "v1"
+	cgroupV2 CgroupVersion = "v2"
+)
+
+func setCgroupVersionDuringTest(version CgroupVersion) {
+	isCgroup2UnifiedMode = func() bool {
+		return version == cgroupV2
+	}
+}

@@ -38,6 +38,59 @@ func TestApplySandboxResources(t *testing.T) {
 		Linux: &runtimeapi.LinuxPodSandboxConfig{},
 	}
+	getPodWithOverhead := func() *v1.Pod {
+		return &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				UID: "12345678",
+				Name: "bar",
+				Namespace: "new",
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								v1.ResourceMemory: resource.MustParse("128Mi"),
+								v1.ResourceCPU: resource.MustParse("2"),
+							},
+							Limits: v1.ResourceList{
+								v1.ResourceMemory: resource.MustParse("256Mi"),
+								v1.ResourceCPU: resource.MustParse("4"),
+							},
+						},
+					},
+				},
+				Overhead: v1.ResourceList{
+					v1.ResourceMemory: resource.MustParse("128Mi"),
+					v1.ResourceCPU: resource.MustParse("1"),
+				},
+			},
+		}
+	}
+
+	getPodWithoutOverhead := func() *v1.Pod {
+		return &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				UID: "12345678",
+				Name: "bar",
+				Namespace: "new",
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								v1.ResourceMemory: resource.MustParse("128Mi"),
+							},
+							Limits: v1.ResourceList{
+								v1.ResourceMemory: resource.MustParse("256Mi"),
+							},
+						},
+					},
+				},
+			},
+		}
+	}
 	require.NoError(t, err)
 	tests := []struct {
@@ -45,36 +98,11 @@ func TestApplySandboxResources(t *testing.T) {
 		pod *v1.Pod
 		expectedResource *runtimeapi.LinuxContainerResources
 		expectedOverhead *runtimeapi.LinuxContainerResources
+		cgroupVersion CgroupVersion
 	}{
 		{
 			description: "pod with overhead defined",
-			pod: &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					UID: "12345678",
-					Name: "bar",
-					Namespace: "new",
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Resources: v1.ResourceRequirements{
-								Requests: v1.ResourceList{
-									v1.ResourceMemory: resource.MustParse("128Mi"),
-									v1.ResourceCPU: resource.MustParse("2"),
-								},
-								Limits: v1.ResourceList{
-									v1.ResourceMemory: resource.MustParse("256Mi"),
-									v1.ResourceCPU: resource.MustParse("4"),
-								},
-							},
-						},
-					},
-					Overhead: v1.ResourceList{
-						v1.ResourceMemory: resource.MustParse("128Mi"),
-						v1.ResourceCPU: resource.MustParse("1"),
-					},
-				},
-			},
+			pod: getPodWithOverhead(),
 			expectedResource: &runtimeapi.LinuxContainerResources{
 				MemoryLimitInBytes: 268435456,
 				CpuPeriod: 100000,
@@ -87,30 +115,11 @@ func TestApplySandboxResources(t *testing.T) {
 				CpuQuota: 100000,
 				CpuShares: 1024,
 			},
+			cgroupVersion: cgroupV1,
 		},
 		{
 			description: "pod without overhead defined",
-			pod: &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					UID: "12345678",
-					Name: "bar",
-					Namespace: "new",
-				},
-				Spec: v1.PodSpec{
-					Containers: []v1.Container{
-						{
-							Resources: v1.ResourceRequirements{
-								Requests: v1.ResourceList{
-									v1.ResourceMemory: resource.MustParse("128Mi"),
-								},
-								Limits: v1.ResourceList{
-									v1.ResourceMemory: resource.MustParse("256Mi"),
-								},
-							},
-						},
-					},
-				},
-			},
+			pod: getPodWithoutOverhead(),
 			expectedResource: &runtimeapi.LinuxContainerResources{
 				MemoryLimitInBytes: 268435456,
 				CpuPeriod: 100000,
@@ -118,10 +127,45 @@ func TestApplySandboxResources(t *testing.T) {
 				CpuShares: 2,
 			},
 			expectedOverhead: &runtimeapi.LinuxContainerResources{},
+			cgroupVersion: cgroupV1,
 		},
+		{
+			description: "pod with overhead defined",
+			pod: getPodWithOverhead(),
+			expectedResource: &runtimeapi.LinuxContainerResources{
+				MemoryLimitInBytes: 268435456,
+				CpuPeriod: 100000,
+				CpuQuota: 400000,
+				CpuShares: 2048,
+				Unified: map[string]string{"memory.oom.group": "1"},
+			},
+			expectedOverhead: &runtimeapi.LinuxContainerResources{
+				MemoryLimitInBytes: 134217728,
+				CpuPeriod: 100000,
+				CpuQuota: 100000,
+				CpuShares: 1024,
+				Unified: map[string]string{"memory.oom.group": "1"},
+			},
+			cgroupVersion: cgroupV2,
+		},
+		{
+			description: "pod without overhead defined",
+			pod: getPodWithoutOverhead(),
+			expectedResource: &runtimeapi.LinuxContainerResources{
+				MemoryLimitInBytes: 268435456,
+				CpuPeriod: 100000,
+				CpuQuota: 0,
+				CpuShares: 2,
+				Unified: map[string]string{"memory.oom.group": "1"},
+			},
+			expectedOverhead: &runtimeapi.LinuxContainerResources{},
+			cgroupVersion: cgroupV2,
+		},
 	}
 	for i, test := range tests {
+		setCgroupVersionDuringTest(test.cgroupVersion)
 		m.applySandboxResources(test.pod, config)
 		assert.Equal(t, test.expectedResource, config.Linux.Resources, "TestCase[%d]: %s", i, test.description)
 		assert.Equal(t, test.expectedOverhead, config.Linux.Overhead, "TestCase[%d]: %s", i, test.description)