diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 7125bc64969..54f7109a99d 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -21,6 +21,8 @@ go_library( "util_sriov.go", "util_xfs_linux.go", "util_xfs_unsupported.go", + "utils_linux.go", + "utils_unsupported.go", ], importpath = "k8s.io/kubernetes/test/e2e_node", visibility = ["//visibility:public"], diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index c15d2a2a541..2f3a933774c 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -50,8 +50,13 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity) cgroupFsName = cgroupName.ToCgroupfs() } - // this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes - command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName) + command := "" + // this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB limit (hugetlb.2MB.max on cgroup v2, hugetlb.2MB.limit_in_bytes on cgroup v1)
+ if IsCgroup2UnifiedMode() { + command = fmt.Sprintf("expected=%v; actual=$(cat /tmp/%v/hugetlb.2MB.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName) + } else { + command = fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName) + } framework.Logf("Pod to run command: %v", command) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index 45cf79f73df..d128cecaee6 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -149,6 +149,11 @@ func destroyTemporaryCgroupsForReservation(cgroupManager cm.CgroupManager) error return cgroupManager.Destroy(cgroupConfig) } +// convertSharesToWeight converts from cgroup v1 cpu.shares to cgroup v2 cpu.weight +func convertSharesToWeight(shares int64) int64 { + return 1 + ((shares-2)*9999)/262142 +} + func runTest(f *framework.Framework) error { var oldCfg *kubeletconfig.KubeletConfiguration subsystems, err := cm.GetCgroupSubsystems() @@ -187,8 +192,14 @@ func runTest(f *framework.Framework) error { expectedNAPodCgroup := cm.ParseCgroupfsToCgroupName(currentConfig.CgroupRoot) expectedNAPodCgroup = cm.NewCgroupName(expectedNAPodCgroup, "kubepods") if !cgroupManager.Exists(expectedNAPodCgroup) { - return fmt.Errorf("Expected Node Allocatable Cgroup Does not exist") + return fmt.Errorf("Expected Node Allocatable Cgroup %q does not exist", expectedNAPodCgroup) } + + memoryLimitFile := "memory.limit_in_bytes" + if IsCgroup2UnifiedMode() { + memoryLimitFile = "memory.max" + } + // TODO: Update cgroupManager to expose a Status interface to get current Cgroup Settings. // The node may not have updated capacity and allocatable yet, so check that it happens eventually. 
gomega.Eventually(func() error { @@ -199,20 +210,33 @@ func runTest(f *framework.Framework) error { if len(nodeList.Items) != 1 { return fmt.Errorf("Unexpected number of node objects for node e2e. Expects only one node: %+v", nodeList) } + cgroupName := "kubepods" + if currentConfig.CgroupDriver == "systemd" { + cgroupName = "kubepods.slice" + } + node := nodeList.Items[0] capacity := node.Status.Capacity allocatableCPU, allocatableMemory, allocatablePIDs := getAllocatableLimits("200m", "200Mi", "1738", capacity) // Total Memory reservation is 200Mi excluding eviction thresholds. // Expect CPU shares on node allocatable cgroup to equal allocatable. - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], "kubepods", "cpu.shares"), int64(cm.MilliCPUToShares(allocatableCPU.MilliValue())), 10); err != nil { - return err + shares := int64(cm.MilliCPUToShares(allocatableCPU.MilliValue())) + if IsCgroup2UnifiedMode() { + // convert to the cgroup v2 cpu.weight value + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupName, "cpu.weight"), convertSharesToWeight(shares), 10); err != nil { + return err + } + } else { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupName, "cpu.shares"), shares, 10); err != nil { + return err + } } // Expect Memory limit on node allocatable cgroup to equal allocatable. - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], "kubepods", "memory.limit_in_bytes"), allocatableMemory.Value(), 0); err != nil { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupName, memoryLimitFile), allocatableMemory.Value(), 0); err != nil { return err } // Expect PID limit on node allocatable cgroup to equal allocatable. 
- if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], "kubepods", "pids.max"), allocatablePIDs.Value(), 0); err != nil { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], cgroupName, "pids.max"), allocatablePIDs.Value(), 0); err != nil { return err } @@ -235,42 +259,61 @@ func runTest(f *framework.Framework) error { return nil }, time.Minute, 5*time.Second).Should(gomega.BeNil()) - kubeReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup) - if !cgroupManager.Exists(kubeReservedCgroupName) { - return fmt.Errorf("Expected kube reserved cgroup Does not exist") + cgroupPath := "" + if currentConfig.CgroupDriver == "systemd" { + cgroupPath = cm.ParseSystemdToCgroupName(kubeReservedCgroup).ToSystemd() + } else { + cgroupPath = cgroupManager.Name(cm.NewCgroupName(cm.RootCgroupName, kubeReservedCgroup)) } // Expect CPU shares on kube reserved cgroup to equal it's reservation which is `100m`. kubeReservedCPU := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceCPU)]) - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupManager.Name(kubeReservedCgroupName), "cpu.shares"), int64(cm.MilliCPUToShares(kubeReservedCPU.MilliValue())), 10); err != nil { - return err + shares := int64(cm.MilliCPUToShares(kubeReservedCPU.MilliValue())) + if IsCgroup2UnifiedMode() { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupPath, "cpu.weight"), convertSharesToWeight(shares), 10); err != nil { + return err + } + } else { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupPath, "cpu.shares"), shares, 10); err != nil { + return err + } } // Expect Memory limit kube reserved cgroup to equal configured value `100Mi`. 
kubeReservedMemory := resource.MustParse(currentConfig.KubeReserved[string(v1.ResourceMemory)]) - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupManager.Name(kubeReservedCgroupName), "memory.limit_in_bytes"), kubeReservedMemory.Value(), 0); err != nil { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupPath, memoryLimitFile), kubeReservedMemory.Value(), 0); err != nil { return err } // Expect process ID limit kube reserved cgroup to equal configured value `738`. kubeReservedPIDs := resource.MustParse(currentConfig.KubeReserved[string(pidlimit.PIDs)]) - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], cgroupManager.Name(kubeReservedCgroupName), "pids.max"), kubeReservedPIDs.Value(), 0); err != nil { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], cgroupPath, "pids.max"), kubeReservedPIDs.Value(), 0); err != nil { return err } - systemReservedCgroupName := cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup) - if !cgroupManager.Exists(systemReservedCgroupName) { - return fmt.Errorf("Expected system reserved cgroup Does not exist") + + if currentConfig.CgroupDriver == "systemd" { + cgroupPath = cm.ParseSystemdToCgroupName(systemReservedCgroup).ToSystemd() + } else { + cgroupPath = cgroupManager.Name(cm.NewCgroupName(cm.RootCgroupName, systemReservedCgroup)) } + // Expect CPU shares on system reserved cgroup to equal it's reservation which is `100m`. 
systemReservedCPU := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceCPU)]) - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupManager.Name(systemReservedCgroupName), "cpu.shares"), int64(cm.MilliCPUToShares(systemReservedCPU.MilliValue())), 10); err != nil { - return err + shares = int64(cm.MilliCPUToShares(systemReservedCPU.MilliValue())) + if IsCgroup2UnifiedMode() { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupPath, "cpu.weight"), convertSharesToWeight(shares), 10); err != nil { + return err + } + } else { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["cpu"], cgroupPath, "cpu.shares"), shares, 10); err != nil { + return err + } } // Expect Memory limit on node allocatable cgroup to equal allocatable. systemReservedMemory := resource.MustParse(currentConfig.SystemReserved[string(v1.ResourceMemory)]) - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupManager.Name(systemReservedCgroupName), "memory.limit_in_bytes"), systemReservedMemory.Value(), 0); err != nil { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["memory"], cgroupPath, memoryLimitFile), systemReservedMemory.Value(), 0); err != nil { return err } // Expect process ID limit system reserved cgroup to equal configured value `1000`. 
systemReservedPIDs := resource.MustParse(currentConfig.SystemReserved[string(pidlimit.PIDs)]) - if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], cgroupManager.Name(systemReservedCgroupName), "pids.max"), systemReservedPIDs.Value(), 0); err != nil { + if err := expectFileValToEqual(filepath.Join(subsystems.MountPoints["pids"], cgroupPath, "pids.max"), systemReservedPIDs.Value(), 0); err != nil { return err } return nil diff --git a/test/e2e_node/pids_test.go b/test/e2e_node/pids_test.go index 9d5d34ecb0f..0c3192aaefd 100644 --- a/test/e2e_node/pids_test.go +++ b/test/e2e_node/pids_test.go @@ -45,7 +45,13 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod { } // this command takes the expected value and compares it against the actual value for the pod cgroup pids.max - command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/pids/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName) + command := "" + if IsCgroup2UnifiedMode() { + command = fmt.Sprintf("expected=%v; actual=$(cat /tmp/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName) + } else { + command = fmt.Sprintf("expected=%v; actual=$(cat /tmp/pids/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName) + } + framework.Logf("Pod to run command: %v", command) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index 4551c8a4cbe..540aa098db8 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -75,8 +75,14 @@ func makePodToVerifyCgroups(cgroupNames []string) *v1.Pod { klog.Infof("expecting %v cgroups to be found", cgroupFsNames) // build the pod command to either verify cgroups exist command := "" + for _, cgroupFsName := range cgroupFsNames { - 
localCommand := "if [ ! -d /tmp/memory/" + cgroupFsName + " ] || [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 1; fi; " + localCommand := "" + if IsCgroup2UnifiedMode() { + localCommand = "if [ ! -d /tmp/" + cgroupFsName + " ]; then exit 1; fi; " + } else { + localCommand = "if [ ! -d /tmp/memory/" + cgroupFsName + " ] || [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 1; fi; " + } command += localCommand } @@ -117,6 +123,14 @@ func makePodToVerifyCgroupRemoved(baseName string) *v1.Pod { components := strings.Split(baseName, "/") cgroupName := cm.NewCgroupName(cm.RootCgroupName, components...) cgroupFsName := toCgroupFsName(cgroupName) + + command := "" + if IsCgroup2UnifiedMode() { + command = "for i in `seq 1 10`; do if [ ! -d /tmp/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1" + } else { + command = "for i in `seq 1 10`; do if [ ! -d /tmp/memory/" + cgroupFsName + " ] && [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1" + } + pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod" + string(uuid.NewUUID()), @@ -127,7 +141,7 @@ func makePodToVerifyCgroupRemoved(baseName string) *v1.Pod { { Image: busyboxImage, Name: "container" + string(uuid.NewUUID()), - Command: []string{"sh", "-c", "for i in `seq 1 10`; do if [ ! -d /tmp/memory/" + cgroupFsName + " ] && [ ! 
-d /tmp/cpu/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1"}, + Command: []string{"sh", "-c", command}, VolumeMounts: []v1.VolumeMount{ { Name: "sysfscgroup", diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index 7ffd95d675f..a6e863f2a6b 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -194,6 +194,10 @@ func (e *E2EServices) startKubelet() (*server, error) { unitName := fmt.Sprintf("kubelet-%s.service", unitTimestamp) cmdArgs = append(cmdArgs, systemdRun, + "-p", "Delegate=true", + "-p", "CPUAccounting=true", + "-p", "MemoryAccounting=true", + "-p", "TasksAccounting=true", "--unit="+unitName, "--slice=runtime.slice", "--remain-after-exit", diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go index acd6fe55375..c2f5d350635 100644 --- a/test/e2e_node/summary_test.go +++ b/test/e2e_node/summary_test.go @@ -435,7 +435,11 @@ func recordSystemCgroupProcesses() { continue } - pids, err := ioutil.ReadFile(fmt.Sprintf("/sys/fs/cgroup/cpu/%s/cgroup.procs", cgroup)) + filePattern := "/sys/fs/cgroup/cpu/%s/cgroup.procs" + if IsCgroup2UnifiedMode() { + filePattern = "/sys/fs/cgroup/%s/cgroup.procs" + } + pids, err := ioutil.ReadFile(fmt.Sprintf(filePattern, cgroup)) if err != nil { framework.Logf("Failed to read processes in cgroup %s: %v", name, err) continue diff --git a/test/e2e_node/utils_linux.go b/test/e2e_node/utils_linux.go new file mode 100644 index 00000000000..50f59401ba9 --- /dev/null +++ b/test/e2e_node/utils_linux.go @@ -0,0 +1,28 @@ +// +build linux + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2enode + +import ( + libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" +) + +// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. +func IsCgroup2UnifiedMode() bool { + return libcontainercgroups.IsCgroup2UnifiedMode() +} diff --git a/test/e2e_node/utils_unsupported.go b/test/e2e_node/utils_unsupported.go new file mode 100644 index 00000000000..69322913b75 --- /dev/null +++ b/test/e2e_node/utils_unsupported.go @@ -0,0 +1,24 @@ +// +build !linux + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2enode + +// IsCgroup2UnifiedMode returns whether we are running in cgroup v2 unified mode. +func IsCgroup2UnifiedMode() bool { + return false +}