From 5539e6103253c1780deeef759238e738439d3e30 Mon Sep 17 00:00:00 2001
From: mattjmcnaughton
Date: Fri, 7 Jun 2019 10:48:42 -0400
Subject: [PATCH] Fix reserved cgroup systemd

Fix an issue in which, when specifying `--kube-reserved-cgroup` (or
`--system-reserved-cgroup`) with `--cgroup-driver=systemd`, we do not
properly convert the `systemd` cgroup name into the internal cgroup
name that Kubernetes expects.

Without this change, specifying `--kube-reserved-cgroup=/test.slice
--cgroup-driver=systemd` will fail, and only
`--kube-reserved-cgroup=/test --cgroup-driver=systemd` will succeed,
even if the cgroup that actually exists on the host is `/test.slice`.

Additionally, add light unit testing of the conversion from a systemd
cgroup name to the Kubernetes internal cgroup name.
---
 pkg/kubelet/cm/cgroup_manager_linux_test.go    | 22 +++++++++++++++++++
 .../cm/node_container_manager_linux.go         |  4 ++--
 2 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/pkg/kubelet/cm/cgroup_manager_linux_test.go b/pkg/kubelet/cm/cgroup_manager_linux_test.go
index dcadae26770..5c4a6b6392b 100644
--- a/pkg/kubelet/cm/cgroup_manager_linux_test.go
+++ b/pkg/kubelet/cm/cgroup_manager_linux_test.go
@@ -146,3 +146,25 @@ func TestCgroupNameToCgroupfs(t *testing.T) {
 		}
 	}
 }
+
+func TestParseSystemdToCgroupName(t *testing.T) {
+	testCases := []struct {
+		input    string
+		expected CgroupName
+	}{
+		{
+			input:    "/test",
+			expected: []string{"test"},
+		},
+		{
+			input:    "/test.slice",
+			expected: []string{"test"},
+		},
+	}
+
+	for _, testCase := range testCases {
+		if actual := ParseSystemdToCgroupName(testCase.input); !reflect.DeepEqual(actual, testCase.expected) {
+			t.Errorf("Unexpected result, input: %v, expected: %v, actual: %v", testCase.input, testCase.expected, actual)
+		}
+	}
+}
diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go
index 821fee6f0f4..84370f4aa56 100644
--- a/pkg/kubelet/cm/node_container_manager_linux.go
+++ b/pkg/kubelet/cm/node_container_manager_linux.go
@@ -103,7 +103,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 	// Now apply kube reserved and system reserved limits if required.
 	if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) {
 		klog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
-		if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
+		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
@@ -112,7 +112,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 	}
 	if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) {
 		klog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
-		if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
+		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
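
Note for reviewers: below is a minimal, self-contained sketch (not kubelet
source) of the conversion behavior that the new TestParseSystemdToCgroupName
pins down. The parseSystemdToCgroupName helper and the simplified CgroupName
type here are illustrative stand-ins; the real kubelet function additionally
handles systemd's escaping of nested slice names, which is omitted.

package main

import (
	"fmt"
	"strings"
)

// CgroupName mirrors the kubelet's internal representation: an ordered
// list of cgroup path components relative to the cgroup root.
type CgroupName []string

// parseSystemdToCgroupName is a simplified stand-in for the kubelet's
// ParseSystemdToCgroupName: it takes a systemd cgroup path such as
// "/test.slice", strips the ".slice" suffix, and returns the internal
// name. Unescaping of nested slice names is intentionally omitted.
func parseSystemdToCgroupName(name string) CgroupName {
	base := strings.TrimSuffix(strings.TrimPrefix(name, "/"), ".slice")
	return CgroupName([]string{base})
}

func main() {
	// Matches the two cases in TestParseSystemdToCgroupName: both
	// spellings resolve to the same internal name, which is why
	// --kube-reserved-cgroup=/test.slice now works with
	// --cgroup-driver=systemd.
	fmt.Println(parseSystemdToCgroupName("/test"))       // prints [test]
	fmt.Println(parseSystemdToCgroupName("/test.slice")) // prints [test]
}

The substantive change in the diff is that enforceNodeAllocatableCgroups now
routes the reserved-cgroup flag values through the configured cgroup manager
(cm.cgroupManager.CgroupName), so the name is parsed by whichever driver the
kubelet was started with, rather than unconditionally as a cgroupfs path via
ParseCgroupfsToCgroupName.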