diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index b1c4921fa3b..15ef0ec5dc9 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -1248,16 +1248,14 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
 		switch v1.ResourceName(k) {
 		// CPU, memory, local storage, and PID resources are supported.
 		case v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage, pidlimit.PIDs:
-			if v1.ResourceName(k) != pidlimit.PIDs || utilfeature.DefaultFeatureGate.Enabled(features.SupportNodePidsLimit) {
-				q, err := resource.ParseQuantity(v)
-				if err != nil {
-					return nil, err
-				}
-				if q.Sign() == -1 {
-					return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
-				}
-				rl[v1.ResourceName(k)] = q
+			q, err := resource.ParseQuantity(v)
+			if err != nil {
+				return nil, err
 			}
+			if q.Sign() == -1 {
+				return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
+			}
+			rl[v1.ResourceName(k)] = q
 		default:
 			return nil, fmt.Errorf("cannot reserve %q resource", k)
 		}
diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go
index f36ae65c0cc..bd166c4acd0 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -203,6 +203,7 @@ const (
 	// owner: @dims, @derekwaynecarr
 	// alpha: v1.10
 	// beta: v1.14
+	// GA: v1.20
 	//
 	// Implement support for limiting pids in pods
 	SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit"
@@ -447,8 +448,9 @@ const (
 	// a volume in a Pod.
 	ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"

-	// owner: @RobertKrawitz
+	// owner: @RobertKrawitz, @derekwaynecarr
 	// beta: v1.15
+	// GA: v1.20
 	//
 	// Implement support for limiting pids in nodes
 	SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit"
@@ -680,8 +682,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	BlockVolume:                  {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
 	StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA},
 	SupportIPVSProxyMode:         {Default: true, PreRelease: featuregate.GA},
-	SupportPodPidsLimit:          {Default: true, PreRelease: featuregate.Beta},
-	SupportNodePidsLimit:         {Default: true, PreRelease: featuregate.Beta},
+	SupportPodPidsLimit:          {Default: true, PreRelease: featuregate.GA, LockToDefault: true},  // remove in 1.21
+	SupportNodePidsLimit:         {Default: true, PreRelease: featuregate.GA, LockToDefault: true},  // remove in 1.21
 	HyperVContainer:              {Default: false, PreRelease: featuregate.Alpha},
 	TokenRequest:                 {Default: true, PreRelease: featuregate.Beta},
 	TokenRequestProjection:       {Default: true, PreRelease: featuregate.Beta},
diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go
index 8c671d30a65..007c2b8b41f 100644
--- a/pkg/kubelet/cm/cgroup_manager_linux.go
+++ b/pkg/kubelet/cm/cgroup_manager_linux.go
@@ -36,8 +36,6 @@ import (
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 )
@@ -275,11 +273,8 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
 	// scoped to the set control groups it understands. this is being discussed
 	// in https://github.com/opencontainers/runc/issues/1440
 	// once resolved, we can remove this code.
-	whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd")
+	whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")

-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		whitelistControllers.Insert("pids")
-	}
 	if _, ok := m.subsystems.MountPoints["hugetlb"]; ok {
 		whitelistControllers.Insert("hugetlb")
 	}
@@ -352,13 +347,10 @@ func getSupportedSubsystems() map[subsystem]bool {
 	supportedSubsystems := map[subsystem]bool{
 		&cgroupfs.MemoryGroup{}: true,
 		&cgroupfs.CpuGroup{}:    true,
-		&cgroupfs.PidsGroup{}:   false,
+		&cgroupfs.PidsGroup{}:   true,
 	}
 	// not all hosts support hugetlb cgroup, and in the absent of hugetlb, we will fail silently by reporting no capacity.
 	supportedSubsystems[&cgroupfs.HugetlbGroup{}] = false
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		supportedSubsystems[&cgroupfs.PidsGroup{}] = true
-	}
 	return supportedSubsystems
 }
@@ -417,10 +409,7 @@ var (
 // getSupportedUnifiedControllers returns a set of supported controllers when running on cgroup v2
 func getSupportedUnifiedControllers() sets.String {
 	// This is the set of controllers used by the Kubelet
-	supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb")
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		supportedControllers.Insert("pids")
-	}
+	supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb", "pids")
 	// Memoize the set of controllers that are present in the root cgroup
 	availableRootControllersOnce.Do(func() {
 		var err error
@@ -547,10 +536,8 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
 	if resourceConfig.CpuPeriod != nil {
 		resources.CpuPeriod = *resourceConfig.CpuPeriod
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		if resourceConfig.PidsLimit != nil {
-			resources.PidsLimit = *resourceConfig.PidsLimit
-		}
+	if resourceConfig.PidsLimit != nil {
+		resources.PidsLimit = *resourceConfig.PidsLimit
 	}
 	// if huge pages are enabled, we set them in libcontainer
 	// for each page size enumerated, set that value
@@ -608,7 +595,7 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 		updateSystemdCgroupInfo(libcontainerCgroupConfig, cgroupConfig.Name)
 	}

-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
+	if cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
 		libcontainerCgroupConfig.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
 	}
@@ -648,7 +635,7 @@ func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
 		}
 	}

-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
+	if cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
 		libcontainerCgroupConfig.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
 	}
diff --git a/pkg/kubelet/cm/pod_container_manager_linux.go b/pkg/kubelet/cm/pod_container_manager_linux.go
index ef3499f44fc..b9fa3e05cde 100644
--- a/pkg/kubelet/cm/pod_container_manager_linux.go
+++ b/pkg/kubelet/cm/pod_container_manager_linux.go
@@ -26,10 +26,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 )

 const (
@@ -86,7 +84,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 		Name:               podContainerName,
 		ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits, m.cpuCFSQuotaPeriod),
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
+	if m.podPidsLimit > 0 {
 		containerConfig.ResourceParameters.PidsLimit = &m.podPidsLimit
 	}
 	if err := m.cgroupManager.Create(containerConfig); err != nil {
diff --git a/test/e2e_node/pids_test.go b/test/e2e_node/pids_test.go
index 0c3192aaefd..8fef2ce2f21 100644
--- a/test/e2e_node/pids_test.go
+++ b/test/e2e_node/pids_test.go
@@ -118,14 +118,13 @@ func runPodPidsLimitTests(f *framework.Framework) {
 }

 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("PodPidsLimit [Serial] [Feature:SupportPodPidsLimit][NodeFeature:SupportPodPidsLimit]", func() {
+var _ = SIGDescribe("PodPidsLimit [Serial]", func() {
 	f := framework.NewDefaultFramework("pids-limit-test")
 	ginkgo.Context("With config updated with pids feature enabled", func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			if initialConfig.FeatureGates == nil {
 				initialConfig.FeatureGates = make(map[string]bool)
 			}
-			initialConfig.FeatureGates["SupportPodPidsLimit"] = true
 			initialConfig.PodPidsLimit = int64(1024)
 		})
 		runPodPidsLimitTests(f)
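For reviewers, here is a minimal, self-contained sketch of what the `parseResourceList` change means in practice. It is not the kubelet's actual entry point: `parsePidsReservation` and the sample input map are hypothetical stand-ins for the unexported `parseResourceList`, and the literal `"pid"` key is assumed to match what `pidlimit.PIDs` names. Only `resource.ParseQuantity` and `Quantity.Sign` are real apimachinery APIs. With both gates GA and locked to true, a reservation such as `--system-reserved=pid=1000` is now parsed unconditionally instead of being skipped when `SupportNodePidsLimit` is off.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// parsePidsReservation mirrors the post-change parsing path: every supported
// key, including "pid", goes straight through quantity parsing and the
// negativity check, with no feature-gate branch in between.
func parsePidsReservation(m map[string]string) (map[string]resource.Quantity, error) {
	rl := make(map[string]resource.Quantity)
	for k, v := range m {
		q, err := resource.ParseQuantity(v)
		if err != nil {
			return nil, err
		}
		if q.Sign() == -1 {
			return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
		}
		rl[k] = q
	}
	return rl, nil
}

func main() {
	// e.g. the kubelet flag --system-reserved=pid=1000; previously the "pid"
	// key was silently dropped unless SupportNodePidsLimit was enabled.
	rl, err := parsePidsReservation(map[string]string{"pid": "1000"})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	q := rl["pid"]
	fmt.Println("reserved pids:", q.String()) // reserved pids: 1000
}
```

The real code keys the result by `v1.ResourceName` and also accepts cpu, memory, and ephemeral-storage; the sketch narrows to the pids path because that is the only behavior this PR changes.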