Promote PidLimits to GA

This commit is contained in:
Derek Carr 2020-08-24 13:57:48 -04:00
parent 14a11060a0
commit 6f2153986a
5 changed files with 21 additions and 37 deletions

View File

@@ -1248,16 +1248,14 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
 		switch v1.ResourceName(k) {
 		// CPU, memory, local storage, and PID resources are supported.
 		case v1.ResourceCPU, v1.ResourceMemory, v1.ResourceEphemeralStorage, pidlimit.PIDs:
-			if v1.ResourceName(k) != pidlimit.PIDs || utilfeature.DefaultFeatureGate.Enabled(features.SupportNodePidsLimit) {
-				q, err := resource.ParseQuantity(v)
-				if err != nil {
-					return nil, err
-				}
-				if q.Sign() == -1 {
-					return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
-				}
-				rl[v1.ResourceName(k)] = q
-			}
+			q, err := resource.ParseQuantity(v)
+			if err != nil {
+				return nil, err
+			}
+			if q.Sign() == -1 {
+				return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
+			}
+			rl[v1.ResourceName(k)] = q
 		default:
 			return nil, fmt.Errorf("cannot reserve %q resource", k)
 		}

View File

@@ -203,6 +203,7 @@ const (
 	// owner: @dims, @derekwaynecarr
 	// alpha: v1.10
 	// beta: v1.14
+	// GA: v1.20
 	//
 	// Implement support for limiting pids in pods
 	SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit"
@@ -447,8 +448,9 @@ const (
 	// a volume in a Pod.
 	ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"
 
-	// owner: @RobertKrawitz
+	// owner: @RobertKrawitz, @derekwaynecarr
 	// beta: v1.15
+	// GA: v1.20
 	//
 	// Implement support for limiting pids in nodes
 	SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit"
@@ -680,8 +682,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	BlockVolume:                  {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
 	StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA},
 	SupportIPVSProxyMode:         {Default: true, PreRelease: featuregate.GA},
-	SupportPodPidsLimit:          {Default: true, PreRelease: featuregate.Beta},
-	SupportNodePidsLimit:         {Default: true, PreRelease: featuregate.Beta},
+	SupportPodPidsLimit:          {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
+	SupportNodePidsLimit:         {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.21
 	HyperVContainer:              {Default: false, PreRelease: featuregate.Alpha},
 	TokenRequest:                 {Default: true, PreRelease: featuregate.Beta},
 	TokenRequestProjection:       {Default: true, PreRelease: featuregate.Beta},

View File

@@ -36,8 +36,6 @@ import (
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 	cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 )
@@ -275,11 +273,8 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
 	// scoped to the set control groups it understands. this is being discussed
 	// in https://github.com/opencontainers/runc/issues/1440
 	// once resolved, we can remove this code.
-	whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd")
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		whitelistControllers.Insert("pids")
-	}
+	whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd", "pids")
 	if _, ok := m.subsystems.MountPoints["hugetlb"]; ok {
 		whitelistControllers.Insert("hugetlb")
 	}
@@ -352,13 +347,10 @@ func getSupportedSubsystems() map[subsystem]bool {
 	supportedSubsystems := map[subsystem]bool{
 		&cgroupfs.MemoryGroup{}: true,
 		&cgroupfs.CpuGroup{}:    true,
-		&cgroupfs.PidsGroup{}:   false,
+		&cgroupfs.PidsGroup{}:   true,
 	}
 	// not all hosts support hugetlb cgroup, and in the absent of hugetlb, we will fail silently by reporting no capacity.
 	supportedSubsystems[&cgroupfs.HugetlbGroup{}] = false
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		supportedSubsystems[&cgroupfs.PidsGroup{}] = true
-	}
 	return supportedSubsystems
 }
@@ -417,10 +409,7 @@ var (
 // getSupportedUnifiedControllers returns a set of supported controllers when running on cgroup v2
 func getSupportedUnifiedControllers() sets.String {
 	// This is the set of controllers used by the Kubelet
-	supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb")
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		supportedControllers.Insert("pids")
-	}
+	supportedControllers := sets.NewString("cpu", "cpuset", "memory", "hugetlb", "pids")
 	// Memoize the set of controllers that are present in the root cgroup
 	availableRootControllersOnce.Do(func() {
 		var err error
@@ -547,10 +536,8 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcontainerconfigs.Resources {
 	if resourceConfig.CpuPeriod != nil {
 		resources.CpuPeriod = *resourceConfig.CpuPeriod
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) || utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportNodePidsLimit) {
-		if resourceConfig.PidsLimit != nil {
-			resources.PidsLimit = *resourceConfig.PidsLimit
-		}
+	if resourceConfig.PidsLimit != nil {
+		resources.PidsLimit = *resourceConfig.PidsLimit
 	}
 	// if huge pages are enabled, we set them in libcontainer
 	// for each page size enumerated, set that value
@@ -608,7 +595,7 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 		updateSystemdCgroupInfo(libcontainerCgroupConfig, cgroupConfig.Name)
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
+	if cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
 		libcontainerCgroupConfig.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
 	}
@@ -648,7 +635,7 @@ func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
 		}
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
+	if cgroupConfig.ResourceParameters != nil && cgroupConfig.ResourceParameters.PidsLimit != nil {
 		libcontainerCgroupConfig.PidsLimit = *cgroupConfig.ResourceParameters.PidsLimit
 	}

View File

@@ -26,10 +26,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
-	kubefeatures "k8s.io/kubernetes/pkg/features"
 )
 const (
@@ -86,7 +84,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 		Name:               podContainerName,
 		ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits, m.cpuCFSQuotaPeriod),
 	}
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
+	if m.podPidsLimit > 0 {
 		containerConfig.ResourceParameters.PidsLimit = &m.podPidsLimit
 	}
 	if err := m.cgroupManager.Create(containerConfig); err != nil {

View File

@@ -118,14 +118,13 @@ func runPodPidsLimitTests(f *framework.Framework) {
 }
 
 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("PodPidsLimit [Serial] [Feature:SupportPodPidsLimit][NodeFeature:SupportPodPidsLimit]", func() {
+var _ = SIGDescribe("PodPidsLimit [Serial]", func() {
 	f := framework.NewDefaultFramework("pids-limit-test")
 	ginkgo.Context("With config updated with pids feature enabled", func() {
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			if initialConfig.FeatureGates == nil {
 				initialConfig.FeatureGates = make(map[string]bool)
 			}
-			initialConfig.FeatureGates["SupportPodPidsLimit"] = true
 			initialConfig.PodPidsLimit = int64(1024)
 		})
 		runPodPidsLimitTests(f)