Merge pull request #61294 from derekwaynecarr/fix-cfs-quota

Automatic merge from submit-queue (batch tested with PRs 61351, 61294). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix cpu cfs quota flag with pod cgroups

**What this PR does / why we need it**:
It fixes the `--cpu-cfs-quota` flag in the kubelet when pod-level cgroups (`--cgroups-per-qos`) are enabled.

**Which issue(s) this PR fixes** 
Fixes #61293

**Special notes for your reviewer**:
This fixes a regression reported by some of our users who disable CPU quota enforcement.

**Release note**:
```release-note
Fix regression where kubelet --cpu-cfs-quota flag did not work when --cgroups-per-qos was enabled
```
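
For background (my summary of the linked issue, not text from the PR): with `--cgroups-per-qos` enabled the kubelet creates a pod-level cgroup above the container cgroups, and before this fix that pod-level cgroup always received a CFS quota derived from the pod's CPU limits, even when `--cpu-cfs-quota=false` had disabled enforcement at the container level, so pods were throttled anyway. The intended behavior is sketched below; `quotaForPod` is an illustrative helper, not the kubelet's actual API.

```go
package main

import "fmt"

// quotaForPod returns the CFS quota to write into the pod-level
// cgroup. When enforcement is off, the kernel's "no limit" sentinel
// (-1, i.e. cpu.cfs_quota_us=-1) is used so nothing is throttled.
func quotaForPod(derivedQuota int64, enforceCPULimits bool) int64 {
	if !enforceCPULimits {
		return -1
	}
	return derivedQuota
}

func main() {
	fmt.Println(quotaForPod(20000, true))  // 20000: quota enforced
	fmt.Println(quotaForPod(20000, false)) // -1: quota disabled
}
```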
Merged by Kubernetes Submit Queue on 2018-03-19 08:15:59 -07:00, committed by GitHub.
commit 67be0a90f4
7 changed files with 54 additions and 11 deletions


```diff
@@ -661,6 +661,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 			ExperimentalCPUManagerPolicy:          s.CPUManagerPolicy,
 			ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration,
 			ExperimentalPodPidsLimit:              s.PodPidsLimit,
+			EnforceCPULimits:                      s.CPUCFSQuota,
 		},
 		s.FailSwapOn,
 		devicePluginEnabled,
```


```diff
@@ -111,6 +111,7 @@ type NodeConfig struct {
 	ExperimentalCPUManagerPolicy          string
 	ExperimentalCPUManagerReconcilePeriod time.Duration
 	ExperimentalPodPidsLimit              int64
+	EnforceCPULimits                      bool
 }
 
 type NodeAllocatableConfig struct {
```


```diff
@@ -301,6 +301,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 			subsystems:       cm.subsystems,
 			cgroupManager:    cm.cgroupManager,
 			podPidsLimit:     cm.ExperimentalPodPidsLimit,
+			enforceCPULimits: cm.EnforceCPULimits,
 		}
 	}
 	return &podContainerManagerNoop{
```


```diff
@@ -103,7 +103,7 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
 	// sum requests and limits.
 	reqs, limits := resource.PodRequestsAndLimits(pod)
@@ -146,6 +146,11 @@ func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
 		}
 	}
 
+	// quota is not capped when cfs quota is disabled
+	if !enforceCPULimits {
+		cpuQuota = int64(-1)
+	}
+
 	// determine the qos class
 	qosClass := v1qos.GetPodQOS(pod)
```
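
To make the arithmetic around this hunk concrete, here is a standalone sketch of how a CPU limit becomes a CFS quota and how the new guard overrides it. The constants follow the standard 100ms CFS period, and `milliCPUToQuota` is a simplified stand-in for the kubelet's helper, not a copy of it.

```go
package main

import "fmt"

const (
	quotaPeriod    = 100000 // cpu.cfs_period_us: 100ms in microseconds
	minQuotaPeriod = 1000   // kernel minimum for cpu.cfs_quota_us
)

// milliCPUToQuota converts a CPU limit in millicores into a CFS quota
// over a 100ms period, e.g. 200m -> 20000us of CPU time per 100000us.
func milliCPUToQuota(milliCPU int64) int64 {
	quota := (milliCPU * quotaPeriod) / 1000
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return quota
}

func main() {
	cpuQuota := milliCPUToQuota(200) // pod CPU limit of 200m
	enforceCPULimits := false        // kubelet ran with --cpu-cfs-quota=false

	// Mirrors the added guard: quota is not capped when cfs quota is disabled.
	if !enforceCPULimits {
		cpuQuota = int64(-1)
	}
	fmt.Println(cpuQuota) // -1
}
```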


```diff
@@ -57,10 +57,12 @@ func TestResourceConfigForPod(t *testing.T) {
 	guaranteedShares := MilliCPUToShares(100)
 	guaranteedQuota, guaranteedPeriod := MilliCPUToQuota(100)
 	memoryQuantity = resource.MustParse("100Mi")
+	cpuNoLimit := int64(-1)
 	guaranteedMemory := memoryQuantity.Value()
 	testCases := map[string]struct {
 		pod      *v1.Pod
 		expected *ResourceConfig
+		enforceCPULimits bool
 	}{
 		"besteffort": {
 			pod: &v1.Pod{
@@ -72,7 +74,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &minShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &minShares},
 		},
 		"burstable-no-limits": {
 			pod: &v1.Pod{
@@ -84,7 +87,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares},
 		},
 		"burstable-with-limits": {
 			pod: &v1.Pod{
@@ -96,7 +100,21 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+		},
+		"burstable-with-limits-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
 		},
 		"burstable-partial-limits": {
 			pod: &v1.Pod{
@@ -111,7 +129,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstablePartialShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
 		},
 		"guaranteed": {
 			pod: &v1.Pod{
@@ -123,11 +142,25 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+		},
+		"guaranteed-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
 		},
 	}
 	for testName, testCase := range testCases {
-		actual := ResourceConfigForPod(testCase.pod)
+		actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits)
 		if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
 			t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
 		}
```
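
If you want to exercise the new cases locally, they should run with the standard Go tooling via `go test -run TestResourceConfigForPod ./pkg/kubelet/cm/` (package path assumed from where `ResourceConfigForPod` lives; adjust if your tree differs). Each case now pins `enforceCPULimits` explicitly, so the `cpuNoLimit` (-1) expectation appears only in the two `no-cpu-enforcement` cases.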


```diff
@@ -43,7 +43,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool) *ResourceConfig {
 	return nil
 }
```


```diff
@@ -49,6 +49,8 @@ type podContainerManagerImpl struct {
 	cgroupManager CgroupManager
 	// Maximum number of pids in a pod
 	podPidsLimit int64
+	// enforceCPULimits controls whether cfs quota is enforced or not
+	enforceCPULimits bool
 }
 
 // Make sure that podContainerManagerImpl implements the PodContainerManager interface
@@ -79,7 +81,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 	// Create the pod container
 	containerConfig := &CgroupConfig{
 		Name: podContainerName,
-		ResourceParameters: ResourceConfigForPod(pod),
+		ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits),
 	}
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
 		containerConfig.ResourceParameters.PodPidsLimit = &m.podPidsLimit
```
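
Taken together, the plumbing runs flag → NodeConfig → pod container manager → pod cgroup config. A compressed sketch of that chain follows, with the real structs trimmed to stand-ins carrying only the field this PR adds; names here mirror but are not the kubelet's types.

```go
package main

import "fmt"

// Trimmed stand-ins for the structs touched by this PR.
type nodeConfig struct{ EnforceCPULimits bool }

type podContainerManager struct{ enforceCPULimits bool }

type resourceConfig struct{ CPUQuota int64 }

// resourceConfigForPod mirrors the new two-argument signature: the
// caller, not a global, decides whether quota is enforced.
func resourceConfigForPod(derivedQuota int64, enforceCPULimits bool) resourceConfig {
	if !enforceCPULimits {
		derivedQuota = -1
	}
	return resourceConfig{CPUQuota: derivedQuota}
}

func (m *podContainerManager) ensureExists(derivedQuota int64) resourceConfig {
	return resourceConfigForPod(derivedQuota, m.enforceCPULimits)
}

func main() {
	// --cpu-cfs-quota=false lands in NodeConfig, then in the manager.
	cfg := nodeConfig{EnforceCPULimits: false}
	m := &podContainerManager{enforceCPULimits: cfg.EnforceCPULimits}
	fmt.Println(m.ensureExists(20000).CPUQuota) // -1: pod cgroup quota unbounded
}
```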