Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Fix cpu cfs quota flag with pod cgroups
This commit is contained in:
parent
ca02c11887
commit
f68f3ff783
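
Summary (reading from the diff below): the kubelet already exposed a --cpu-cfs-quota setting, but with pod-level cgroups the pod cgroup's CFS quota appears to have been written unconditionally. This change threads the setting through NodeConfig as EnforceCPULimits and into podContainerManagerImpl, so ResourceConfigForPod can drop the quota (set it to -1) when enforcement is disabled.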
@@ -661,6 +661,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 			ExperimentalCPUManagerPolicy:          s.CPUManagerPolicy,
 			ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration,
 			ExperimentalPodPidsLimit:              s.PodPidsLimit,
+			EnforceCPULimits:                      s.CPUCFSQuota,
 		},
 		s.FailSwapOn,
 		devicePluginEnabled,
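
For context, s.CPUCFSQuota is populated from the kubelet's --cpu-cfs-quota flag, which defaults to true. The wiring looks roughly like the sketch below (the exact registration site and help text in the kubelet source may differ):

    // Sketch of the assumed flag registration, not the exact kubelet source.
    fs.BoolVar(&c.CPUCFSQuota, "cpu-cfs-quota", c.CPUCFSQuota,
        "Enable CPU CFS quota enforcement for containers that specify CPU limits")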
@@ -111,6 +111,7 @@ type NodeConfig struct {
 	ExperimentalCPUManagerPolicy          string
 	ExperimentalCPUManagerReconcilePeriod time.Duration
 	ExperimentalPodPidsLimit              int64
+	EnforceCPULimits                      bool
 }
 
 type NodeAllocatableConfig struct {
@@ -301,6 +301,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 			subsystems:       cm.subsystems,
 			cgroupManager:    cm.cgroupManager,
 			podPidsLimit:     cm.ExperimentalPodPidsLimit,
+			enforceCPULimits: cm.EnforceCPULimits,
 		}
 	}
 	return &podContainerManagerNoop{
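
Note the noop fallback on the last line above: NewPodContainerManager only hands back podContainerManagerImpl when pod-level cgroups are in use, so in the noop mode the new enforceCPULimits field is never consulted.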
@@ -103,7 +103,7 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
 	// sum requests and limits.
 	reqs, limits := resource.PodRequestsAndLimits(pod)
@@ -146,6 +146,11 @@ func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
 		}
 	}
 
+	// quota is not capped when cfs quota is disabled
+	if !enforceCPULimits {
+		cpuQuota = int64(-1)
+	}
+
 	// determine the qos class
 	qosClass := v1qos.GetPodQOS(pod)
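
Why -1: cgroups treat cpu.cfs_quota_us = -1 as "no quota", so CPU shares (relative weighting) still apply but the cgroup is never CFS-throttled. A minimal sketch of the relationship, assuming the standard 100ms period (the real MilliCPUToQuota also rounds and enforces a minimum quota; this is not the kubelet implementation):

    const quotaPeriod = 100000 // cpu.cfs_period_us in microseconds (100ms)

    // quotaForPod mirrors the shape of the logic above: derive the quota
    // from the summed CPU limit, then drop it when enforcement is off.
    func quotaForPod(limitMilliCPU int64, enforceCPULimits bool) int64 {
        quota := limitMilliCPU * quotaPeriod / 1000 // e.g. 200m -> 20000us
        if !enforceCPULimits {
            quota = -1 // kernel: no CFS throttling for this cgroup
        }
        return quota
    }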
@@ -57,10 +57,12 @@ func TestResourceConfigForPod(t *testing.T) {
 	guaranteedShares := MilliCPUToShares(100)
 	guaranteedQuota, guaranteedPeriod := MilliCPUToQuota(100)
 	memoryQuantity = resource.MustParse("100Mi")
+	cpuNoLimit := int64(-1)
 	guaranteedMemory := memoryQuantity.Value()
 	testCases := map[string]struct {
-		pod      *v1.Pod
-		expected *ResourceConfig
+		pod              *v1.Pod
+		expected         *ResourceConfig
+		enforceCPULimits bool
 	}{
 		"besteffort": {
 			pod: &v1.Pod{
@@ -72,7 +74,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &minShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &minShares},
 		},
 		"burstable-no-limits": {
 			pod: &v1.Pod{
@@ -84,7 +87,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares},
 		},
 		"burstable-with-limits": {
 			pod: &v1.Pod{
@@ -96,7 +100,21 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
 		},
+		"burstable-with-limits-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+		},
 		"burstable-partial-limits": {
 			pod: &v1.Pod{
@@ -111,7 +129,8 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &burstablePartialShares},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &burstablePartialShares},
 		},
 		"guaranteed": {
 			pod: &v1.Pod{
@@ -123,11 +142,25 @@ func TestResourceConfigForPod(t *testing.T) {
 					},
 				},
 			},
-			expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+			enforceCPULimits: true,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
 		},
+		"guaranteed-no-cpu-enforcement": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+						},
+					},
+				},
+			},
+			enforceCPULimits: false,
+			expected:         &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+		},
 	}
 	for testName, testCase := range testCases {
-		actual := ResourceConfigForPod(testCase.pod)
+		actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits)
 		if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
 			t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
 		}
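
With the table extended, every case now pins enforceCPULimits explicitly, and the two *-no-cpu-enforcement cases assert that CpuQuota comes back as cpuNoLimit while shares, period, and memory are unchanged. Assuming the test lives in pkg/kubelet/cm (a guess at the package path, since file names are not shown in this view), it can be run with something like:

    go test ./pkg/kubelet/cm/ -run TestResourceConfigForPod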
@@ -43,7 +43,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool) *ResourceConfig {
 	return nil
 }
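
The non-Linux stub keeps its signature in step with the Linux implementation so callers compile on every platform; it still returns nil, so the new parameter is a no-op there.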
@@ -49,6 +49,8 @@ type podContainerManagerImpl struct {
 	cgroupManager CgroupManager
 	// Maximum number of pids in a pod
 	podPidsLimit int64
+	// enforceCPULimits controls whether cfs quota is enforced or not
+	enforceCPULimits bool
 }
 
 // Make sure that podContainerManagerImpl implements the PodContainerManager interface
@@ -79,7 +81,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 	// Create the pod container
 	containerConfig := &CgroupConfig{
 		Name:               podContainerName,
-		ResourceParameters: ResourceConfigForPod(pod),
+		ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits),
 	}
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
 		containerConfig.ResourceParameters.PodPidsLimit = &m.podPidsLimit
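
Net effect: with --cpu-cfs-quota=false, EnsureExists now creates the pod-level cgroup with a CPU quota of -1, so pods keep their relative CPU shares but are no longer CFS-throttled at the pod boundary, matching what the flag already did for individual containers.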