fix #51135: make CFS quota period configurable. Adds a CLI flag and config option to the kubelet for setting cpu.cfs_period_us; it defaults to 100ms, as before. Using a custom period requires enabling the CustomCPUCFSQuotaPeriod feature gate.
Signed-off-by: Sandor Szücs <sandor.szuecs@zalando.de>
This commit is contained in:
parent
380931aca7
commit
588d2808b7
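
In practice, turning the new behavior on might look like the following kubelet invocation (a sketch only: the flag and feature gate names come from the diff below, while the 10ms value is purely illustrative):

kubelet --feature-gates=CustomCPUCFSQuotaPeriod=true --cpu-cfs-quota-period=10ms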
@@ -78,6 +78,7 @@ ComponentConfigs:
 CacheAuthorizedTTL: 5m0s
 CacheUnauthorizedTTL: 30s
 CPUCFSQuota: true
+CPUCFSQuotaPeriod: 0s
 CPUManagerPolicy: none
 CPUManagerReconcilePeriod: 10s
 CgroupDriver: cgroupfs
@@ -91,6 +91,7 @@ kubeletConfiguration:
 containerLogMaxSize: 10Mi
 contentType: application/vnd.kubernetes.protobuf
 cpuCFSQuota: true
+cpuCFSQuotaPeriod: 0s
 cpuManagerPolicy: none
 cpuManagerReconcilePeriod: 10s
 enableControllerAttachDetach: true
@@ -107,6 +107,7 @@ containerLogMaxFiles: 5
 containerLogMaxSize: 10Mi
 contentType: application/vnd.kubernetes.protobuf
 cpuCFSQuota: true
+cpuCFSQuotaPeriod: 0s
 cpuManagerPolicy: none
 cpuManagerReconcilePeriod: 10s
 enableControllerAttachDetach: true
@@ -102,6 +102,7 @@ containerLogMaxFiles: 5
 containerLogMaxSize: 10Mi
 contentType: application/vnd.kubernetes.protobuf
 cpuCFSQuota: true
+cpuCFSQuotaPeriod: 0s
 cpuManagerPolicy: none
 cpuManagerReconcilePeriod: 10s
 enableControllerAttachDetach: true
@@ -530,6 +530,7 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig
 
 fs.StringVar(&c.ResolverConfig, "resolv-conf", c.ResolverConfig, "Resolver configuration file used as the basis for the container DNS resolution configuration.")
 fs.BoolVar(&c.CPUCFSQuota, "cpu-cfs-quota", c.CPUCFSQuota, "Enable CPU CFS quota enforcement for containers that specify CPU limits")
+fs.DurationVar(&c.CPUCFSQuotaPeriod.Duration, "cpu-cfs-quota-period", c.CPUCFSQuotaPeriod.Duration, "Sets CPU CFS quota period value, cpu.cfs_period_us, defaults to Linux Kernel default")
 fs.BoolVar(&c.EnableControllerAttachDetach, "enable-controller-attach-detach", c.EnableControllerAttachDetach, "Enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations")
 fs.BoolVar(&c.MakeIPTablesUtilChains, "make-iptables-util-chains", c.MakeIPTablesUtilChains, "If true, kubelet will ensure iptables utility rules are present on host.")
 fs.Int32Var(&c.IPTablesMasqueradeBit, "iptables-masquerade-bit", c.IPTablesMasqueradeBit, "The bit of the fwmark space to mark packets for SNAT. Must be within the range [0, 31]. Please match this parameter with corresponding parameter in kube-proxy.")
@@ -708,6 +708,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
 ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration,
 ExperimentalPodPidsLimit: s.PodPidsLimit,
 EnforceCPULimits: s.CPUCFSQuota,
+CPUCFSQuotaPeriod: s.CPUCFSQuotaPeriod.Duration,
 },
 s.FailSwapOn,
 devicePluginEnabled,
@@ -155,6 +155,12 @@ const (
 // Alternative container-level CPU affinity policies.
 CPUManager utilfeature.Feature = "CPUManager"
+
+// owner: @szuecs
+// alpha: v1.12
+//
+// Enable nodes to change CPUCFSQuotaPeriod
+CPUCFSQuotaPeriod utilfeature.Feature = "CustomCPUCFSQuotaPeriod"
 
 // owner: @derekwaynecarr
 // beta: v1.10
 //
@@ -408,6 +414,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS
 ExpandInUsePersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha},
 AttachVolumeLimit: {Default: false, PreRelease: utilfeature.Alpha},
 CPUManager: {Default: true, PreRelease: utilfeature.Beta},
+CPUCFSQuotaPeriod: {Default: false, PreRelease: utilfeature.Alpha},
 ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha},
 MountContainers: {Default: false, PreRelease: utilfeature.Alpha},
 VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta},
@@ -145,6 +145,7 @@ var (
 "Authorization.Webhook.CacheAuthorizedTTL.Duration",
 "Authorization.Webhook.CacheUnauthorizedTTL.Duration",
 "CPUCFSQuota",
+"CPUCFSQuotaPeriod.Duration",
 "CPUManagerPolicy",
 "CPUManagerReconcilePeriod.Duration",
 "QOSReserved[*]",
@@ -220,6 +220,8 @@ type KubeletConfiguration struct {
 // cpuCFSQuota enables CPU CFS quota enforcement for containers that
 // specify CPU limits
 CPUCFSQuota bool
+// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
+CPUCFSQuotaPeriod metav1.Duration
 // maxOpenFiles is Number of files that can be opened by Kubelet process.
 MaxOpenFiles int64
 // contentType is contentType of requests sent to apiserver.
@@ -18,6 +18,7 @@ go_library(
 ],
 importpath = "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1",
 deps = [
+"//pkg/features:go_default_library",
 "//pkg/kubelet/apis/config:go_default_library",
 "//pkg/kubelet/qos:go_default_library",
 "//pkg/kubelet/types:go_default_library",
@@ -21,6 +21,7 @@ import (
 
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 kruntime "k8s.io/apimachinery/pkg/runtime"
+"k8s.io/kubernetes/pkg/features"
 "k8s.io/kubernetes/pkg/kubelet/qos"
 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 "k8s.io/kubernetes/pkg/master/ports"
@@ -154,6 +155,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 if obj.CPUCFSQuota == nil {
 obj.CPUCFSQuota = utilpointer.BoolPtr(true)
 }
+if obj.CPUCFSQuotaPeriod == nil && obj.FeatureGates[string(features.CPUCFSQuotaPeriod)] {
+obj.CPUCFSQuotaPeriod = &metav1.Duration{Duration: 100 * time.Millisecond}
+}
 if obj.MaxOpenFiles == 0 {
 obj.MaxOpenFiles = 1000000
 }
@@ -466,6 +466,13 @@ type KubeletConfiguration struct {
 // Default: true
 // +optional
 CPUCFSQuota *bool `json:"cpuCFSQuota,omitempty"`
+// CPUCFSQuotaPeriod is the CPU CFS quota period value, cpu.cfs_period_us.
+// Dynamic Kubelet Config (beta): If dynamically updating this field, consider that
+// limits set for containers will result in different cpu.cfs_quota settings. This
+// will trigger container restarts on the node being reconfigured.
+// Default: "100ms"
+// +optional
+CPUCFSQuotaPeriod *metav1.Duration `json:"cpuCFSQuotaPeriod,omitempty"`
 // maxOpenFiles is Number of files that can be opened by Kubelet process.
 // Dynamic Kubelet Config (beta): If dynamically updating this field, consider that
 // it may impact the ability of the Kubelet to interact with the node's filesystem.
@@ -280,6 +280,9 @@ func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in
 if err := v1.Convert_Pointer_bool_To_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil {
 return err
 }
+if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod, s); err != nil {
+return err
+}
 out.MaxOpenFiles = in.MaxOpenFiles
 out.ContentType = in.ContentType
 if err := v1.Convert_Pointer_int32_To_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil {
@@ -406,6 +409,9 @@ func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in
 if err := v1.Convert_bool_To_Pointer_bool(&in.CPUCFSQuota, &out.CPUCFSQuota, s); err != nil {
 return err
 }
+if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod, s); err != nil {
+return err
+}
 out.MaxOpenFiles = in.MaxOpenFiles
 out.ContentType = in.ContentType
 if err := v1.Convert_int32_To_Pointer_int32(&in.KubeAPIQPS, &out.KubeAPIQPS, s); err != nil {
@@ -21,6 +21,7 @@ limitations under the License.
 package v1beta1
 
 import (
+v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -178,6 +179,11 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
 *out = new(bool)
 **out = **in
 }
+if in.CPUCFSQuotaPeriod != nil {
+in, out := &in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod
+*out = new(v1.Duration)
+**out = **in
+}
 if in.KubeAPIQPS != nil {
 in, out := &in.KubeAPIQPS, &out.KubeAPIQPS
 *out = new(int32)
@@ -43,6 +43,7 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/kubelet/apis/config:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
 ],
 )
@@ -18,6 +18,7 @@ package validation
 
 import (
 "fmt"
+"time"
 
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 utilvalidation "k8s.io/apimachinery/pkg/util/validation"
@@ -54,6 +55,9 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error
 if kc.HealthzPort != 0 && utilvalidation.IsValidPortNum(int(kc.HealthzPort)) != nil {
 allErrors = append(allErrors, fmt.Errorf("invalid configuration: HealthzPort (--healthz-port) %v must be between 1 and 65535, inclusive", kc.HealthzPort))
 }
+if localFeatureGate.Enabled(features.CPUCFSQuotaPeriod) && utilvalidation.IsInRange(int(kc.CPUCFSQuotaPeriod.Duration), int(1*time.Microsecond), int(time.Second)) != nil {
+allErrors = append(allErrors, fmt.Errorf("invalid configuration: CPUCFSQuotaPeriod (--cpu-cfs-quota-period) %v must be between 1usec and 1sec, inclusive", kc.CPUCFSQuotaPeriod))
+}
 if utilvalidation.IsInRange(int(kc.ImageGCHighThresholdPercent), 0, 100) != nil {
 allErrors = append(allErrors, fmt.Errorf("invalid configuration: ImageGCHighThresholdPercent (--image-gc-high-threshold) %v must be between 0 and 100, inclusive", kc.ImageGCHighThresholdPercent))
 }
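To restate the range check just added above: a custom period is accepted only when it falls in the inclusive window from 1 microsecond to 1 second. A minimal standalone Go sketch of the same bound (not the kubelet's validation code, which goes through utilvalidation.IsInRange on ints; this restates it with plain time.Duration comparisons):

package main

import (
	"fmt"
	"time"
)

// validPeriod mirrors the inclusive 1usec..1sec window enforced above.
func validPeriod(d time.Duration) bool {
	return d >= time.Microsecond && d <= time.Second
}

func main() {
	fmt.Println(validPeriod(100 * time.Millisecond)) // true: the default period
	fmt.Println(validPeriod(5 * time.Millisecond))   // true: a tuned period
	fmt.Println(validPeriod(0))                      // false: rejected while the gate is enabled
}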
@@ -18,7 +18,9 @@ package validation
 
 import (
 "testing"
+"time"
 
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 )
@@ -48,6 +50,7 @@ func TestValidateKubeletConfiguration(t *testing.T) {
 RegistryPullQPS: 5,
 HairpinMode: kubeletconfig.PromiscuousBridge,
 NodeLeaseDurationSeconds: 1,
+CPUCFSQuotaPeriod: metav1.Duration{Duration: 100 * time.Millisecond},
 }
 if allErrors := ValidateKubeletConfiguration(successCase); allErrors != nil {
 t.Errorf("expect no errors, got %v", allErrors)
@@ -77,6 +80,7 @@ func TestValidateKubeletConfiguration(t *testing.T) {
 RegistryPullQPS: -10,
 HairpinMode: "foo",
 NodeLeaseDurationSeconds: -1,
+CPUCFSQuotaPeriod: metav1.Duration{Duration: 0},
 }
 const numErrs = 23
 if allErrors := ValidateKubeletConfiguration(errorCase); len(allErrors.(utilerrors.Aggregate).Errors()) != numErrs {
pkg/kubelet/apis/config/zz_generated.deepcopy.go (generated; 1 line changed)
@@ -123,6 +123,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
 }
 }
 out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
+out.CPUCFSQuotaPeriod = in.CPUCFSQuotaPeriod
 if in.EvictionHard != nil {
 in, out := &in.EvictionHard, &out.EvictionHard
 *out = make(map[string]string, len(*in))
@@ -134,11 +134,14 @@ go_test(
 embed = [":go_default_library"],
 deps = select({
 "@io_bazel_rules_go//go/platform:linux": [
+"//pkg/features:go_default_library",
 "//pkg/kubelet/eviction/api:go_default_library",
 "//pkg/util/mount:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
 "//vendor/github.com/stretchr/testify/assert:go_default_library",
 "//vendor/github.com/stretchr/testify/require:go_default_library",
 ],
@@ -114,6 +114,7 @@ type NodeConfig struct {
 ExperimentalCPUManagerReconcilePeriod time.Duration
 ExperimentalPodPidsLimit int64
 EnforceCPULimits bool
+CPUCFSQuotaPeriod time.Duration
 }
 
 type NodeAllocatableConfig struct {
@@ -307,6 +307,7 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
 cgroupManager: cm.cgroupManager,
 podPidsLimit: cm.ExperimentalPodPidsLimit,
 enforceCPULimits: cm.EnforceCPULimits,
+cpuCFSQuotaPeriod: uint64(cm.CPUCFSQuotaPeriod / time.Microsecond),
 }
 }
 return &podContainerManagerNoop{
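The uint64(cm.CPUCFSQuotaPeriod / time.Microsecond) conversion above is what turns the configured Go duration into the microsecond number that cpu.cfs_period_us expects. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	// cpu.cfs_period_us is expressed in microseconds, so the configured
	// time.Duration is divided down before being handed to the cgroup layer.
	defaultPeriod := 100 * time.Millisecond
	tunedPeriod := 5 * time.Millisecond
	fmt.Println(uint64(defaultPeriod / time.Microsecond)) // 100000
	fmt.Println(uint64(tunedPeriod / time.Microsecond))   // 5000
}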
@@ -27,9 +27,11 @@ import (
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/types"
+utilfeature "k8s.io/apiserver/pkg/util/feature"
 "k8s.io/kubernetes/pkg/api/v1/resource"
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
+kubefeatures "k8s.io/kubernetes/pkg/features"
 )
 
 const (
@@ -44,28 +46,29 @@
 )
 
 // MilliCPUToQuota converts milliCPU to CFS quota and period values.
-func MilliCPUToQuota(milliCPU int64) (quota int64, period uint64) {
+func MilliCPUToQuota(milliCPU int64, period int64) (quota int64) {
 // CFS quota is measured in two values:
-// - cfs_period_us=100ms (the amount of time to measure usage across)
+// - cfs_period_us=100ms (the amount of time to measure usage across given by period)
 // - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
 // so in the above example, you are limited to 20% of a single CPU
 // for multi-cpu environments, you just scale equivalent amounts
 // see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details
 
 if milliCPU == 0 {
 return
 }
 
-// we set the period to 100ms by default
-period = QuotaPeriod
+if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
+period = QuotaPeriod
+}
 
 // we then convert your milliCPU to a value normalized over a period
-quota = (milliCPU * QuotaPeriod) / MilliCPUToCPU
+quota = (milliCPU * period) / MilliCPUToCPU
 
 // quota needs to be a minimum of 1ms.
 if quota < MinQuotaPeriod {
 quota = MinQuotaPeriod
 }
 
 return
 }
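To see what the new signature computes: the quota is just the CPU limit scaled over the chosen period, floored at 1ms. A minimal standalone sketch of the same arithmetic (not the kubelet code itself, which additionally consults the feature gate as shown above):

package main

import "fmt"

const (
	milliCPUToCPU  = 1000 // milliCPU units per CPU
	minQuotaPeriod = 1000 // 1ms floor, in microseconds
)

// quotaForPeriod repeats the core MilliCPUToQuota arithmetic: the share of
// each period, in microseconds, that the container may consume.
func quotaForPeriod(milliCPU, period int64) int64 {
	if milliCPU == 0 {
		return 0
	}
	quota := (milliCPU * period) / milliCPUToCPU
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return quota
}

func main() {
	fmt.Println(quotaForPeriod(200, 100000)) // 20000: 20ms of every 100ms period (20% of a CPU)
	fmt.Println(quotaForPeriod(200, 10000))  // 2000: 2ms of every 10ms period (still 20%)
	fmt.Println(quotaForPeriod(5, 100000))   // 1000: clamped to the 1ms minimum
}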
@@ -103,7 +106,7 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64) *ResourceConfig {
 // sum requests and limits.
 reqs, limits := resource.PodRequestsAndLimits(pod)
 
@@ -122,7 +125,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool) *ResourceConfig {
 
 // convert to CFS values
 cpuShares := MilliCPUToShares(cpuRequests)
-cpuQuota, cpuPeriod := MilliCPUToQuota(cpuLimits)
+cpuQuota := MilliCPUToQuota(cpuLimits, int64(cpuPeriod))
 
 // track if limits were applied for each resource.
 memoryLimitsDeclared := true
@@ -20,11 +20,15 @@ package cm
 
 import (
 "reflect"
+"strconv"
 "testing"
+"time"
 
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
-"strconv"
+utilfeature "k8s.io/apiserver/pkg/util/feature"
+utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+pkgfeatures "k8s.io/kubernetes/pkg/features"
 )
 
 // getResourceList returns a ResourceList with the
@@ -49,14 +53,18 @@ func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequir
 }
 
 func TestResourceConfigForPod(t *testing.T) {
+defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond)
+tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond)
+
 minShares := uint64(MinShares)
 burstableShares := MilliCPUToShares(100)
 memoryQuantity := resource.MustParse("200Mi")
 burstableMemory := memoryQuantity.Value()
 burstablePartialShares := MilliCPUToShares(200)
-burstableQuota, burstablePeriod := MilliCPUToQuota(200)
+burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
 guaranteedShares := MilliCPUToShares(100)
-guaranteedQuota, guaranteedPeriod := MilliCPUToQuota(100)
+guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
+guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
 memoryQuantity = resource.MustParse("100Mi")
 cpuNoLimit := int64(-1)
 guaranteedMemory := memoryQuantity.Value()
@@ -64,6 +72,7 @@ func TestResourceConfigForPod(t *testing.T) {
 pod *v1.Pod
 expected *ResourceConfig
 enforceCPULimits bool
+quotaPeriod uint64
 }{
 "besteffort": {
 pod: &v1.Pod{
@@ -76,6 +85,7 @@ func TestResourceConfigForPod(t *testing.T) {
 },
 },
 enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
 expected: &ResourceConfig{CpuShares: &minShares},
 },
 "burstable-no-limits": {
@@ -89,6 +99,7 @@ func TestResourceConfigForPod(t *testing.T) {
 },
 },
 enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
 expected: &ResourceConfig{CpuShares: &burstableShares},
 },
 "burstable-with-limits": {
@@ -102,7 +113,8 @@ func TestResourceConfigForPod(t *testing.T) {
 },
 },
 enforceCPULimits: true,
-expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
 },
 "burstable-with-limits-no-cpu-enforcement": {
 pod: &v1.Pod{
@@ -115,7 +127,8 @@ func TestResourceConfigForPod(t *testing.T) {
 },
 },
 enforceCPULimits: false,
-expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
 },
 "burstable-partial-limits": {
 pod: &v1.Pod{
@@ -131,6 +144,52 @@ func TestResourceConfigForPod(t *testing.T) {
 },
 },
 enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
 expected: &ResourceConfig{CpuShares: &burstablePartialShares},
 },
+"burstable-with-limits-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+},
+"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+},
+},
+},
+enforceCPULimits: false,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+},
+"burstable-partial-limits-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstablePartialShares},
+},
 "guaranteed": {
@@ -144,7 +203,8 @@ func TestResourceConfigForPod(t *testing.T) {
 },
 },
 enforceCPULimits: true,
-expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
 },
 "guaranteed-no-cpu-enforcement": {
 pod: &v1.Pod{
@@ -157,11 +217,264 @@ func TestResourceConfigForPod(t *testing.T) {
 },
 },
 enforceCPULimits: false,
-expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &guaranteedPeriod, Memory: &guaranteedMemory},
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
 },
+"guaranteed-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+},
+"guaranteed-no-cpu-enforcement-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+},
+},
+},
+},
+enforceCPULimits: false,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+},
 }
 
 for testName, testCase := range testCases {
-actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits)
+actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod)
 
 if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
 t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
 }
 if !reflect.DeepEqual(actual.CpuQuota, testCase.expected.CpuQuota) {
 t.Errorf("unexpected result, test: %v, cpu quota not as expected", testName)
 }
 if !reflect.DeepEqual(actual.CpuShares, testCase.expected.CpuShares) {
 t.Errorf("unexpected result, test: %v, cpu shares not as expected", testName)
 }
 if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
 t.Errorf("unexpected result, test: %v, memory not as expected", testName)
 }
 }
 }
 
+func TestResourceConfigForPodWithCustomCPUCFSQuotaPeriod(t *testing.T) {
+defaultQuotaPeriod := uint64(100 * time.Millisecond / time.Microsecond)
+tunedQuotaPeriod := uint64(5 * time.Millisecond / time.Microsecond)
+tunedQuota := int64(1 * time.Millisecond / time.Microsecond)
+
+utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, true)
+defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUCFSQuotaPeriod, false)
+
+minShares := uint64(MinShares)
+burstableShares := MilliCPUToShares(100)
+memoryQuantity := resource.MustParse("200Mi")
+burstableMemory := memoryQuantity.Value()
+burstablePartialShares := MilliCPUToShares(200)
+burstableQuota := MilliCPUToQuota(200, int64(defaultQuotaPeriod))
+guaranteedShares := MilliCPUToShares(100)
+guaranteedQuota := MilliCPUToQuota(100, int64(defaultQuotaPeriod))
+guaranteedTunedQuota := MilliCPUToQuota(100, int64(tunedQuotaPeriod))
+memoryQuantity = resource.MustParse("100Mi")
+cpuNoLimit := int64(-1)
+guaranteedMemory := memoryQuantity.Value()
+testCases := map[string]struct {
+pod *v1.Pod
+expected *ResourceConfig
+enforceCPULimits bool
+quotaPeriod uint64
+}{
+"besteffort": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &minShares},
+},
+"burstable-no-limits": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares},
+},
+"burstable-with-limits": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+},
+"burstable-with-limits-no-cpu-enforcement": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+},
+},
+},
+enforceCPULimits: false,
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+},
+"burstable-partial-limits": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstablePartialShares},
+},
+"burstable-with-limits-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &tunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+},
+"burstable-with-limits-no-cpu-enforcement-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+},
+},
+},
+enforceCPULimits: false,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &burstableMemory},
+},
+"burstable-partial-limits-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
+},
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &burstablePartialShares},
+},
+"guaranteed": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedQuota, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+},
+"guaranteed-no-cpu-enforcement": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+},
+},
+},
+},
+enforceCPULimits: false,
+quotaPeriod: defaultQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+},
+"guaranteed-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+},
+},
+},
+},
+enforceCPULimits: true,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &guaranteedTunedQuota, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+},
+"guaranteed-no-cpu-enforcement-with-tuned-quota": {
+pod: &v1.Pod{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
+{
+Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
+},
+},
+},
+},
+enforceCPULimits: false,
+quotaPeriod: tunedQuotaPeriod,
+expected: &ResourceConfig{CpuShares: &guaranteedShares, CpuQuota: &cpuNoLimit, CpuPeriod: &tunedQuotaPeriod, Memory: &guaranteedMemory},
+},
+}
+
+for testName, testCase := range testCases {
+actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod)
+
+if !reflect.DeepEqual(actual.CpuPeriod, testCase.expected.CpuPeriod) {
 t.Errorf("unexpected result, test: %v, cpu period not as expected", testName)
 }
@@ -225,9 +538,9 @@ func TestMilliCPUToQuota(t *testing.T) {
 },
 }
 for _, testCase := range testCases {
-quota, period := MilliCPUToQuota(testCase.input)
-if quota != testCase.quota || period != testCase.period {
-t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period)
+quota := MilliCPUToQuota(testCase.input, int64(testCase.period))
+if quota != testCase.quota {
+t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.quota, quota)
 }
 }
 }
@@ -28,13 +28,12 @@ const (
 SharesPerCPU = 0
 MilliCPUToCPU = 0
 
-QuotaPeriod = 0
 MinQuotaPeriod = 0
 )
 
-// MilliCPUToQuota converts milliCPU to CFS quota and period values.
-func MilliCPUToQuota(milliCPU int64) (int64, int64) {
-return 0, 0
+// MilliCPUToQuota converts milliCPU and period to CFS quota values.
+func MilliCPUToQuota(milliCPU, period int64) int64 {
+return 0
 }
 
 // MilliCPUToShares converts the milliCPU to CFS shares.
@@ -43,7 +42,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool) *ResourceConfig {
+func ResourceConfigForPod(pod *v1.Pod, enforceCPULimit bool, cpuPeriod uint64) *ResourceConfig {
 return nil
 }
@@ -51,6 +51,9 @@ type podContainerManagerImpl struct {
 podPidsLimit int64
 // enforceCPULimits controls whether cfs quota is enforced or not
 enforceCPULimits bool
+// cpuCFSQuotaPeriod is the cfs period value, cfs_period_us, setting per
+// node for all containers in usec
+cpuCFSQuotaPeriod uint64
 }
 
 // Make sure that podContainerManagerImpl implements the PodContainerManager interface
@@ -81,7 +84,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
 // Create the pod container
 containerConfig := &CgroupConfig{
 Name: podContainerName,
-ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits),
+ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits, m.cpuCFSQuotaPeriod),
 }
 if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SupportPodPidsLimit) && m.podPidsLimit > 0 {
 containerConfig.ResourceParameters.PodPidsLimit = &m.podPidsLimit
@@ -668,6 +668,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 float32(kubeCfg.RegistryPullQPS),
 int(kubeCfg.RegistryBurst),
 kubeCfg.CPUCFSQuota,
+kubeCfg.CPUCFSQuotaPeriod,
 runtimeService,
 imageService,
 kubeDeps.ContainerManager.InternalContainerLifecycle(),
@@ -83,6 +83,7 @@ go_library(
 go_test(
 name = "go_default_test",
 srcs = [
+"helpers_linux_test.go",
 "helpers_test.go",
 "instrumented_services_test.go",
 "kuberuntime_container_linux_test.go",
@@ -21,6 +21,7 @@ import (
 "time"
 
 cadvisorapi "github.com/google/cadvisor/info/v1"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/client-go/tools/record"
 "k8s.io/client-go/util/flowcontrol"
@@ -74,6 +75,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
 kubeRuntimeManager := &kubeGenericRuntimeManager{
 recorder: recorder,
 cpuCFSQuota: false,
+cpuCFSQuotaPeriod: metav1.Duration{Duration: time.Microsecond * 100},
 livenessManager: proberesults.NewManager(),
 containerRefManager: kubecontainer.NewRefManager(),
 machineInfo: machineInfo,
@@ -18,6 +18,11 @@ limitations under the License.
 
 package kuberuntime
 
+import (
+utilfeature "k8s.io/apiserver/pkg/util/feature"
+kubefeatures "k8s.io/kubernetes/pkg/features"
+)
+
 const (
 // Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
 minShares = 2
@@ -25,7 +30,7 @@ const (
 milliCPUToCPU = 1000
 
 // 100000 is equivalent to 100ms
-quotaPeriod = 100 * minQuotaPeriod
+quotaPeriod = 100000
 minQuotaPeriod = 1000
 )
@@ -44,21 +49,22 @@ func milliCPUToShares(milliCPU int64) int64 {
 }
 
 // milliCPUToQuota converts milliCPU to CFS quota and period values
-func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
+func milliCPUToQuota(milliCPU int64, period int64) (quota int64) {
 // CFS quota is measured in two values:
 // - cfs_period_us=100ms (the amount of time to measure usage across)
 // - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
 // so in the above example, you are limited to 20% of a single CPU
 // for multi-cpu environments, you just scale equivalent amounts
 // see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details
 if milliCPU == 0 {
 return
 }
 
-// we set the period to 100ms by default
-period = quotaPeriod
+if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
+period = quotaPeriod
+}
 
 // we then convert your milliCPU to a value normalized over a period
-quota = (milliCPU * quotaPeriod) / milliCPUToCPU
+quota = (milliCPU * period) / milliCPUToCPU
 
 // quota needs to be a minimum of 1ms.
 if quota < minQuotaPeriod {
pkg/kubelet/kuberuntime/helpers_linux_test.go (new file, 204 lines)
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kuberuntime
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
func TestMilliCPUToQuota(t *testing.T) {
|
||||
for _, testCase := range []struct {
|
||||
msg string
|
||||
input int64
|
||||
expected int64
|
||||
period uint64
|
||||
}{
|
||||
{
|
||||
msg: "all-zero",
|
||||
input: int64(0),
|
||||
expected: int64(0),
|
||||
period: uint64(0),
|
||||
},
|
||||
{
|
||||
msg: "5 input default quota and period",
|
||||
input: int64(5),
|
||||
expected: int64(1000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "9 input default quota and period",
|
||||
input: int64(9),
|
||||
expected: int64(1000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "10 input default quota and period",
|
||||
input: int64(10),
|
||||
expected: int64(1000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "200 input 20k quota and default period",
|
||||
input: int64(200),
|
||||
expected: int64(20000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "500 input 50k quota and default period",
|
||||
input: int64(500),
|
||||
expected: int64(50000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "1k input 100k quota and default period",
|
||||
input: int64(1000),
|
||||
expected: int64(100000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "1500 input 150k quota and default period",
|
||||
input: int64(1500),
|
||||
expected: int64(150000),
|
||||
period: uint64(100000),
|
||||
}} {
|
||||
t.Run(testCase.msg, func(t *testing.T) {
|
||||
quota := milliCPUToQuota(testCase.input, int64(testCase.period))
|
||||
if quota != testCase.expected {
|
||||
t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.expected, quota)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMilliCPUToQuotaWithCustomCPUCFSQuotaPeriod(t *testing.T) {
|
||||
utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, true)
|
||||
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, false)
|
||||
|
||||
for _, testCase := range []struct {
|
||||
msg string
|
||||
input int64
|
||||
expected int64
|
||||
period uint64
|
||||
}{
|
||||
{
|
||||
msg: "all-zero",
|
||||
input: int64(0),
|
||||
expected: int64(0),
|
||||
period: uint64(0),
|
||||
},
|
||||
{
|
||||
msg: "5 input default quota and period",
|
||||
input: int64(5),
|
||||
expected: minQuotaPeriod,
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "9 input default quota and period",
|
||||
input: int64(9),
|
||||
expected: minQuotaPeriod,
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "10 input default quota and period",
|
||||
input: int64(10),
|
||||
expected: minQuotaPeriod,
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "200 input 20k quota and default period",
|
||||
input: int64(200),
|
||||
expected: int64(20000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "500 input 50k quota and default period",
|
||||
input: int64(500),
|
||||
expected: int64(50000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "1k input 100k quota and default period",
|
||||
input: int64(1000),
|
||||
expected: int64(100000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "1500 input 150k quota and default period",
|
||||
input: int64(1500),
|
||||
expected: int64(150000),
|
||||
period: uint64(100000),
|
||||
},
|
||||
{
|
||||
msg: "5 input 10k period and default quota expected",
|
||||
input: int64(5),
|
||||
period: uint64(10000),
|
||||
expected: minQuotaPeriod,
|
||||
},
|
||||
{
|
||||
msg: "5 input 5k period and default quota expected",
|
||||
input: int64(5),
|
||||
period: uint64(5000),
|
||||
expected: minQuotaPeriod,
|
||||
},
|
||||
{
|
||||
msg: "9 input 10k period and default quota expected",
|
||||
input: int64(9),
|
||||
period: uint64(10000),
|
||||
expected: minQuotaPeriod,
|
||||
},
|
||||
{
|
||||
msg: "10 input 200k period and 2000 quota expected",
|
||||
input: int64(10),
|
||||
period: uint64(200000),
|
||||
expected: int64(2000),
|
||||
},
|
||||
{
|
||||
msg: "200 input 200k period and 40k quota",
|
||||
input: int64(200),
|
||||
period: uint64(200000),
|
||||
expected: int64(40000),
|
||||
},
|
||||
{
|
||||
msg: "500 input 20k period and 20k expected quota",
|
||||
input: int64(500),
|
||||
period: uint64(20000),
|
||||
expected: int64(10000),
|
||||
},
|
||||
{
|
||||
msg: "1000 input 10k period and 10k expected quota",
|
||||
input: int64(1000),
|
||||
period: uint64(10000),
|
||||
expected: int64(10000),
|
||||
},
|
||||
{
|
||||
msg: "1500 input 5000 period and 7500 expected quota",
|
||||
input: int64(1500),
|
||||
period: uint64(5000),
|
||||
expected: int64(7500),
|
||||
}} {
|
||||
t.Run(testCase.msg, func(t *testing.T) {
|
||||
quota := milliCPUToQuota(testCase.input, int64(testCase.period))
|
||||
if quota != testCase.expected {
|
||||
t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.expected, quota)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -19,6 +19,8 @@ limitations under the License.
 package kuberuntime
 
 import (
+"time"
+
 "k8s.io/api/core/v1"
 runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 "k8s.io/kubernetes/pkg/kubelet/qos"
@@ -65,7 +67,8 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 if m.cpuCFSQuota {
 // if cpuLimit.Amount is nil, then the appropriate default value is returned
 // to allow full usage of cpu resource.
-cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue())
+cpuPeriod := int64(m.cpuCFSQuotaPeriod.Duration / time.Microsecond)
+cpuQuota := milliCPUToQuota(cpuLimit.MilliValue(), cpuPeriod)
 lc.Resources.CpuQuota = cpuQuota
 lc.Resources.CpuPeriod = cpuPeriod
 }
@@ -98,6 +98,9 @@ type kubeGenericRuntimeManager struct {
 // If true, enforce container cpu limits with CFS quota support
 cpuCFSQuota bool
 
+// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
+cpuCFSQuotaPeriod metav1.Duration
+
 // wrapped image puller.
 imagePuller images.ImageManager
 
@@ -146,6 +149,7 @@ func NewKubeGenericRuntimeManager(
 imagePullQPS float32,
 imagePullBurst int,
 cpuCFSQuota bool,
+cpuCFSQuotaPeriod metav1.Duration,
 runtimeService internalapi.RuntimeService,
 imageService internalapi.ImageManagerService,
 internalLifecycle cm.InternalContainerLifecycle,
@@ -154,6 +158,7 @@ func NewKubeGenericRuntimeManager(
 kubeRuntimeManager := &kubeGenericRuntimeManager{
 recorder: recorder,
 cpuCFSQuota: cpuCFSQuota,
+cpuCFSQuotaPeriod: cpuCFSQuotaPeriod,
 seccompProfileRoot: seccompProfileRoot,
 livenessManager: livenessManager,
 containerRefManager: containerRefManager,