default memoryThrottlingFactor to 0.9 and optimize the memory.high calculation formulas
parent 3835c7aecd
commit 7dab6253e1
pkg/generated/openapi/zz_generated.openapi.go (generated, 2 changes)
@@ -58071,7 +58071,7 @@ func schema_k8sio_kubelet_config_v1beta1_KubeletConfiguration(ref common.Referen
 				},
 				"memoryThrottlingFactor": {
 					SchemaProps: spec.SchemaProps{
-						Description: "MemoryThrottlingFactor specifies the factor multiplied by the memory limit or node allocatable memory when setting the cgroupv2 memory.high value to enforce MemoryQoS. Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure while increasing will put less reclaim pressure. See https://kep.k8s.io/2570 for more details. Default: 0.8",
+						Description: "MemoryThrottlingFactor specifies the factor multiplied by the memory limit or node allocatable memory when setting the cgroupv2 memory.high value to enforce MemoryQoS. Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure while increasing will put less reclaim pressure. See https://kep.k8s.io/2570 for more details. Default: 0.9",
 						Type:        []string{"number"},
 						Format:      "double",
 					},
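Worth noting: the schema description above still says the factor is "multiplied by the memory limit or node allocatable memory", but with the reworked calculation later in this commit the factor scales only the span between the memory request and that ceiling. As a worked example under the new default of 0.9, a container requesting 128Mi with a 256Mi limit gets memory.high = 128Mi + (256Mi - 128Mi) * 0.9 ≈ 243.2Mi, where the old formula gave 256Mi * 0.9 ≈ 230.4Mi.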
@@ -61,7 +61,7 @@ maxOpenFiles: 1000000
 maxPods: 110
 memoryManagerPolicy: None
 memorySwap: {}
-memoryThrottlingFactor: 0.8
+memoryThrottlingFactor: 0.9
 nodeLeaseDurationSeconds: 40
 nodeStatusMaxImages: 50
 nodeStatusReportFrequency: 5m0s
@@ -61,7 +61,7 @@ maxOpenFiles: 1000000
 maxPods: 110
 memoryManagerPolicy: None
 memorySwap: {}
-memoryThrottlingFactor: 0.8
+memoryThrottlingFactor: 0.9
 nodeLeaseDurationSeconds: 40
 nodeStatusMaxImages: 50
 nodeStatusReportFrequency: 5m0s
@@ -440,7 +440,7 @@ type KubeletConfiguration struct {
 	// Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure
 	// while increasing will put less reclaim pressure.
 	// See https://kep.k8s.io/2570 for more details.
-	// Default: 0.8
+	// Default: 0.9
 	// +featureGate=MemoryQoS
 	// +optional
 	MemoryThrottlingFactor *float64
@@ -38,7 +38,7 @@ const (
 	DefaultVolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
 
 	// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos
-	DefaultMemoryThrottlingFactor = 0.8
+	DefaultMemoryThrottlingFactor = 0.9
 )
 
 var (
@@ -65,7 +65,7 @@ var (
 		TopologyManagerPolicy:           kubeletconfig.SingleNumaNodeTopologyManagerPolicy,
 		ShutdownGracePeriod:             metav1.Duration{Duration: 30 * time.Second},
 		ShutdownGracePeriodCriticalPods: metav1.Duration{Duration: 10 * time.Second},
-		MemoryThrottlingFactor:          utilpointer.Float64(0.8),
+		MemoryThrottlingFactor:          utilpointer.Float64(0.9),
 		FeatureGates: map[string]bool{
 			"CustomCPUCFSQuotaPeriod": true,
 			"GracefulNodeShutdown":    true,
@@ -113,7 +113,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
 		internalLifecycle:      cm.NewFakeInternalContainerLifecycle(),
 		logReduction:           logreduction.NewLogReduction(identicalErrorDelay),
 		logManager:             logManager,
-		memoryThrottlingFactor: 0.8,
+		memoryThrottlingFactor: 0.9,
 	}
 
 	typedVersion, err := runtimeService.Version(ctx, kubeRuntimeAPIVersion)
@@ -118,12 +118,12 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
 	// for container level cgroup.
 	memoryHigh := int64(0)
 	if memoryLimit != 0 {
-		memoryHigh = int64(float64(memoryLimit) * m.memoryThrottlingFactor)
+		memoryHigh = int64(float64(memoryRequest) + (float64(memoryLimit)-float64(memoryRequest))*m.memoryThrottlingFactor)
 	} else {
 		allocatable := m.getNodeAllocatable()
 		allocatableMemory, ok := allocatable[v1.ResourceMemory]
 		if ok && allocatableMemory.Value() > 0 {
-			memoryHigh = int64(float64(allocatableMemory.Value()) * m.memoryThrottlingFactor)
+			memoryHigh = int64(float64(memoryRequest) + (float64(allocatableMemory.Value())-float64(memoryRequest))*m.memoryThrottlingFactor)
 		}
 	}
 	if memoryHigh > memoryRequest {
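To make the before/after concrete, here is a small self-contained sketch of the two formulas. The function names and sample values are illustrative, not taken from the kubelet source; only the arithmetic mirrors generateLinuxContainerResources above. Both ceilings (the container memory limit and node allocatable memory) are interpolated the same way.

// A runnable comparison of the old and new memory.high formulas.
package main

import "fmt"

const memoryThrottlingFactor = 0.9 // new DefaultMemoryThrottlingFactor; was 0.8

// oldMemoryHigh: the pre-change formula, factor times the ceiling.
func oldMemoryHigh(memoryLimit int64) int64 {
	return int64(float64(memoryLimit) * memoryThrottlingFactor)
}

// newMemoryHigh: interpolates between the request and the ceiling, so the
// result always exceeds the request when limit > request and factor > 0.
func newMemoryHigh(memoryRequest, memoryLimit int64) int64 {
	return int64(float64(memoryRequest) + (float64(memoryLimit)-float64(memoryRequest))*memoryThrottlingFactor)
}

func main() {
	memoryRequest := int64(128 * 1024 * 1024) // 128Mi
	memoryLimit := int64(256 * 1024 * 1024)   // 256Mi

	fmt.Println(oldMemoryHigh(memoryLimit))                // 241591910 (~230.4Mi)
	fmt.Println(newMemoryHigh(memoryRequest, memoryLimit)) // 255013683 (~243.2Mi)
}

Under the old formula, a container whose request sat close to its limit could see memory.high computed below its own request (e.g. request 950Mi, limit 1000Mi: 0.9 * 1000Mi = 900Mi), in which case the memoryHigh > memoryRequest guard above simply skipped setting memory.high. The interpolated formula keeps memory.high above the request whenever the limit exceeds the request.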
@@ -307,6 +307,8 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
 	_, _, m, err := createTestRuntimeManager()
 	assert.NoError(t, err)
 
+	podRequestMemory := resource.MustParse("128Mi")
+	pod1LimitMemory := resource.MustParse("256Mi")
 	pod1 := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			UID: "12345678",
@@ -323,10 +325,10 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
 					WorkingDir: "testWorkingDir",
 					Resources: v1.ResourceRequirements{
 						Requests: v1.ResourceList{
-							v1.ResourceMemory: resource.MustParse("128Mi"),
+							v1.ResourceMemory: podRequestMemory,
 						},
 						Limits: v1.ResourceList{
-							v1.ResourceMemory: resource.MustParse("256Mi"),
+							v1.ResourceMemory: pod1LimitMemory,
 						},
 					},
 				},
@@ -350,7 +352,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
 					WorkingDir: "testWorkingDir",
 					Resources: v1.ResourceRequirements{
 						Requests: v1.ResourceList{
-							v1.ResourceMemory: resource.MustParse("128Mi"),
+							v1.ResourceMemory: podRequestMemory,
 						},
 					},
 				},
@@ -358,7 +360,8 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
 			},
 		},
 	}
 	memoryNodeAllocatable := resource.MustParse(fakeNodeAllocatableMemory)
-	pod2MemoryHigh := float64(memoryNodeAllocatable.Value()) * m.memoryThrottlingFactor
+	pod1MemoryHigh := float64(podRequestMemory.Value()) + (float64(pod1LimitMemory.Value())-float64(podRequestMemory.Value()))*m.memoryThrottlingFactor
+	pod2MemoryHigh := float64(podRequestMemory.Value()) + (float64(memoryNodeAllocatable.Value())-float64(podRequestMemory.Value()))*m.memoryThrottlingFactor
 
 	type expectedResult struct {
 		containerConfig *runtimeapi.LinuxContainerConfig
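For the fixture values above, pod1MemoryHigh works out to 134217728 + (268435456 - 134217728) * 0.9 = 255013683.2 bytes, so int64(pod1MemoryHigh) = 255013683 (~243.2Mi). pod2MemoryHigh follows the allocatable-memory branch and depends on fakeNodeAllocatableMemory, whose value is not shown in this excerpt.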
@@ -378,7 +381,7 @@ func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
 			expected: &expectedResult{
 				l1,
 				128 * 1024 * 1024,
-				int64(float64(256*1024*1024) * m.memoryThrottlingFactor),
+				int64(pod1MemoryHigh),
 			},
 		},
 		{
@@ -778,7 +778,7 @@ type KubeletConfiguration struct {
 	// Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure
 	// while increasing will put less reclaim pressure.
 	// See https://kep.k8s.io/2570 for more details.
-	// Default: 0.8
+	// Default: 0.9
 	// +featureGate=MemoryQoS
 	// +optional
 	MemoryThrottlingFactor *float64 `json:"memoryThrottlingFactor,omitempty"`