Revert "assign -998 as the oom_score_adj for critical pods."
This reverts commit 53931fbce4
.
This commit is contained in:
parent
b8a63537dd
commit
a3ae8c2b21
@@ -16,20 +16,14 @@ limitations under the License.
 
 package qos
 
-import (
-	"k8s.io/kubernetes/pkg/api/v1"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-)
+import "k8s.io/kubernetes/pkg/api/v1"
 
 const (
 	// PodInfraOOMAdj is very docker specific. For arbitrary runtime, it may not make
 	// sense to set sandbox level oom score, e.g. a sandbox could only be a namespace
 	// without a process.
 	// TODO: Handle infra container oom score adj in a runtime agnostic way.
-	// TODO: Should handle critical pod oom score adj with a proper preemption priority.
-	// This is the workaround for https://github.com/kubernetes/kubernetes/issues/38322.
 	PodInfraOOMAdj       int = -998
-	CriticalPodOOMAdj    int = -998
 	KubeletOOMScoreAdj   int = -999
 	DockerOOMScoreAdj    int = -999
 	KubeProxyOOMScoreAdj int = -999
@@ -44,10 +38,6 @@ const (
 // and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
 // See https://lwn.net/Articles/391222/ for more information.
 func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int {
-	if kubetypes.IsCriticalPod(pod) {
-		return CriticalPodOOMAdj
-	}
-
 	switch GetPodQOS(pod) {
 	case v1.PodQOSGuaranteed:
 		// Guaranteed containers should be the last to get killed.
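For context on what the qos package computes once this revert lands, here is a minimal, self-contained sketch of the QoS-class-to-oom_score_adj mapping, written as standalone Go rather than the kubelet's actual code. The QOSClass type, the containerOOMScoreAdjust helper, the burstable interpolation, and the clamping bounds are illustrative assumptions; only the -998 guaranteed value, the -999 daemon values, and the 1000 upper bound are taken from the constants and comments in the hunks above.

// oomsketch.go: illustrative only, not the kubelet implementation.
package main

import "fmt"

// QOSClass stands in for the pod QoS classes returned by GetPodQOS.
type QOSClass string

const (
	Guaranteed QOSClass = "Guaranteed"
	Burstable  QOSClass = "Burstable"
	BestEffort QOSClass = "BestEffort"
)

// containerOOMScoreAdjust sketches the shape of GetContainerOOMScoreAdjust
// after the revert: no critical-pod special case, only the QoS class and the
// container's memory request relative to node capacity decide the score.
func containerOOMScoreAdjust(class QOSClass, memoryRequest, memoryCapacity int64) int {
	switch class {
	case Guaranteed:
		// Guaranteed containers should be the last workloads to get killed.
		return -998
	case BestEffort:
		// Best-effort containers are the first OOM-kill candidates.
		return 1000
	}
	// Burstable (assumed interpolation): the larger the memory request,
	// the lower and therefore safer the score.
	adj := 1000 - int(1000*memoryRequest/memoryCapacity)
	if adj < 2 {
		adj = 2
	}
	if adj > 999 {
		adj = 999
	}
	return adj
}

func main() {
	capacity := int64(8 << 30) // assume an 8 GiB node for the example
	fmt.Println(containerOOMScoreAdjust(Guaranteed, 0, capacity))    // -998
	fmt.Println(containerOOMScoreAdjust(Burstable, 4<<30, capacity)) // 500
	fmt.Println(containerOOMScoreAdjust(BestEffort, 0, capacity))    // 1000
}

Keeping every workload at -998 or higher means the node daemons pinned at -999 by the constants above (kubelet, docker, kube-proxy) remain less likely to be OOM-killed than any pod. The hunks below undo the matching changes in the package's tests.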
@@ -21,9 +21,7 @@ import (
 	"testing"
 
 	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 const (
@@ -137,25 +135,6 @@ var (
 			},
 		},
 	}
-	criticalPodWithNoLimit = v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Annotations: map[string]string{
-				kubetypes.CriticalPodAnnotationKey: "",
-			},
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Resources: v1.ResourceRequirements{
-						Requests: v1.ResourceList{
-							v1.ResourceName(v1.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)),
-							v1.ResourceName(v1.ResourceCPU):    resource.MustParse("5m"),
-						},
-					},
-				},
-			},
-		},
-	}
 )
 
 type oomTest struct {
@@ -209,12 +188,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 			lowOOMScoreAdj:  2,
 			highOOMScoreAdj: 2,
 		},
-		{
-			pod:             &criticalPodWithNoLimit,
-			memoryCapacity:  standardMemoryAmount,
-			lowOOMScoreAdj:  -998,
-			highOOMScoreAdj: -998,
-		},
 	}
 	for _, test := range oomTests {
 		oomScoreAdj := GetContainerOOMScoreAdjust(test.pod, &test.pod.Spec.Containers[0], test.memoryCapacity)
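As the lwn.net article referenced in the comments explains, a score in the -1000..1000 range takes effect when it is written to /proc/<pid>/oom_score_adj. The snippet below is a generic illustration of that kernel interface, not kubelet code; the applyOOMScoreAdj helper is made up for the example, and adjusting another process's score generally requires elevated privileges.

// applyadj.go: generic /proc illustration, not kubelet code.
package main

import (
	"fmt"
	"os"
	"strconv"
)

// applyOOMScoreAdj writes value (-1000..1000) to /proc/<pid>/oom_score_adj,
// which tells the kernel how eagerly to pick that process when the system
// runs out of memory.
func applyOOMScoreAdj(pid, value int) error {
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	return os.WriteFile(path, []byte(strconv.Itoa(value)), 0644)
}

func main() {
	// Example: make the current process an early OOM-kill candidate,
	// the way a best-effort container (score 1000) would be treated.
	if err := applyOOMScoreAdj(os.Getpid(), 1000); err != nil {
		fmt.Fprintln(os.Stderr, "oom_score_adj:", err)
	}
}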