assign -998 as the oom_score_adj for critical pods.

Dawn Chen 2016-12-21 11:23:41 -08:00
parent 1955ed614f
commit 53931fbce4
2 changed files with 37 additions and 1 deletion


@@ -16,14 +16,20 @@ limitations under the License.
 package qos
 
-import "k8s.io/kubernetes/pkg/api/v1"
+import (
+	"k8s.io/kubernetes/pkg/api/v1"
+	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
+)
 
 const (
 	// PodInfraOOMAdj is very docker specific. For arbitrary runtime, it may not make
 	// sense to set sandbox level oom score, e.g. a sandbox could only be a namespace
 	// without a process.
 	// TODO: Handle infra container oom score adj in a runtime agnostic way.
+	// TODO: Should handle critical pod oom score adj with a proper preemption priority.
+	// This is the workaround for https://github.com/kubernetes/kubernetes/issues/38322.
 	PodInfraOOMAdj int = -998
+	CriticalPodOOMAdj int = -998
 	KubeletOOMScoreAdj int = -999
 	DockerOOMScoreAdj int = -999
 	KubeProxyOOMScoreAdj int = -999
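
The kubepod.IsCriticalPod helper used below is not part of this diff. Consistent with the new test fixture (which marks a pod as critical purely via an annotation), a minimal sketch of what an annotation-based check could look like — a hypothetical stand-in, not the implementation in pkg/kubelet/pod:

package qos

import (
	"k8s.io/kubernetes/pkg/api/v1"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// isCriticalPod is a hypothetical sketch of the check: a pod is treated as
// critical when it carries the critical-pod annotation key, the same key the
// new test fixture below sets.
func isCriticalPod(pod *v1.Pod) bool {
	_, ok := pod.Annotations[kubetypes.CriticalPodAnnotationKey]
	return ok
}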
@@ -38,6 +44,10 @@ const (
 // and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
 // See https://lwn.net/Articles/391222/ for more information.
 func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapacity int64) int {
+	if kubepod.IsCriticalPod(pod) {
+		return CriticalPodOOMAdj
+	}
 	switch GetPodQOS(pod) {
 	case Guaranteed:
 		// Guaranteed containers should be the last to get killed.

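To illustrate the effect of the new branch, a usage sketch (not part of the commit): a pod with no requests or limits that carries the critical-pod annotation now gets CriticalPodOOMAdj (-998) instead of a QoS-derived score. The memory capacity value here is arbitrary.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet/qos"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
	// A pod with no resource requests or limits, marked critical via annotation.
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Annotations: map[string]string{kubetypes.CriticalPodAnnotationKey: ""},
		},
		Spec: v1.PodSpec{Containers: []v1.Container{{}}},
	}
	// With this change the critical-pod check short-circuits the QoS switch.
	fmt.Println(qos.GetContainerOOMScoreAdjust(pod, &pod.Spec.Containers[0], 4*1024*1024*1024)) // -998
}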

@@ -22,6 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 const (
@@ -135,6 +136,25 @@ var (
 			},
 		},
 	}
+	criticalPodWithNoLimit = v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
+			Annotations: map[string]string{
+				kubetypes.CriticalPodAnnotationKey: "",
+			},
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceName(v1.ResourceMemory): resource.MustParse(strconv.Itoa(standardMemoryAmount - 1)),
+							v1.ResourceName(v1.ResourceCPU):    resource.MustParse("5m"),
+						},
+					},
+				},
+			},
+		},
+	}
 )
 
 type oomTest struct {
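The body of oomTest is collapsed between these hunks. Judging from the test entries below, it presumably has roughly the following shape (field types inferred here, not shown in the diff):

type oomTest struct {
	pod             *v1.Pod // pod whose first container is scored
	memoryCapacity  int64   // node memory capacity passed to GetContainerOOMScoreAdjust
	lowOOMScoreAdj  int     // lowest acceptable oom_score_adj for the test case
	highOOMScoreAdj int     // highest acceptable oom_score_adj for the test case
}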
@@ -188,6 +208,12 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 			lowOOMScoreAdj:  2,
 			highOOMScoreAdj: 2,
 		},
+		{
+			pod:             &criticalPodWithNoLimit,
+			memoryCapacity:  standardMemoryAmount,
+			lowOOMScoreAdj:  -998,
+			highOOMScoreAdj: -998,
+		},
 	}
 	for _, test := range oomTests {
 		oomScoreAdj := GetContainerOOMScoreAdjust(test.pod, &test.pod.Spec.Containers[0], test.memoryCapacity)
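
The rest of the loop body is cut off by the diff. Presumably it asserts that the returned score falls within the expected bounds, roughly along these lines (a sketch, not the actual test code); for the new critical-pod case both bounds are -998, so the assertion pins the score exactly:

		if oomScoreAdj < test.lowOOMScoreAdj || oomScoreAdj > test.highOOMScoreAdj {
			t.Errorf("oom_score_adj %d is outside the expected range [%d, %d]",
				oomScoreAdj, test.lowOOMScoreAdj, test.highOOMScoreAdj)
		}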