mirror of https://github.com/k3s-io/kubernetes.git
Make POD container last OOM victim.
Set the oom_score_adj of the POD infrastructure container's PID to -100, which is lower than the default of 0. This ensures that, among the pod's containers, this process is the last OOM victim chosen by the kernel. Fixes #3067.
This commit is contained in:
parent 2d1a8d0da0
commit 8649628c6c
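util.ApplyOomScoreAdj itself is not part of this diff. As a rough sketch of the underlying mechanism (not the repository's actual implementation): on Linux, adjusting a process's OOM score means writing a value in [-1000, 1000] to /proc/<pid>/oom_score_adj, and a process with a lower adjustment than its neighbors is picked later by the OOM killer. The helper name applyOomScoreAdj below is a hypothetical stand-in.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// applyOomScoreAdj is a hypothetical stand-in for util.ApplyOomScoreAdj.
// On Linux the kernel reads a process's OOM score adjustment from
// /proc/<pid>/oom_score_adj; valid values lie in [-1000, 1000].
func applyOomScoreAdj(pid, oomScoreAdj int) error {
	if oomScoreAdj < -1000 || oomScoreAdj > 1000 {
		return fmt.Errorf("invalid oom_score_adj %d, must be in [-1000, 1000]", oomScoreAdj)
	}
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	return os.WriteFile(path, []byte(strconv.Itoa(oomScoreAdj)), 0644)
}

func main() {
	// Lower the current process's OOM score adjustment to -100, the same
	// value the kubelet applies to the POD infrastructure container below.
	if err := applyOomScoreAdj(os.Getpid(), -100); err != nil {
		fmt.Fprintln(os.Stderr, "failed to set oom_score_adj:", err)
	}
}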
@@ -56,6 +56,10 @@ const minShares = 2
 const sharesPerCPU = 1024
 const milliCPUToCPU = 1000
 
+// The oom_score_adj of the POD infrastructure container. The default is 0, so
+// any value below that makes it *less* likely to get OOM killed.
+const podOomScoreAdj = -100
+
 // SyncHandler is an interface implemented by Kubelet, for testability
 type SyncHandler interface {
 	SyncPods([]api.BoundPod) error
@@ -938,7 +942,20 @@ func (kl *Kubelet) createPodInfraContainer(pod *api.BoundPod) (dockertools.Docke
 	if ref != nil {
 		record.Eventf(ref, "pulled", "Successfully pulled image %q", container.Image)
 	}
-	return kl.runContainer(pod, container, nil, "", "")
+	id, err := kl.runContainer(pod, container, nil, "", "")
+	if err != nil {
+		return "", err
+	}
+
+	// Set OOM score of POD container to lower than those of the other
+	// containers in the pod. This ensures that it is killed only as a last
+	// resort.
+	containerInfo, err := kl.dockerClient.InspectContainer(string(id))
+	if err != nil {
+		return "", err
+	}
+
+	return id, util.ApplyOomScoreAdj(containerInfo.State.Pid, podOomScoreAdj)
 }
 
 func (kl *Kubelet) pullImage(img string, ref *api.ObjectReference) error {
@@ -437,7 +437,7 @@ func TestSyncPodsWithTerminationLog(t *testing.T) {
 	}
 	kubelet.drainWorkers()
 	verifyCalls(t, fakeDocker, []string{
-		"list", "create", "start", "list", "inspect_container", "inspect_image", "list", "create", "start"})
+		"list", "create", "start", "inspect_container", "list", "inspect_container", "inspect_image", "list", "create", "start"})
 
 	fakeDocker.Lock()
 	parts := strings.Split(fakeDocker.Container.HostConfig.Binds[0], ":")
@@ -497,7 +497,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
 	kubelet.drainWorkers()
 
 	verifyCalls(t, fakeDocker, []string{
-		"list", "create", "start", "list", "inspect_container", "inspect_image", "list", "create", "start"})
+		"list", "create", "start", "inspect_container", "list", "inspect_container", "inspect_image", "list", "create", "start"})
 
 	fakeDocker.Lock()
 
@@ -547,7 +547,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
 	kubelet.drainWorkers()
 
 	verifyCalls(t, fakeDocker, []string{
-		"list", "create", "start", "list", "inspect_container", "inspect_image", "list", "create", "start"})
+		"list", "create", "start", "inspect_container", "list", "inspect_container", "inspect_image", "list", "create", "start"})
 
 	fakeDocker.Lock()
 
@@ -563,7 +563,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
 	fakeDocker.Unlock()
 }
 
-func TestSyncPodsWithNetCreatesContainer(t *testing.T) {
+func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) {
 	kubelet, fakeDocker := newTestKubelet(t)
 	fakeDocker.ContainerList = []docker.APIContainers{
 		{
@@ -604,7 +604,7 @@ func TestSyncPodsWithNetCreatesContainer(t *testing.T) {
 	fakeDocker.Unlock()
 }
 
-func TestSyncPodsWithNetCreatesContainerCallsHandler(t *testing.T) {
+func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
 	kubelet, fakeDocker := newTestKubelet(t)
 	fakeHttp := fakeHTTP{}
 	kubelet.httpClient = &fakeHttp
@@ -661,7 +661,7 @@ func TestSyncPodsWithNetCreatesContainerCallsHandler(t *testing.T) {
 	}
 }
 
-func TestSyncPodsDeletesWithNoNetContainer(t *testing.T) {
+func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) {
 	kubelet, fakeDocker := newTestKubelet(t)
 	fakeDocker.ContainerList = []docker.APIContainers{
 		{
@@ -692,7 +692,7 @@ func TestSyncPodsDeletesWithNoNetContainer(t *testing.T) {
 	kubelet.drainWorkers()
 
 	verifyCalls(t, fakeDocker, []string{
-		"list", "stop", "create", "start", "list", "list", "inspect_container", "inspect_image", "list", "create", "start"})
+		"list", "stop", "create", "start", "inspect_container", "list", "list", "inspect_container", "inspect_image", "list", "create", "start"})
 
 	// A map iteration is used to delete containers, so must not depend on
 	// order here.
@@ -97,14 +97,28 @@ func TestRunOnce(t *testing.T) {
 			label: "syncPod",
 			container: docker.Container{
 				Config: &docker.Config{Image: "someimage"},
-				State:  docker.State{Running: true},
+				State:  docker.State{Running: true, Pid: 42},
 			},
 		},
 		{
 			label: "syncPod",
 			container: docker.Container{
 				Config: &docker.Config{Image: "someimage"},
-				State:  docker.State{Running: true},
+				State:  docker.State{Running: true, Pid: 42},
+			},
+		},
+		{
+			label: "syncPod",
+			container: docker.Container{
+				Config: &docker.Config{Image: "someimage"},
+				State:  docker.State{Running: true, Pid: 42},
+			},
+		},
+		{
+			label: "syncPod",
+			container: docker.Container{
+				Config: &docker.Config{Image: "someimage"},
+				State:  docker.State{Running: true, Pid: 42},
 			},
 		},
 	},
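To sanity-check the effect on a node after this change, the adjustment can be read back from procfs. A minimal sketch, assuming a Linux node and that the POD infrastructure container's PID has already been obtained (the PID below is a placeholder):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	pid := 42 // placeholder: substitute the POD infra container's PID, e.g. taken from docker inspect
	data, err := os.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		fmt.Fprintln(os.Stderr, "read failed:", err)
		os.Exit(1)
	}
	// With this commit applied, the POD infrastructure container should report -100.
	fmt.Println("oom_score_adj:", strings.TrimSpace(string(data)))
}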