mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-21 10:51:29 +00:00
Add e2e_node test for oom killed container reason
This commit is contained in:
parent
1d02d014e8
commit
fd28f69ca4
@ -241,3 +241,23 @@ func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditi
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// FindContainerStatusInPod finds a container status by its name in the provided pod
|
||||||
|
func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus {
|
||||||
|
for _, containerStatus := range pod.Status.InitContainerStatuses {
|
||||||
|
if containerStatus.Name == containerName {
|
||||||
|
return &containerStatus
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, containerStatus := range pod.Status.ContainerStatuses {
|
||||||
|
if containerStatus.Name == containerName {
|
||||||
|
return &containerStatus
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, containerStatus := range pod.Status.EphemeralContainerStatuses {
|
||||||
|
if containerStatus.Name == containerName {
|
||||||
|
return &containerStatus
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
@ -651,13 +651,13 @@ func (v *podStartVerifier) Verify(event watch.Event) error {
|
|||||||
return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.ContainerStatuses)
|
return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status.ContainerStatuses)
|
||||||
}
|
}
|
||||||
|
|
||||||
if status := findContainerStatusInPod(pod, "blocked"); status != nil {
|
if status := e2epod.FindContainerStatusInPod(pod, "blocked"); status != nil {
|
||||||
if (status.Started != nil && *status.Started == true) || status.LastTerminationState.Terminated != nil || status.State.Waiting == nil {
|
if (status.Started != nil && *status.Started == true) || status.LastTerminationState.Terminated != nil || status.State.Waiting == nil {
|
||||||
return fmt.Errorf("pod %s on node %s should not have started the blocked container: %#v", pod.Name, pod.Spec.NodeName, status)
|
return fmt.Errorf("pod %s on node %s should not have started the blocked container: %#v", pod.Name, pod.Spec.NodeName, status)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
status := findContainerStatusInPod(pod, "fail")
|
status := e2epod.FindContainerStatusInPod(pod, "fail")
|
||||||
if status == nil {
|
if status == nil {
|
||||||
return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status)
|
return fmt.Errorf("pod %s on node %s had incorrect containers: %#v", pod.Name, pod.Spec.NodeName, pod.Status)
|
||||||
}
|
}
|
||||||
@ -741,23 +741,3 @@ func (v *podStartVerifier) VerifyFinal(scenario string, total time.Duration) (*v
|
|||||||
framework.Logf("Pod %s on node %s %s total=%s run=%s execute=%s", pod.Name, pod.Spec.NodeName, scenario, total, v.completeDuration, v.duration)
|
framework.Logf("Pod %s on node %s %s total=%s run=%s execute=%s", pod.Name, pod.Spec.NodeName, scenario, total, v.completeDuration, v.duration)
|
||||||
return pod, errs
|
return pod, errs
|
||||||
}
|
}
|
||||||
|
|
||||||
// findContainerStatusInPod finds a container status by its name in the provided pod.
// Init, regular, and ephemeral container statuses are searched, in that order.
// It returns a pointer to a copy of the matching status, or nil if no container
// has the given name.
// NOTE(review): this appears to duplicate the exported FindContainerStatusInPod
// helper visible elsewhere in this change — consider calling that instead.
func findContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus {
	for _, container := range pod.Status.InitContainerStatuses {
		if container.Name == containerName {
			// &container points at the loop variable — a copy of the slice
			// element — so the pod's status cannot be mutated through it.
			return &container
		}
	}
	for _, container := range pod.Status.ContainerStatuses {
		if container.Name == containerName {
			return &container
		}
	}
	for _, container := range pod.Status.EphemeralContainerStatuses {
		if container.Name == containerName {
			return &container
		}
	}
	// No container with the requested name exists in any status list.
	return nil
}
|
|
||||||
|
124
test/e2e_node/oomkiller_test.go
Normal file
124
test/e2e_node/oomkiller_test.go
Normal file
@ -0,0 +1,124 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package e2enode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||||
|
admissionapi "k8s.io/pod-security-admission/api"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testCase describes a single OOM-kill scenario: the pod to create and the
// name of the container within it that is expected to be OOM killed.
type testCase struct {
	// podSpec is the pod created for the test.
	podSpec *v1.Pod
	// oomTargetContainerName is the container expected to be terminated
	// with reason OOMKilled.
	oomTargetContainerName string
}

// PodOOMKilledTimeout bounds how long the test waits for the target pod to
// reach a terminated (OOM killed) state.
const PodOOMKilledTimeout = 2 * time.Minute
|
||||||
|
|
||||||
|
// Registers the OOMKiller node-conformance suite: creates a pod whose container
// exceeds its memory limit and verifies the container is reported as OOM killed.
var _ = SIGDescribe("OOMKiller [LinuxOnly] [NodeConformance]", func() {
	f := framework.NewDefaultFramework("oomkiller-test")
	// Run the test namespace at the privileged pod-security level.
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

	containerName := "oomkill-target-container"
	oomPodSpec := getOOMTargetPod("oomkill-target-pod", containerName)
	runOomKillerTest(f, testCase{podSpec: oomPodSpec, oomTargetContainerName: containerName})
})
|
||||||
|
|
||||||
|
func runOomKillerTest(f *framework.Framework, testCase testCase) {
|
||||||
|
ginkgo.Context("", func() {
|
||||||
|
ginkgo.BeforeEach(func() {
|
||||||
|
ginkgo.By("setting up the pod to be used in the test")
|
||||||
|
e2epod.NewPodClient(f).Create(context.TODO(), testCase.podSpec)
|
||||||
|
})
|
||||||
|
|
||||||
|
ginkgo.It("The containers terminated by OOM killer should have the reason set to OOMKilled", func() {
|
||||||
|
|
||||||
|
ginkgo.By("Waiting for the pod to be failed")
|
||||||
|
e2epod.WaitForPodTerminatedInNamespace(context.TODO(), f.ClientSet, testCase.podSpec.Name, "", f.Namespace.Name)
|
||||||
|
|
||||||
|
ginkgo.By("Fetching the latest pod status")
|
||||||
|
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), testCase.podSpec.Name, metav1.GetOptions{})
|
||||||
|
framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
|
||||||
|
|
||||||
|
ginkgo.By("Verifying the OOM target container has the expected reason")
|
||||||
|
verifyReasonForOOMKilledContainer(pod, testCase.oomTargetContainerName)
|
||||||
|
})
|
||||||
|
|
||||||
|
ginkgo.AfterEach(func() {
|
||||||
|
ginkgo.By(fmt.Sprintf("deleting pod: %s", testCase.podSpec.Name))
|
||||||
|
e2epod.NewPodClient(f).DeleteSync(context.TODO(), testCase.podSpec.Name, metav1.DeleteOptions{}, framework.PodDeleteTimeout)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyReasonForOOMKilledContainer(pod *v1.Pod, oomTargetContainerName string) {
|
||||||
|
container := e2epod.FindContainerStatusInPod(pod, oomTargetContainerName)
|
||||||
|
if container == nil {
|
||||||
|
framework.Failf("OOM target pod %q, container %q does not have the expected state terminated", pod.Name, container.Name)
|
||||||
|
}
|
||||||
|
if container.State.Terminated == nil {
|
||||||
|
framework.Failf("OOM target pod %q, container %q is not in the terminated state", pod.Name, container.Name)
|
||||||
|
}
|
||||||
|
framework.ExpectEqual(container.State.Terminated.ExitCode, int32(137),
|
||||||
|
fmt.Sprintf("pod: %q, container: %q has unexpected exitCode: %q", pod.Name, container.Name, container.State.Terminated.ExitCode))
|
||||||
|
framework.ExpectEqual(container.State.Terminated.Reason, "OOMKilled",
|
||||||
|
fmt.Sprintf("pod: %q, container: %q has unexpected reason: %q", pod.Name, container.Name, container.State.Terminated.Reason))
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOOMTargetPod(podName string, ctnName string) *v1.Pod {
|
||||||
|
return &v1.Pod{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: podName,
|
||||||
|
},
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
RestartPolicy: v1.RestartPolicyNever,
|
||||||
|
Containers: []v1.Container{
|
||||||
|
getOOMTargetContainer(ctnName),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOOMTargetContainer(name string) v1.Container {
|
||||||
|
return v1.Container{
|
||||||
|
Name: name,
|
||||||
|
Image: busyboxImage,
|
||||||
|
Command: []string{
|
||||||
|
"sh",
|
||||||
|
"-c",
|
||||||
|
// use the dd tool to attempt to allocate 20M in a block which exceeds the limit
|
||||||
|
"dd if=/dev/zero of=/dev/null bs=20M",
|
||||||
|
},
|
||||||
|
Resources: v1.ResourceRequirements{
|
||||||
|
Requests: v1.ResourceList{
|
||||||
|
v1.ResourceMemory: resource.MustParse("15Mi"),
|
||||||
|
},
|
||||||
|
Limits: v1.ResourceList{
|
||||||
|
v1.ResourceMemory: resource.MustParse("15Mi"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
Loading…
Reference in New Issue
Block a user