Mirror of https://github.com/k3s-io/kubernetes.git
e2e: add TERM trap to pod sleep command
This avoids the 30-second deletion delay caused by the shell not responding to SIGTERM, which means it can only be killed by SIGKILL once the grace period expires. When a pod is deleted together with its namespace during cleanup, this also makes cleanup faster and frees up resources for the next test cases sooner.
commit 859bd02593
parent 9d63e575f8
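Why the trap matters: the container's command runs as PID 1 in its PID namespace, and the kernel ignores signals sent to PID 1 unless the process has installed a handler for them (SIGKILL from the node aside). A bare "sleep 100000" never installs a TERM handler, so the kubelet's SIGTERM is dropped and the pod only goes away when the grace period (30s by default) expires and SIGKILL arrives. With "trap exit TERM; while true; do sleep 1; done" the shell keeps running as PID 1 with a TERM handler in place, and it exits within about a second of the signal, right after the current "sleep 1" returns. A minimal sketch of the resulting container spec, using the image helper and the constant from the diff below (the wrapper function name is made up for illustration):

    package example

    import (
        v1 "k8s.io/api/core/v1"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
        imageutils "k8s.io/kubernetes/test/utils/image"
    )

    // sleeperContainer builds a busybox container that idles until the pod
    // is deleted. The function name is illustrative, not from the commit.
    func sleeperContainer() v1.Container {
        return v1.Container{
            Name:    "busybox-main",
            Image:   imageutils.GetE2EImage(imageutils.BusyBox),
            Command: []string{"/bin/sh"},
            // Before: Args: []string{"-c", "sleep 100000"}. PID 1 has no
            // TERM handler, so the pod dies only via SIGKILL after the
            // grace period. After: the trap installs a TERM handler, and
            // the loop re-checks every second, so shutdown takes about 1s.
            Args: []string{"-c", e2epod.InfiniteSleepCommand},
        }
    }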
@@ -259,7 +259,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
 				Name: busyBoxMainContainerName,
 				Image: imageutils.GetE2EImage(imageutils.BusyBox),
 				Command: []string{"/bin/sh"},
-				Args: []string{"-c", "sleep 100000"},
+				Args: []string{"-c", e2epod.InfiniteSleepCommand},
 				VolumeMounts: []v1.VolumeMount{
 					{
 						Name: volumeName,
@@ -330,7 +330,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
 				Name: busyBoxMainContainerName,
 				Image: imageutils.GetE2EImage(imageutils.BusyBox),
 				Command: []string{"/bin/sh"},
-				Args: []string{"-c", "sleep 100000"},
+				Args: []string{"-c", e2epod.InfiniteSleepCommand},
 				VolumeMounts: []v1.VolumeMount{
 					{
 						Name: volumeName,
@@ -178,7 +178,7 @@ func (o replicaSetsByCreationTimestamp) Less(i, j int) bool {
 // name. A slice of BASH commands can be supplied as args to be run by the pod
 func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *appsv1.Deployment {
 	if len(command) == 0 {
-		command = "trap exit TERM; while true; do sleep 1; done"
+		command = e2epod.InfiniteSleepCommand
 	}
 	zero := int64(0)
 	deploymentName := "deployment-" + string(uuid.NewUUID())
@@ -131,7 +131,7 @@ func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interfa
 // name. A slice of BASH commands can be supplied as args to be run by the pod
 func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *v1.Pod {
 	if len(command) == 0 {
-		command = "trap exit TERM; while true; do sleep 1; done"
+		command = InfiniteSleepCommand
 	}
 	podSpec := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
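For callers nothing changes: passing an empty command still produces a pod that idles indefinitely, it just now shuts down promptly. A hypothetical call site (the nil selectors, nil claims, and security level are placeholders):

    package example

    import (
        v1 "k8s.io/api/core/v1"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
        admissionapi "k8s.io/pod-security-admission/api"
    )

    // idlePod shows the default path: with an empty command, MakePod now
    // falls back to InfiniteSleepCommand instead of the old inline string.
    func idlePod(ns string) *v1.Pod {
        return e2epod.MakePod(ns, nil, nil, admissionapi.LevelBaseline, "")
    }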
@@ -172,7 +172,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 		return nil, fmt.Errorf("Cannot create pod with empty namespace")
 	}
 	if len(podConfig.Command) == 0 {
-		podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
+		podConfig.Command = InfiniteSleepCommand
 	}
 
 	podName := "pod-" + string(uuid.NewUUID())
@@ -43,11 +43,11 @@ func NodeOSDistroIs(distro string) bool {
 	return false
 }
 
+const InfiniteSleepCommand = "trap exit TERM; while true; do sleep 1; done"
+
 // GenerateScriptCmd generates the corresponding command lines to execute a command.
 func GenerateScriptCmd(command string) []string {
-	var commands []string
-	commands = []string{"/bin/sh", "-c", command}
-	return commands
+	return []string{"/bin/sh", "-c", command}
 }
 
 // GetDefaultTestImage returns the default test image based on OS.
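With the constant exported from the framework's pod package, every suite shares one definition, and GenerateScriptCmd collapses to a single return. Roughly what a caller gets back (a runnable sketch, not part of the commit):

    package main

    import (
        "fmt"

        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    func main() {
        cmd := e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
        // Prints: [/bin/sh -c trap exit TERM; while true; do sleep 1; done]
        fmt.Println(cmd)
    }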
@@ -174,7 +174,6 @@ func initDefaultResizePolicy(containers []TestContainerInfo) {
 }
 
 func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStatus) {
-	cmd := "trap exit TERM; while true; do sleep 1; done"
 	res, alloc, resizePol := getTestResourceInfo(tcInfo)
 	bTrue := true
 	bFalse := false
@@ -209,7 +208,7 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat
 		Name: tcInfo.Name,
 		Image: imageutils.GetE2EImage(imageutils.BusyBox),
 		Command: []string{"/bin/sh"},
-		Args: []string{"-c", cmd},
+		Args: []string{"-c", e2epod.InfiniteSleepCommand},
 		Resources: res,
 		ResizePolicy: resizePol,
 		SecurityContext: securityContext,
@@ -295,7 +295,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 
 	l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} {
 		// Create another pod with the same inline volume attributes.
-		pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000",
+		pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", e2epod.InfiniteSleepCommand,
 			[]v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource},
 			readOnly,
 			l.testCase.Node)
@@ -387,7 +387,6 @@ func (t EphemeralTest) TestEphemeral(ctx context.Context) {
 	gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
 
 	ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
-	command := "sleep 10000"
 
 	var volumes []v1.VolumeSource
 	numVolumes := t.NumInlineVolumes
@@ -415,7 +414,7 @@ func (t EphemeralTest) TestEphemeral(ctx context.Context) {
 		}
 		volumes = append(volumes, volume)
 	}
-	pod := StartInPodWithInlineVolume(ctx, client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
+	pod := StartInPodWithInlineVolume(ctx, client, t.Namespace, "inline-volume-tester", e2epod.InfiniteSleepCommand, volumes, t.ReadOnly, t.Node)
 	defer func() {
 		// pod might be nil now.
 		StopPodAndDependents(ctx, client, t.Timeouts, pod)
@@ -449,7 +449,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
 
 	// Change volume container to busybox so we can exec later
 	l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
-	l.pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	l.pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	l.pod.Spec.Containers[1].Args = nil
 
 	ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
@@ -793,10 +793,10 @@ func testPodContainerRestartWithHooks(ctx context.Context, f *framework.Framewor
 	pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
 
 	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[0].Args = nil
 	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[1].Args = nil
 	hooks.AddLivenessProbe(pod, probeFilePath)
 
@@ -971,10 +971,10 @@ func testSubpathReconstruction(ctx context.Context, f *framework.Framework, host
 
 	// Change to busybox
 	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[0].Args = nil
 	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[1].Args = nil
 	// If grace period is too short, then there is not enough time for the volume
 	// manager to cleanup the volumes
@@ -170,7 +170,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	// Create <limit> Pods.
 	ginkgo.By(fmt.Sprintf("Creating %d Pod(s) with one volume each", limit))
 	for i := 0; i < limit; i++ {
-		pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", "sleep 1000000", selection)
+		pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", e2epod.InfiniteSleepCommand, selection)
 		l.podNames = append(l.podNames, pod.Name)
 		l.pvcNames = append(l.pvcNames, ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0]))
 	}
@@ -214,7 +214,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 	}
 
 	ginkgo.By("Creating an extra pod with one volume to exceed the limit")
-	pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", "sleep 10000", selection)
+	pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", e2epod.InfiniteSleepCommand, selection)
 	l.podNames = append(l.podNames, pod.Name)
 
 	ginkgo.By("Waiting for the pod to get unschedulable with the right message")