From 859bd02593cd58b9ef7aa1350d711a70d22f9f9e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=83=A1=E7=8E=AE=E6=96=87?=
Date: Tue, 11 Jun 2024 13:45:13 +0800
Subject: [PATCH] e2e: add TERM trap to pod sleep command

This should avoid the 30s delay caused by a shell that ignores SIGTERM
and can therefore only be killed by SIGKILL. If the pod is deleted
together with its namespace during cleanup, this also speeds up cleanup
and frees resources for the next test cases sooner.
---
 test/e2e/common/storage/empty_dir.go        | 4 ++--
 test/e2e/framework/deployment/fixtures.go   | 2 +-
 test/e2e/framework/pod/create.go            | 4 ++--
 test/e2e/framework/pod/utils.go             | 6 +++---
 test/e2e/node/pod_resize.go                 | 3 +--
 test/e2e/storage/testsuites/ephemeral.go    | 5 ++---
 test/e2e/storage/testsuites/subpath.go      | 10 +++++-----
 test/e2e/storage/testsuites/volumelimits.go | 4 ++--
 8 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/test/e2e/common/storage/empty_dir.go b/test/e2e/common/storage/empty_dir.go
index 310e03ac03d..74aaa44ded3 100644
--- a/test/e2e/common/storage/empty_dir.go
+++ b/test/e2e/common/storage/empty_dir.go
@@ -259,7 +259,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
 				Name: busyBoxMainContainerName,
 				Image: imageutils.GetE2EImage(imageutils.BusyBox),
 				Command: []string{"/bin/sh"},
-				Args: []string{"-c", "sleep 100000"},
+				Args: []string{"-c", e2epod.InfiniteSleepCommand},
 				VolumeMounts: []v1.VolumeMount{
 					{
 						Name: volumeName,
@@ -330,7 +330,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
 				Name: busyBoxMainContainerName,
 				Image: imageutils.GetE2EImage(imageutils.BusyBox),
 				Command: []string{"/bin/sh"},
-				Args: []string{"-c", "sleep 100000"},
+				Args: []string{"-c", e2epod.InfiniteSleepCommand},
 				VolumeMounts: []v1.VolumeMount{
 					{
 						Name: volumeName,
diff --git a/test/e2e/framework/deployment/fixtures.go b/test/e2e/framework/deployment/fixtures.go
index 8054b50cf07..b8e583ff409 100644
--- a/test/e2e/framework/deployment/fixtures.go
+++ b/test/e2e/framework/deployment/fixtures.go
@@ -178,7 +178,7 @@ func (o replicaSetsByCreationTimestamp) Less(i, j int) bool {
 // name. A slice of BASH commands can be supplied as args to be run by the pod
 func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *appsv1.Deployment {
 	if len(command) == 0 {
-		command = "trap exit TERM; while true; do sleep 1; done"
+		command = e2epod.InfiniteSleepCommand
 	}
 	zero := int64(0)
 	deploymentName := "deployment-" + string(uuid.NewUUID())
diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go
index 1edd6457559..7378fa3e53e 100644
--- a/test/e2e/framework/pod/create.go
+++ b/test/e2e/framework/pod/create.go
@@ -131,7 +131,7 @@ func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interfa
 // name. A slice of BASH commands can be supplied as args to be run by the pod
 func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *v1.Pod {
 	if len(command) == 0 {
-		command = "trap exit TERM; while true; do sleep 1; done"
+		command = InfiniteSleepCommand
 	}
 	podSpec := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
@@ -172,7 +172,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 		return nil, fmt.Errorf("Cannot create pod with empty namespace")
 	}
 	if len(podConfig.Command) == 0 {
-		podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
+		podConfig.Command = InfiniteSleepCommand
 	}
 	podName := "pod-" + string(uuid.NewUUID())
diff --git a/test/e2e/framework/pod/utils.go b/test/e2e/framework/pod/utils.go
index 3497f2207ca..cf33d9f1771 100644
--- a/test/e2e/framework/pod/utils.go
+++ b/test/e2e/framework/pod/utils.go
@@ -43,11 +43,11 @@ func NodeOSDistroIs(distro string) bool {
 	return false
 }
 
+const InfiniteSleepCommand = "trap exit TERM; while true; do sleep 1; done"
+
 // GenerateScriptCmd generates the corresponding command lines to execute a command.
 func GenerateScriptCmd(command string) []string {
-	var commands []string
-	commands = []string{"/bin/sh", "-c", command}
-	return commands
+	return []string{"/bin/sh", "-c", command}
 }
 
 // GetDefaultTestImage returns the default test image based on OS.
diff --git a/test/e2e/node/pod_resize.go b/test/e2e/node/pod_resize.go
index 705fed23de0..3935b5e10fe 100644
--- a/test/e2e/node/pod_resize.go
+++ b/test/e2e/node/pod_resize.go
@@ -174,7 +174,6 @@ func initDefaultResizePolicy(containers []TestContainerInfo) {
 }
 
 func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStatus) {
-	cmd := "trap exit TERM; while true; do sleep 1; done"
 	res, alloc, resizePol := getTestResourceInfo(tcInfo)
 	bTrue := true
 	bFalse := false
@@ -209,7 +208,7 @@ func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStat
 		Name: tcInfo.Name,
 		Image: imageutils.GetE2EImage(imageutils.BusyBox),
 		Command: []string{"/bin/sh"},
-		Args: []string{"-c", cmd},
+		Args: []string{"-c", e2epod.InfiniteSleepCommand},
 		Resources: res,
 		ResizePolicy: resizePol,
 		SecurityContext: securityContext,
diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go
index 6ff20720cd2..17b281ba03a 100644
--- a/test/e2e/storage/testsuites/ephemeral.go
+++ b/test/e2e/storage/testsuites/ephemeral.go
@@ -295,7 +295,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 		l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} {
 			// Create another pod with the same inline volume attributes.
-			pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000",
+			pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", e2epod.InfiniteSleepCommand,
 				[]v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource},
 				readOnly,
 				l.testCase.Node)
@@ -387,7 +387,6 @@ func (t EphemeralTest) TestEphemeral(ctx context.Context) {
 	gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
 
 	ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
-	command := "sleep 10000"
 	var volumes []v1.VolumeSource
 	numVolumes := t.NumInlineVolumes
@@ -415,7 +414,7 @@ func (t EphemeralTest) TestEphemeral(ctx context.Context) {
 		}
 		volumes = append(volumes, volume)
 	}
-	pod := StartInPodWithInlineVolume(ctx, client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
+	pod := StartInPodWithInlineVolume(ctx, client, t.Namespace, "inline-volume-tester", e2epod.InfiniteSleepCommand, volumes, t.ReadOnly, t.Node)
 	defer func() {
 		// pod might be nil now.
 		StopPodAndDependents(ctx, client, t.Timeouts, pod)
diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go
index e46fd976959..1fb4c3ca95e 100644
--- a/test/e2e/storage/testsuites/subpath.go
+++ b/test/e2e/storage/testsuites/subpath.go
@@ -449,7 +449,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
 		// Change volume container to busybox so we can exec later
 		l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
-		l.pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
+		l.pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 		l.pod.Spec.Containers[1].Args = nil
 
 		ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
@@ -793,10 +793,10 @@ func testPodContainerRestartWithHooks(ctx context.Context, f *framework.Framewor
 	pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
 
 	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[0].Args = nil
 	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[1].Args = nil
 
 	hooks.AddLivenessProbe(pod, probeFilePath)
@@ -971,10 +971,10 @@ func testSubpathReconstruction(ctx context.Context, f *framework.Framework, host
 	// Change to busybox
 	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[0].Args = nil
 	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
-	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
 	pod.Spec.Containers[1].Args = nil
 
 	// If grace period is too short, then there is not enough time for the volume
 	// manager to cleanup the volumes
diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go
index 53a7b794c5b..bb92ca20a9b 100644
--- a/test/e2e/storage/testsuites/volumelimits.go
+++ b/test/e2e/storage/testsuites/volumelimits.go
@@ -170,7 +170,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 		// Create Pods.
 		ginkgo.By(fmt.Sprintf("Creating %d Pod(s) with one volume each", limit))
 		for i := 0; i < limit; i++ {
-			pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", "sleep 1000000", selection)
+			pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", e2epod.InfiniteSleepCommand, selection)
 			l.podNames = append(l.podNames, pod.Name)
 			l.pvcNames = append(l.pvcNames, ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0]))
 		}
@@ -214,7 +214,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
 		}
 
 		ginkgo.By("Creating an extra pod with one volume to exceed the limit")
-		pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", "sleep 10000", selection)
+		pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", e2epod.InfiniteSleepCommand, selection)
 		l.podNames = append(l.podNames, pod.Name)
 
 		ginkgo.By("Waiting for the pod to get unschedulable with the right message")
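
For illustration only, not part of the patch: a minimal Go sketch of how a test can wire the new helper into a busybox container, in the style of the call sites touched above. The package clause, function name, and container name below are placeholders; only e2epod.InfiniteSleepCommand, e2epod.GenerateScriptCmd, and the busybox image helper come from the patch and the existing framework code.

package example

import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// sleeperContainer returns a busybox container that idles until it receives
// SIGTERM. Because InfiniteSleepCommand traps TERM and exits, deleting the pod
// completes promptly instead of waiting out the grace period for SIGKILL.
func sleeperContainer(name string) v1.Container { // name is illustrative
	return v1.Container{
		Name:    name,
		Image:   imageutils.GetE2EImage(imageutils.BusyBox),
		Command: e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand),
	}
}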