From 1bc5364b16c7c446f9577249bd04139421c5ef65 Mon Sep 17 00:00:00 2001
From: Eddie Zaneski
Date: Tue, 20 Apr 2021 17:58:32 -0600
Subject: [PATCH] [e2e] match kubectl run timeout to pod start timeout

Signed-off-by: Eddie Zaneski
---
 test/e2e/kubectl/kubectl.go | 124 +++++++++++++++++++-----------------
 1 file changed, 67 insertions(+), 57 deletions(-)

diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go
index 3d51d0ce515..f6810401361 100644
--- a/test/e2e/kubectl/kubectl.go
+++ b/test/e2e/kubectl/kubectl.go
@@ -109,6 +109,9 @@
 	httpdImage   = imageutils.GetE2EImage(imageutils.Httpd)
 	busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox)
 	agnhostImage = imageutils.GetE2EImage(imageutils.Agnhost)
+
+	// If this suite still flakes due to timeouts we should change this to framework.PodStartTimeout
+	podRunningTimeoutArg = fmt.Sprintf("--pod-running-timeout=%s", framework.PodStartShortTimeout.String())
 )
 
 var proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
@@ -387,7 +390,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 
 		ginkgo.It("should support exec", func() {
 			ginkgo.By("executing a command in the container")
-			execOutput := framework.RunKubectlOrDie(ns, "exec", simplePodName, "echo", "running", "in", "container")
+			execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container")
 			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
 				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 			}
@@ -397,11 +400,11 @@ var _ = SIGDescribe("Kubectl client", func() {
 			for i := 0; i < len(veryLongData); i++ {
 				veryLongData[i] = 'a'
 			}
-			execOutput = framework.RunKubectlOrDie(ns, "exec", simplePodName, "echo", string(veryLongData))
+			execOutput = framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData))
 			framework.ExpectEqual(string(veryLongData), strings.TrimSpace(execOutput), "Unexpected kubectl exec output")
 
 			ginkgo.By("executing a command in the container with noninteractive stdin")
-			execOutput = framework.NewKubectlCommand(ns, "exec", "-i", simplePodName, "cat").
+			execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat").
 				WithStdinData("abcd1234").
 				ExecOrDie(ns)
 			if e, a := "abcd1234", execOutput; e != a {
@@ -417,7 +420,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			defer closer.Close()
 
 			ginkgo.By("executing a command in the container with pseudo-interactive stdin")
-			execOutput = framework.NewKubectlCommand(ns, "exec", "-i", simplePodName, "sh").
+			execOutput = framework.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh").
 				WithStdinReader(r).
 				ExecOrDie(ns)
 			if e, a := "hi", strings.TrimSpace(execOutput); e != a {
@@ -427,7 +430,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 
 		ginkgo.It("should support exec using resource/name", func() {
 			ginkgo.By("executing a command in the container")
-			execOutput := framework.RunKubectlOrDie(ns, "exec", simplePodResourceName, "echo", "running", "in", "container")
+			execOutput := framework.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container")
 			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
 				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 			}
@@ -447,7 +450,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
 				proxyLogs.Reset()
 				ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
-				output := framework.NewKubectlCommand(ns, fmt.Sprintf("--namespace=%s", ns), "exec", "httpd", "echo", "running", "in", "container").
+				output := framework.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container").
 					WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
 					ExecOrDie(ns)
 
@@ -482,8 +485,8 @@ var _ = SIGDescribe("Kubectl client", func() {
 				host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
 				ginkgo.By("Running kubectl via kubectl proxy using " + host)
 				output := framework.NewKubectlCommand(
-					ns, host, fmt.Sprintf("--namespace=%s", ns),
-					"exec", "httpd", "echo", "running", "in", "container",
+					ns, host,
+					"exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container",
 				).ExecOrDie(ns)
 
 				// Verify we got the normal output captured by the exec server
@@ -493,53 +496,60 @@ var _ = SIGDescribe("Kubectl client", func() {
 			}
 		})
 
-		ginkgo.It("should return command exit codes", func() {
-			ginkgo.By("execing into a container with a successful command")
-			_, err := framework.NewKubectlCommand(ns, "exec", "httpd", "--", "/bin/sh", "-c", "exit 0").Exec()
-			framework.ExpectNoError(err)
+		ginkgo.Context("should return command exit codes", func() {
+			ginkgo.It("execing into a container with a successful command", func() {
+				_, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
+				framework.ExpectNoError(err)
+			})
 
-			ginkgo.By("execing into a container with a failing command")
-			_, err = framework.NewKubectlCommand(ns, "exec", "httpd", "--", "/bin/sh", "-c", "exit 42").Exec()
-			ee, ok := err.(uexec.ExitError)
-			framework.ExpectEqual(ok, true)
-			framework.ExpectEqual(ee.ExitStatus(), 42)
+			ginkgo.It("execing into a container with a failing command", func() {
+				_, err := framework.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
+				ee, ok := err.(uexec.ExitError)
+				framework.ExpectEqual(ok, true)
+				framework.ExpectEqual(ee.ExitStatus(), 42)
+			})
 
-			ginkgo.By("running a successful command")
-			_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec()
-			framework.ExpectNoError(err)
+			ginkgo.It("running a successful command", func() {
+				_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
+				framework.ExpectNoError(err)
+			})
 
-			ginkgo.By("running a failing command")
-			_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
-			ee, ok = err.(uexec.ExitError)
-			framework.ExpectEqual(ok, true)
-			framework.ExpectEqual(ee.ExitStatus(), 42)
+			ginkgo.It("running a failing command", func() {
+				_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
+				ee, ok := err.(uexec.ExitError)
+				framework.ExpectEqual(ok, true)
+				framework.ExpectEqual(ee.ExitStatus(), 42)
+			})
 
-			ginkgo.By("running a failing command without --restart=Never")
-			_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
-				WithStdinData("abcd1234").
-				Exec()
-			ee, ok = err.(uexec.ExitError)
-			framework.ExpectEqual(ok, true)
-			if !strings.Contains(ee.String(), "timed out") {
-				framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
-			}
+			ginkgo.It("[Slow] running a failing command without --restart=Never", func() {
+				_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
+					WithStdinData("abcd1234").
+					Exec()
+				ee, ok := err.(uexec.ExitError)
+				framework.ExpectEqual(ok, true)
+				if !strings.Contains(ee.String(), "timed out") {
+					framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
+				}
+			})
 
-			ginkgo.By("running a failing command without --restart=Never, but with --rm")
-			_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
-				WithStdinData("abcd1234").
-				Exec()
-			ee, ok = err.(uexec.ExitError)
-			framework.ExpectEqual(ok, true)
-			if !strings.Contains(ee.String(), "timed out") {
-				framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
-			}
-			e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
+			ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func() {
+				_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
+					WithStdinData("abcd1234").
+					Exec()
+				ee, ok := err.(uexec.ExitError)
+				framework.ExpectEqual(ok, true)
+				if !strings.Contains(ee.String(), "timed out") {
+					framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
+				}
+				e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
+			})
 
-			ginkgo.By("running a failing command with --leave-stdin-open")
-			_, err = framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
-				WithStdinData("abcd1234").
-				Exec()
-			framework.ExpectNoError(err)
+			ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func() {
+				_, err := framework.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
+					WithStdinData("abcd1234").
+					Exec()
+				framework.ExpectNoError(err)
+			})
 		})
 
 		ginkgo.It("should support inline execution and attach", func() {
@@ -556,7 +566,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 
 			ginkgo.By("executing a command with run and attach with stdin")
 			// We wait for a non-empty line so we know kubectl has attached
-			framework.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
+			framework.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
 				WithStdinData("value\nabcd1234").
 				ExecOrDie(ns)
 
@@ -573,7 +583,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			// "stdin closed", but hasn't exited yet.
 			// We wait 10 seconds before printing to give time to kubectl to attach
 			// to the container, this does not solve the race though.
-			framework.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
+			framework.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
 				WithStdinData("abcd1234").
 				ExecOrDie(ns)
 
@@ -584,7 +594,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
 
 			ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
-			framework.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
+			framework.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
 				WithStdinData("abcd1234\n").
 				ExecOrDie(ns)
 
@@ -606,7 +616,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			podName := "run-log-test"
 
 			ginkgo.By("executing a command with run")
-			framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
+			framework.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
 
 			if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
 				framework.Failf("Pod for run-log-test was not ready")
@@ -909,7 +919,7 @@ metadata:
 		framework.ConformanceIt("should check if kubectl can dry-run update Pods", func() {
 			ginkgo.By("running the image " + httpdImage)
 			podName := "e2e-test-httpd-pod"
-			framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, "--labels=run="+podName)
+			framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
 
 			ginkgo.By("replace the image in the pod with server-side dry-run")
 			specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, busyboxImage)
@@ -1386,7 +1396,7 @@ metadata:
 		ginkgo.BeforeEach(func() {
 			ginkgo.By("creating an pod")
 			// Agnhost image generates logs for a total of 100 lines over 20s.
-			framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
+			framework.RunKubectlOrDie(ns, "run", podName, "--image="+agnhostImage, "--restart=Never", podRunningTimeoutArg, "--", "logs-generator", "--log-lines-total", "100", "--run-duration", "20s")
 		})
 		ginkgo.AfterEach(func() {
 			framework.RunKubectlOrDie(ns, "delete", "pod", podName)
@@ -1526,7 +1536,7 @@ metadata:
 		*/
 		framework.ConformanceIt("should create a pod from an image when restart is Never ", func() {
 			ginkgo.By("running the image " + httpdImage)
-			framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", "--image="+httpdImage)
+			framework.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
 			ginkgo.By("verifying the pod " + podName + " was created")
 			pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
 			if err != nil {
@@ -1560,7 +1570,7 @@ metadata:
 		*/
 		framework.ConformanceIt("should update a single-container pod's image ", func() {
 			ginkgo.By("running the image " + httpdImage)
-			framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, "--labels=run="+podName)
+			framework.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
 			ginkgo.By("verifying the pod " + podName + " is running")
 			label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))