Merge pull request #114565 from aojea/fix_e2e_after_contexts

cleanup e2e kubectl cli tests

commit 948d5b8d52
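The diff below makes two kinds of change: the exec-style specs stop hardcoding the pod name "httpd" and reference the shared simplePodName constant instead, and the "kubectl run" specs move out of the "Simple pod" exit-code context into their own "Kubectl run" describe block. As a rough illustration of that shape only, here is a minimal standalone Ginkgo v2 sketch; it does not use the Kubernetes e2e framework, and the runKubectl helper, the busybox image, and the TestKubectlCleanupSketch function are stand-ins invented for this example.

package kubectlcleanup_test

import (
	"context"
	"os/exec"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// Single source of truth for the test pod's name, mirroring simplePodName in the diff.
const simplePodName = "httpd"

// runKubectl shells out to kubectl; a hypothetical stand-in for the e2e framework's
// e2ekubectl.NewKubectlCommand(...).Exec() helpers.
func runKubectl(ctx context.Context, args ...string) error {
	return exec.CommandContext(ctx, "kubectl", args...).Run()
}

var _ = ginkgo.Describe("Kubectl client", func() {
	ginkgo.Describe("Simple pod", func() {
		ginkgo.It("execing into a container with a successful command", func(ctx context.Context) {
			// The pod is referenced via the shared constant, not a "httpd" literal.
			err := runKubectl(ctx, "exec", simplePodName, "--", "/bin/sh", "-c", "exit 0")
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		})
	})

	// The `kubectl run` specs get their own describe block instead of being mixed
	// into the "Simple pod" exit-code context.
	ginkgo.Describe("Kubectl run", func() {
		ginkgo.It("running a successful command", func(ctx context.Context) {
			err := runKubectl(ctx, "run", "-i", "--image=busybox", "--restart=Never",
				"success", "--", "/bin/sh", "-c", "exit 0")
			gomega.Expect(err).NotTo(gomega.HaveOccurred())
		})
	})
})

func TestKubectlCleanupSketch(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "kubectl cleanup sketch")
}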
@@ -480,7 +480,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
 proxyLogs.Reset()
 ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
-output := e2ekubectl.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container").
+output := e2ekubectl.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container").
 WithEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
 ExecOrDie(ns)
 
@@ -516,7 +516,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.By("Running kubectl via kubectl proxy using " + host)
 output := e2ekubectl.NewKubectlCommand(
 ns, host,
-"exec", podRunningTimeoutArg, "httpd", "--", "echo", "running", "in", "container",
+"exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container",
 ).ExecOrDie(ns)
 
 // Verify we got the normal output captured by the exec server
@@ -528,12 +528,12 @@ var _ = SIGDescribe("Kubectl client", func() {
 
 ginkgo.Context("should return command exit codes", func() {
 ginkgo.It("execing into a container with a successful command", func(ctx context.Context) {
-_, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
+_, err := e2ekubectl.NewKubectlCommand(ns, "exec", simplePodName, podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
 framework.ExpectNoError(err)
 })
 
 ginkgo.It("execing into a container with a failing command", func(ctx context.Context) {
-_, err := e2ekubectl.NewKubectlCommand(ns, "exec", "httpd", podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
+_, err := e2ekubectl.NewKubectlCommand(ns, "exec", simplePodName, podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
 ee, ok := err.(uexec.ExitError)
 if !ok {
 framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
@@ -541,129 +541,6 @@ var _ = SIGDescribe("Kubectl client", func() {
 framework.ExpectEqual(ee.ExitStatus(), 42)
 })
 
-ginkgo.It("running a successful command", func(ctx context.Context) {
-_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
-framework.ExpectNoError(err)
-})
-
-ginkgo.It("running a failing command", func(ctx context.Context) {
-_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
-ee, ok := err.(uexec.ExitError)
-if !ok {
-framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
-}
-framework.ExpectEqual(ee.ExitStatus(), 42)
-})
-
-ginkgo.It("[Slow] running a failing command without --restart=Never", func(ctx context.Context) {
-_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
-WithStdinData("abcd1234").
-Exec()
-ee, ok := err.(uexec.ExitError)
-if !ok {
-framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
-}
-if !strings.Contains(ee.String(), "timed out") {
-framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
-}
-})
-
-ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func(ctx context.Context) {
-_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
-WithStdinData("abcd1234").
-Exec()
-ee, ok := err.(uexec.ExitError)
-if !ok {
-framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
-}
-if !strings.Contains(ee.String(), "timed out") {
-framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
-}
-framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
-})
-
-ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) {
-_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
-WithStdinData("abcd1234").
-Exec()
-framework.ExpectNoError(err)
-})
-})
-
-ginkgo.It("should support inline execution and attach", func(ctx context.Context) {
-waitForStdinContent := func(pod, content string) string {
-var logOutput string
-err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
-logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod)
-return strings.Contains(logOutput, content), nil
-})
-
-gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("unexpected error waiting for '%v' output", content))
-return logOutput
-}
-
-ginkgo.By("executing a command with run and attach with stdin")
-// We wait for a non-empty line so we know kubectl has attached
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
-WithStdinData("value\nabcd1234").
-ExecOrDie(ns)
-
-runOutput := waitForStdinContent("run-test", "stdin closed")
-gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
-gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
-gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
-
-gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{})).To(gomega.BeNil())
-
-ginkgo.By("executing a command with run and attach without stdin")
-// There is a race on this scenario described in #73099
-// It fails if we are not able to attach before the container prints
-// "stdin closed", but hasn't exited yet.
-// We wait 10 seconds before printing to give time to kubectl to attach
-// to the container, this does not solve the race though.
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
-WithStdinData("abcd1234").
-ExecOrDie(ns)
-
-runOutput = waitForStdinContent("run-test-2", "stdin closed")
-gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
-gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
-
-gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
-
-ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
-e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
-WithStdinData("abcd1234\n").
-ExecOrDie(ns)
-
-runOutput = waitForStdinContent("run-test-3", "abcd1234")
-gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
-gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))
-
-g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
-runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
-framework.ExpectNoError(err)
-if !e2epod.CheckPodsRunningReady(ctx, c, ns, []string{runTestPod.Name}, time.Minute) {
-framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
-}
-
-gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
-})
-
-ginkgo.It("should contain last line of the log", func(ctx context.Context) {
-podName := "run-log-test"
-
-ginkgo.By("executing a command with run")
-e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
-
-if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
-framework.Failf("Pod for run-log-test was not ready")
-}
-
-logOutput := e2ekubectl.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
-gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
-})
-
 ginkgo.It("should support port-forward", func(ctx context.Context) {
 ginkgo.By("forwarding the container port to a local port")
 cmd := runPortForward(ns, simplePodName, simplePodPort)
@@ -815,6 +692,131 @@ metadata:
 })
 })
 
+ginkgo.Describe("Kubectl run", func() {
+ginkgo.It("running a successful command", func(ctx context.Context) {
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
+framework.ExpectNoError(err)
+})
+
+ginkgo.It("running a failing command", func(ctx context.Context) {
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
+ee, ok := err.(uexec.ExitError)
+if !ok {
+framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
+}
+framework.ExpectEqual(ee.ExitStatus(), 42)
+})
+
+ginkgo.It("[Slow] running a failing command without --restart=Never", func(ctx context.Context) {
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
+WithStdinData("abcd1234").
+Exec()
+ee, ok := err.(uexec.ExitError)
+if !ok {
+framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
+}
+if !strings.Contains(ee.String(), "timed out") {
+framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
+}
+})
+
+ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func(ctx context.Context) {
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
+WithStdinData("abcd1234").
+Exec()
+ee, ok := err.(uexec.ExitError)
+if !ok {
+framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
+}
+if !strings.Contains(ee.String(), "timed out") {
+framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
+}
+framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
+})
+
+ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) {
+_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
+WithStdinData("abcd1234").
+Exec()
+framework.ExpectNoError(err)
+})
+})
+
+ginkgo.It("should support inline execution and attach", func(ctx context.Context) {
+waitForStdinContent := func(pod, content string) string {
+var logOutput string
+err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
+logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod)
+return strings.Contains(logOutput, content), nil
+})
+
+gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("unexpected error waiting for '%v' output", content))
+return logOutput
+}
+
+ginkgo.By("executing a command with run and attach with stdin")
+// We wait for a non-empty line so we know kubectl has attached
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
+WithStdinData("value\nabcd1234").
+ExecOrDie(ns)
+
+runOutput := waitForStdinContent("run-test", "stdin closed")
+gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
+gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
+gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
+
+gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{})).To(gomega.BeNil())
+
+ginkgo.By("executing a command with run and attach without stdin")
+// There is a race on this scenario described in #73099
+// It fails if we are not able to attach before the container prints
+// "stdin closed", but hasn't exited yet.
+// We wait 10 seconds before printing to give time to kubectl to attach
+// to the container, this does not solve the race though.
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
+WithStdinData("abcd1234").
+ExecOrDie(ns)
+
+runOutput = waitForStdinContent("run-test-2", "stdin closed")
+gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
+gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
+
+gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
+
+ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
+e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
+WithStdinData("abcd1234\n").
+ExecOrDie(ns)
+
+runOutput = waitForStdinContent("run-test-3", "abcd1234")
+gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
+gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))
+
+g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
+runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
+framework.ExpectNoError(err)
+if !e2epod.CheckPodsRunningReady(ctx, c, ns, []string{runTestPod.Name}, time.Minute) {
+framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
+}
+
+gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
+})
+
+ginkgo.It("should contain last line of the log", func(ctx context.Context) {
+podName := "run-log-test"
+
+ginkgo.By("executing a command with run")
+e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
+
+if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
+framework.Failf("Pod for run-log-test was not ready")
+}
+
+logOutput := e2ekubectl.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
+gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
+})
+})
+
 ginkgo.Describe("Kubectl api-versions", func() {
 /*
 Release: v1.9