Mirror of https://github.com/k3s-io/kubernetes.git
e2e: use Ginkgo context
All code must use the context from Ginkgo when doing API calls or polling for a change; otherwise the code would not return immediately when the test gets aborted.
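For orientation, here is a minimal sketch (not taken from this commit) of the pattern the diff below applies throughout the e2e suite: the spec body accepts the context that Ginkgo passes in, and every client-go call and polling loop uses that ctx so the test unwinds as soon as the spec is aborted. The framework handle `f`, the "ctx-demo" namespace prefix, the pod name "demo-pod", and the timings are placeholders, not names from the commit.

package kubectl

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("Ginkgo context propagation (sketch)", func() {
	f := framework.NewDefaultFramework("ctx-demo") // hypothetical test framework instance

	ginkgo.It("passes the spec context to API calls and polling", func(ctx context.Context) {
		// API call: use the spec's ctx instead of context.TODO().
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, "demo-pod", metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Polling: check ctx.Err() in the loop condition, the same pattern the
		// diff applies to waitForGuestbookResponse and validateController, so the
		// loop exits as soon as the test is aborted.
		for start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(2 * time.Second) {
			p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
			if err == nil && p.Status.Phase == v1.PodRunning {
				break
			}
		}
	})
})

With this in place, aborting the suite (for example via interrupt or a suite timeout) cancels ctx, and both the API call and the loop return promptly instead of continuing in the background.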
@@ -266,8 +266,8 @@ var _ = SIGDescribe("Kubectl client", func() {
 ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
 })
 }
-forEachPod := func(podFunc func(p v1.Pod)) {
-clusterState().ForEach(podFunc)
+forEachPod := func(ctx context.Context, podFunc func(p v1.Pod)) {
+_ = clusterState().ForEach(ctx, podFunc)
 }
 var c clientset.Interface
 var ns string
@@ -280,11 +280,11 @@ var _ = SIGDescribe("Kubectl client", func() {
 // idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
 // test files.
 // Print debug info if atLeast Pods are not found before the timeout
-waitForOrFailWithDebug := func(atLeast int) {
-pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
+waitForOrFailWithDebug := func(ctx context.Context, atLeast int) {
+pods, err := clusterState().WaitFor(ctx, atLeast, framework.PodStartTimeout)
 if err != nil || len(pods) < atLeast {
 // TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
-e2edebug.DumpAllNamespaceInfo(f.ClientSet, ns)
+e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, ns)
 framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
 }
 }
@@ -341,7 +341,7 @@ var _ = SIGDescribe("Kubectl client", func() {

 ginkgo.By("creating a replication controller")
 e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
-validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
+validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 })

 /*
@@ -354,15 +354,15 @@ var _ = SIGDescribe("Kubectl client", func() {

 ginkgo.By("creating a replication controller")
 e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
-validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
+validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 ginkgo.By("scaling down the replication controller")
 debugDiscovery()
 e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
-validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
+validateController(ctx, c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 ginkgo.By("scaling up the replication controller")
 debugDiscovery()
 e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
-validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
+validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 })
 })

@@ -402,17 +402,17 @@ var _ = SIGDescribe("Kubectl client", func() {
 })

 ginkgo.By("validating guestbook app")
-validateGuestbookApp(c, ns)
+validateGuestbookApp(ctx, c, ns)
 })
 })

 ginkgo.Describe("Simple pod", func() {
 var podYaml string
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
 ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
 podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
 e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
-framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true)
+framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{simplePodName}, framework.PodStartTimeout), true)
 })
 ginkgo.AfterEach(func() {
 cleanupKubectlInputs(podYaml, ns, simplePodSelector)
@@ -579,7 +579,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 if !strings.Contains(ee.String(), "timed out") {
 framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
 }
-e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
+framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout))
 })

 ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) {
@@ -613,7 +613,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
 gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

-gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test", metav1.DeleteOptions{})).To(gomega.BeNil())
+gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{})).To(gomega.BeNil())

 ginkgo.By("executing a command with run and attach without stdin")
 // There is a race on this scenario described in #73099
@@ -629,7 +629,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
 gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

-gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())
+gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{})).To(gomega.BeNil())

 ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
 e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
@@ -643,11 +643,11 @@ var _ = SIGDescribe("Kubectl client", func() {
 g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
 runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
 framework.ExpectNoError(err)
-if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
+if !e2epod.CheckPodsRunningReady(ctx, c, ns, []string{runTestPod.Name}, time.Minute) {
 framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
 }

-gomega.Expect(c.CoreV1().Pods(ns).Delete(context.TODO(), "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
+gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
 })

 ginkgo.It("should contain last line of the log", func(ctx context.Context) {
@@ -656,7 +656,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 ginkgo.By("executing a command with run")
 e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")

-if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
+if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
 framework.Failf("Pod for run-log-test was not ready")
 }

@@ -693,11 +693,11 @@ var _ = SIGDescribe("Kubectl client", func() {

 ginkgo.By("adding rbac permissions")
 // grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
-err := e2eauth.BindClusterRole(f.ClientSet.RbacV1(), "view", f.Namespace.Name,
+err := e2eauth.BindClusterRole(ctx, f.ClientSet.RbacV1(), "view", f.Namespace.Name,
 rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
 framework.ExpectNoError(err)

-err = e2eauth.WaitForAuthorizationUpdate(f.ClientSet.AuthorizationV1(),
+err = e2eauth.WaitForAuthorizationUpdate(ctx, f.ClientSet.AuthorizationV1(),
 serviceaccount.MakeUsername(f.Namespace.Name, "default"),
 f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
 framework.ExpectNoError(err)
@@ -855,7 +855,7 @@ metadata:
 WithStdinReader(stdin).
 ExecOrDie(ns)
 ginkgo.By("checking the result")
-forEachReplicationController(c, ns, "app", "agnhost", validateReplicationControllerConfiguration)
+forEachReplicationController(ctx, c, ns, "app", "agnhost", validateReplicationControllerConfiguration)
 })
 ginkgo.It("should reuse port when apply to an existing SVC", func(ctx context.Context) {
 serviceJSON := readTestFileOrDie(agnhostServiceFilename)
@@ -969,7 +969,7 @@ metadata:
 e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server")

 ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage)
-pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed getting pod %s: %v", podName, err)
 }
@@ -1281,10 +1281,10 @@ metadata:
 e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")

 ginkgo.By("Waiting for Agnhost primary to start.")
-waitForOrFailWithDebug(1)
+waitForOrFailWithDebug(ctx, 1)

 // Pod
-forEachPod(func(pod v1.Pod) {
+forEachPod(ctx, func(pod v1.Pod) {
 output := e2ekubectl.RunKubectlOrDie(ns, "describe", "pod", pod.Name)
 requiredStrings := [][]string{
 {"Name:", "agnhost-primary-"},
@@ -1336,7 +1336,7 @@ metadata:

 // Node
 // It should be OK to list unschedulable Nodes here.
-nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 framework.ExpectNoError(err)
 node := nodes.Items[0]
 output = e2ekubectl.RunKubectlOrDie(ns, "describe", "node", node.Name)
@@ -1377,7 +1377,7 @@ metadata:

 ginkgo.By("waiting for cronjob to start.")
 err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
-cj, err := c.BatchV1().CronJobs(ns).List(context.TODO(), metav1.ListOptions{})
+cj, err := c.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{})
 if err != nil {
 return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err)
 }
@@ -1424,14 +1424,14 @@ metadata:

 // It may take a while for the pods to get registered in some cases, wait to be sure.
 ginkgo.By("Waiting for Agnhost primary to start.")
-waitForOrFailWithDebug(1)
-forEachPod(func(pod v1.Pod) {
+waitForOrFailWithDebug(ctx, 1)
+forEachPod(ctx, func(pod v1.Pod) {
 framework.Logf("wait on agnhost-primary startup in %v ", ns)
 e2eoutput.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout)
 })
 validateService := func(name string, servicePort int, timeout time.Duration) {
 err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
-ep, err := c.CoreV1().Endpoints(ns).Get(context.TODO(), name, metav1.GetOptions{})
+ep, err := c.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{})
 if err != nil {
 // log the real error
 framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
@@ -1462,7 +1462,7 @@ metadata:
 })
 framework.ExpectNoError(err)

-e2eservice, err := c.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
+e2eservice, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
 framework.ExpectNoError(err)

 if len(e2eservice.Spec.Ports) != 1 {
@@ -1479,23 +1479,23 @@ metadata:

 ginkgo.By("exposing RC")
 e2ekubectl.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort))
-e2enetwork.WaitForService(c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout)
+framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout))
 validateService("rm2", 1234, framework.ServiceStartTimeout)

 ginkgo.By("exposing service")
 e2ekubectl.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort))
-e2enetwork.WaitForService(c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout)
+framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout))
 validateService("rm3", 2345, framework.ServiceStartTimeout)
 })
 })

 ginkgo.Describe("Kubectl label", func() {
 var podYaml string
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
 ginkgo.By("creating the pod")
 podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
 e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
-framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true)
+framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{pausePodName}, framework.PodStartTimeout), true)
 })
 ginkgo.AfterEach(func() {
 cleanupKubectlInputs(podYaml, ns, pausePodSelector)
@@ -1530,11 +1530,11 @@ metadata:

 ginkgo.Describe("Kubectl copy", func() {
 var podYaml string
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
 ginkgo.By("creating the pod")
 podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
 e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
-framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true)
+framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true)
 })
 ginkgo.AfterEach(func() {
 cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
@@ -1597,7 +1597,7 @@ metadata:
 }

 ginkgo.By("Waiting for log generator to start.")
-if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
+if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
 framework.Failf("Pod %s was not ready", podName)
 }

@@ -1654,14 +1654,14 @@ metadata:
 ginkgo.By("creating Agnhost RC")
 e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
 ginkgo.By("Waiting for Agnhost primary to start.")
-waitForOrFailWithDebug(1)
+waitForOrFailWithDebug(ctx, 1)
 ginkgo.By("patching all pods")
-forEachPod(func(pod v1.Pod) {
+forEachPod(ctx, func(pod v1.Pod) {
 e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
 })

 ginkgo.By("checking annotations")
-forEachPod(func(pod v1.Pod) {
+forEachPod(ctx, func(pod v1.Pod) {
 found := false
 for key, val := range pod.Annotations {
 if key == "x" && val == "y" {
@@ -1714,7 +1714,7 @@ metadata:
 ginkgo.By("running the image " + httpdImage)
 e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
 ginkgo.By("verifying the pod " + podName + " was created")
-pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed getting pod %s: %v", podName, err)
 }
@@ -1766,7 +1766,7 @@ metadata:
 e2ekubectl.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")

 ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
-pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed getting deployment %s: %v", podName, err)
 }
@@ -1849,7 +1849,7 @@ metadata:
 Effect: v1.TaintEffectNoSchedule,
 }

-nodeName := scheduling.GetNodeThatCanRunPod(f)
+nodeName := scheduling.GetNodeThatCanRunPod(ctx, f)

 ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
 runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
@@ -1880,7 +1880,7 @@ metadata:
 Effect: v1.TaintEffectNoSchedule,
 }

-nodeName := scheduling.GetNodeThatCanRunPod(f)
+nodeName := scheduling.GetNodeThatCanRunPod(ctx, f)

 ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
 runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
@@ -1980,7 +1980,7 @@ metadata:
 e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000")

 ginkgo.By("verifying that the quota was created")
-quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
+quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed getting quota %s: %v", quotaName, err)
 }
@@ -2008,7 +2008,7 @@ metadata:
 e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating")

 ginkgo.By("verifying that the quota was created")
-quota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
+quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
 if err != nil {
 framework.Failf("Failed getting quota %s: %v", quotaName, err)
 }
@@ -2163,31 +2163,31 @@ func curl(url string) (string, error) {
 return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
 }

-func validateGuestbookApp(c clientset.Interface, ns string) {
+func validateGuestbookApp(ctx context.Context, c clientset.Interface, ns string) {
 framework.Logf("Waiting for all frontend pods to be Running.")
 label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
 err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
 framework.ExpectNoError(err)
 framework.Logf("Waiting for frontend to serve content.")
-if !waitForGuestbookResponse(c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) {
+if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) {
 framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
 }

 framework.Logf("Trying to add a new entry to the guestbook.")
-if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) {
+if !waitForGuestbookResponse(ctx, c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) {
 framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
 }

 framework.Logf("Verifying that added entry can be retrieved.")
-if !waitForGuestbookResponse(c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) {
+if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) {
 framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
 }
 }

 // Returns whether received expected response from guestbook on time.
-func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
-for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
-res, err := makeRequestToGuestbook(c, cmd, arg, ns)
+func waitForGuestbookResponse(ctx context.Context, c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
+for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
+res, err := makeRequestToGuestbook(ctx, c, cmd, arg, ns)
 if err == nil && res == expectedResponse {
 return true
 }
@@ -2196,13 +2196,13 @@ func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse
 return false
 }

-func makeRequestToGuestbook(c clientset.Interface, cmd, value string, ns string) (string, error) {
+func makeRequestToGuestbook(ctx context.Context, c clientset.Interface, cmd, value string, ns string) (string, error) {
 proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
 if errProxy != nil {
 return "", errProxy
 }

-ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
+ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout)
 defer cancel()

 result, err := proxyRequest.Namespace(ns).
@@ -2244,13 +2244,13 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader {
 return bytes.NewReader(data)
 }

-func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
+func forEachReplicationController(ctx context.Context, c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
 var rcs *v1.ReplicationControllerList
 var err error
-for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
+for t := time.Now(); time.Since(t) < framework.PodListTimeout && ctx.Err() == nil; time.Sleep(framework.Poll) {
 label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
 options := metav1.ListOptions{LabelSelector: label.String()}
-rcs, err = c.CoreV1().ReplicationControllers(ns).List(context.TODO(), options)
+rcs, err = c.CoreV1().ReplicationControllers(ns).List(ctx, options)
 framework.ExpectNoError(err)
 if len(rcs.Items) > 0 {
 break
@@ -2281,13 +2281,13 @@ func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
 // getUDData creates a validator function based on the input string (i.e. kitten.jpg).
 // For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
 // in the container's json field.
-func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) error {
+func getUDData(jpgExpected string, ns string) func(context.Context, clientset.Interface, string) error {

 // getUDData validates data.json in the update-demo (returns nil if data is ok).
-return func(c clientset.Interface, podID string) error {
+return func(ctx context.Context, c clientset.Interface, podID string) error {
 framework.Logf("validating pod %s", podID)

-ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
+ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout)
 defer cancel()

 body, err := c.CoreV1().RESTClient().Get().
@@ -2296,7 +2296,7 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string)
 SubResource("proxy").
 Name(podID).
 Suffix("data.json").
-Do(context.TODO()).
+Do(ctx).
 Raw()

 if err != nil {
@@ -2373,7 +2373,7 @@ func trimDockerRegistry(imagename string) string {

 // validatorFn is the function which is individual tests will implement.
 // we may want it to return more than just an error, at some point.
-type validatorFn func(c clientset.Interface, podID string) error
+type validatorFn func(ctx context.Context, c clientset.Interface, podID string) error

 // validateController is a generic mechanism for testing RC's that are running.
 // It takes a container name, a test name, and a validator function which is plugged in by a specific test.
@@ -2381,7 +2381,7 @@ type validatorFn func(c clientset.Interface, podID string) error
 // "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
 // "testname": which gets bubbled up to the logging/failure messages if errors happen.
 // "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
-func validateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
+func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
 containerImage = trimDockerRegistry(containerImage)
 getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"

@@ -2391,7 +2391,7 @@ func validateController(c clientset.Interface, containerImage string, replicas i

 ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
 waitLoop:
-for start := time.Now(); time.Since(start) < framework.PodStartTimeout; time.Sleep(5 * time.Second) {
+for start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
 getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
 pods := strings.Fields(getPodsOutput)
 if numPods := len(pods); numPods != replicas {
@@ -2415,7 +2415,7 @@ waitLoop:

 // Call the generic validator function here.
 // This might validate for example, that (1) getting a url works and (2) url is serving correct content.
-if err := validator(c, podID); err != nil {
+if err := validator(ctx, c, podID); err != nil {
 framework.Logf("%s is running right image but validator function failed: %v", podID, err)
 continue waitLoop
 }
@@ -124,8 +124,8 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string, bi
 }

 // WaitForTerminatedContainer waits till a given container be terminated for a given pod.
-func WaitForTerminatedContainer(f *framework.Framework, pod *v1.Pod, containerName string) error {
-return e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
+func WaitForTerminatedContainer(ctx context.Context, f *framework.Framework, pod *v1.Pod, containerName string) error {
+return e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
 if len(testutils.TerminatedContainers(pod)[containerName]) > 0 {
 return true, nil
 }
@@ -207,13 +207,13 @@ func runPortForward(ns, podName string, port int) *portForwardCommand {
 }
 }

-func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
+func doTestConnectSendDisconnect(ctx context.Context, bindAddress string, f *framework.Framework) {
 ginkgo.By("Creating the target pod")
 pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
-if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
 framework.Failf("Couldn't create pod: %v", err)
 }
-if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
+if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
 framework.Failf("Pod did not start running: %v", err)
 }

@@ -242,26 +242,26 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
 }

 ginkgo.By("Waiting for the target pod to stop running")
-if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
+if err := WaitForTerminatedContainer(ctx, f, pod, "portforwardtester"); err != nil {
 framework.Failf("Container did not terminate: %v", err)
 }

 ginkgo.By("Verifying logs")
-gomega.Eventually(func() (string, error) {
-return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
+gomega.Eventually(ctx, func() (string, error) {
+return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
 }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
 gomega.ContainSubstring("Accepted client connection"),
 gomega.ContainSubstring("Done"),
 ))
 }

-func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
+func doTestMustConnectSendNothing(ctx context.Context, bindAddress string, f *framework.Framework) {
 ginkgo.By("Creating the target pod")
 pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress))
-if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
 framework.Failf("Couldn't create pod: %v", err)
 }
-if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
+if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
 framework.Failf("Pod did not start running: %v", err)
 }

@@ -279,26 +279,26 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
 conn.Close()

 ginkgo.By("Waiting for the target pod to stop running")
-if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
+if err := WaitForTerminatedContainer(ctx, f, pod, "portforwardtester"); err != nil {
 framework.Failf("Container did not terminate: %v", err)
 }

 ginkgo.By("Verifying logs")
-gomega.Eventually(func() (string, error) {
-return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
+gomega.Eventually(ctx, func() (string, error) {
+return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
 }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
 gomega.ContainSubstring("Accepted client connection"),
 gomega.ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"),
 ))
 }

-func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) {
+func doTestMustConnectSendDisconnect(ctx context.Context, bindAddress string, f *framework.Framework) {
 ginkgo.By("Creating the target pod")
 pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
-if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
 framework.Failf("Couldn't create pod: %v", err)
 }
-if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
+if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
 framework.Failf("Pod did not start running: %v", err)
 }

@@ -330,7 +330,7 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
 }

 if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
-podlogs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
+podlogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
 if err != nil {
 framework.Logf("Failed to get logs of portforwardtester pod: %v", err)
 } else {
@@ -345,13 +345,13 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
 }

 ginkgo.By("Waiting for the target pod to stop running")
-if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
+if err := WaitForTerminatedContainer(ctx, f, pod, "portforwardtester"); err != nil {
 framework.Failf("Container did not terminate: %v", err)
 }

 ginkgo.By("Verifying logs")
-gomega.Eventually(func() (string, error) {
-return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
+gomega.Eventually(ctx, func() (string, error) {
+return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
 }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
 gomega.ContainSubstring("Accepted client connection"),
 gomega.ContainSubstring("Received expected client data"),
@@ -359,16 +359,16 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
 ))
 }

-func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
+func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.Framework) {
 config, err := framework.LoadConfig()
 framework.ExpectNoError(err, "unable to get base config")

 ginkgo.By("Creating the pod")
 pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
-if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
 framework.Failf("Couldn't create pod: %v", err)
 }
-if err := e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
+if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout); err != nil {
 framework.Failf("Pod did not start running: %v", err)
 }

@@ -386,7 +386,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 }
 defer ws.Close()

-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func() error {
 channel, msg, err := wsRead(ws)
 if err != nil {
 return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
@@ -400,7 +400,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 return nil
 }, time.Minute, 10*time.Second).Should(gomega.Succeed())

-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func() error {
 channel, msg, err := wsRead(ws)
 if err != nil {
 return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
@@ -423,7 +423,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 ginkgo.By("Reading data from the local port")
 buf := bytes.Buffer{}
 expectedData := bytes.Repeat([]byte("x"), 100)
-gomega.Eventually(func() error {
+gomega.Eventually(ctx, func() error {
 channel, msg, err := wsRead(ws)
 if err != nil {
 return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
@@ -439,8 +439,8 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 }, time.Minute, 10*time.Second).Should(gomega.Succeed())

 ginkgo.By("Verifying logs")
-gomega.Eventually(func() (string, error) {
-return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
+gomega.Eventually(ctx, func() (string, error) {
+return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
 }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
 gomega.ContainSubstring("Accepted client connection"),
 gomega.ContainSubstring("Received expected client data"),
@@ -454,21 +454,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() {
 ginkgo.Describe("With a server listening on 0.0.0.0", func() {
 ginkgo.Describe("that expects a client request", func() {
 ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func(ctx context.Context) {
-doTestMustConnectSendNothing("0.0.0.0", f)
+doTestMustConnectSendNothing(ctx, "0.0.0.0", f)
 })
 ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) {
-doTestMustConnectSendDisconnect("0.0.0.0", f)
+doTestMustConnectSendDisconnect(ctx, "0.0.0.0", f)
 })
 })

 ginkgo.Describe("that expects NO client request", func() {
 ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) {
-doTestConnectSendDisconnect("0.0.0.0", f)
+doTestConnectSendDisconnect(ctx, "0.0.0.0", f)
 })
 })

 ginkgo.It("should support forwarding over websockets", func(ctx context.Context) {
-doTestOverWebSockets("0.0.0.0", f)
+doTestOverWebSockets(ctx, "0.0.0.0", f)
 })
 })

@@ -476,21 +476,21 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() {
 ginkgo.Describe("With a server listening on localhost", func() {
 ginkgo.Describe("that expects a client request", func() {
 ginkgo.It("should support a client that connects, sends NO DATA, and disconnects", func(ctx context.Context) {
-doTestMustConnectSendNothing("localhost", f)
+doTestMustConnectSendNothing(ctx, "localhost", f)
 })
 ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) {
-doTestMustConnectSendDisconnect("localhost", f)
+doTestMustConnectSendDisconnect(ctx, "localhost", f)
 })
 })

 ginkgo.Describe("that expects NO client request", func() {
 ginkgo.It("should support a client that connects, sends DATA, and disconnects", func(ctx context.Context) {
-doTestConnectSendDisconnect("localhost", f)
+doTestConnectSendDisconnect(ctx, "localhost", f)
 })
 })

 ginkgo.It("should support forwarding over websockets", func(ctx context.Context) {
-doTestOverWebSockets("localhost", f)
+doTestOverWebSockets(ctx, "localhost", f)
 })
 })
 })